Description (string, lengths 18 to 161k)
Code (string, lengths 15 to 300k)
Natural Language Toolkit: Word List Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Authors: Steven Bird <stevenbird1@gmail.com>, Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

This is a class to read the PanLex Swadesh list from:

    David Kamholz, Jonathan Pool, and Susan M. Colowick (2014).
    PanLex: Building a Resource for Panlingual Lexical Translation.
    In LREC. http://www.lrec-conf.org/proceedings/lrec2014/pdf/1029_Paper.pdf

License: CC0 1.0 Universal
(https://creativecommons.org/publicdomain/zero/1.0/legalcode)

The accompanying langs<size>.txt table lists, for each language variety:
(1) PanLex UID, (2) ISO 639 language code, (3) ISO 639 language type (see README),
(4) normal scripts of expressions, (5) PanLex default name, and (6) the UID of the
language variety in which the default name is an expression.

The reader determines the Swadesh size from the fileids' path and skips empty lines
when reading the language table. words_by_lang() and words_by_iso639() return a list
of list(str); entries() returns a tuple of words for the specified fileids.
import re
from collections import defaultdict, namedtuple

from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *
from nltk.corpus.reader.wordlist import WordListCorpusReader
from nltk.tokenize import line_tokenize

PanlexLanguage = namedtuple(
    "PanlexLanguage",
    [
        "panlex_uid",   # (1) PanLex UID
        "iso639",       # (2) ISO 639 language code
        "iso639_type",  # (3) ISO 639 language type, see README
        "script",       # (4) normal scripts of expressions
        "name",         # (5) PanLex default name
        "langvar_uid",  # (6) UID of the language variety in which the default name is an expression
    ],
)


class PanlexSwadeshCorpusReader(WordListCorpusReader):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Find the Swadesh size using the fileids' path.
        self.swadesh_size = re.match(r"swadesh([0-9].*)\/", self.fileids()[0]).group(1)
        self._languages = {lang.panlex_uid: lang for lang in self.get_languages()}
        self._macro_langauges = self.get_macrolanguages()

    def license(self):
        return "CC0 1.0 Universal"

    def language_codes(self):
        return self._languages.keys()

    def get_languages(self):
        for line in self.raw(f"langs{self.swadesh_size}.txt").split("\n"):
            if not line.strip():  # Skip empty lines.
                continue
            yield PanlexLanguage(*line.strip().split("\t"))

    def get_macrolanguages(self):
        macro_langauges = defaultdict(list)
        for lang in self._languages.values():
            macro_langauges[lang.iso639].append(lang.panlex_uid)
        return macro_langauges

    def words_by_lang(self, lang_code):
        fileid = f"swadesh{self.swadesh_size}/{lang_code}.txt"
        return [concept.split("\t") for concept in self.words(fileid)]

    def words_by_iso639(self, iso63_code):
        fileids = [
            f"swadesh{self.swadesh_size}/{lang_code}.txt"
            for lang_code in self._macro_langauges[iso63_code]
        ]
        return [
            concept.split("\t") for fileid in fileids for concept in self.words(fileid)
        ]

    def entries(self, fileids=None):
        if not fileids:
            fileids = self.fileids()
        wordlists = [self.words(f) for f in fileids]
        return list(zip(*wordlists))
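A short usage sketch for this reader, assuming the panlex_swadesh data package has been installed (e.g. via nltk.download('panlex_swadesh')); the UID 'eng-000' and the macrolanguage code 'eng' are illustrative and should be replaced with codes present in your copy of the word lists.

# Minimal sketch: browsing the PanLex Swadesh word lists.
import nltk
from nltk.corpus import panlex_swadesh

nltk.download("panlex_swadesh")                    # fetch the corpus if missing

print(panlex_swadesh.license())                    # 'CC0 1.0 Universal'
print(list(panlex_swadesh.language_codes())[:5])   # PanLex UIDs, e.g. 'eng-000'

# Entries for a single language variety: each concept is a list of synonyms.
print(panlex_swadesh.words_by_lang("eng-000")[:3])

# All varieties sharing an ISO 639 macrolanguage code.
print(panlex_swadesh.words_by_iso639("eng")[:3])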
Natural Language Toolkit
Copyright (C) 2001-2023 NLTK Project
Author: Piotr Kasprzyk <p.j.kasprzyk@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

In the pl196x corpus each category is stored in a single file, and thus both methods provide identical functionality. In order to accommodate finer granularity, a non-standard textids() method was implemented. All the main functions can be supplied with a list of required chunks, giving much more control to the user.

Inline comments in the reader mark a "WARNING: skip header" step, a tag.startswith('c') branch, and a "to be implemented" stub.
from nltk.corpus.reader.api import * from nltk.corpus.reader.xmldocs import XMLCorpusReader PARA = re.compile(r"<p(?: [^>]*){0,1}>(.*?)</p>") SENT = re.compile(r"<s(?: [^>]*){0,1}>(.*?)</s>") TAGGEDWORD = re.compile(r"<([wc](?: [^>]*){0,1}>)(.*?)</[wc]>") WORD = re.compile(r"<[wc](?: [^>]*){0,1}>(.*?)</[wc]>") TYPE = re.compile(r'type="(.*?)"') ANA = re.compile(r'ana="(.*?)"') TEXTID = re.compile(r'text id="(.*?)"') class TEICorpusView(StreamBackedCorpusView): def __init__( self, corpus_file, tagged, group_by_sent, group_by_para, tagset=None, head_len=0, textids=None, ): self._tagged = tagged self._textids = textids self._group_by_sent = group_by_sent self._group_by_para = group_by_para StreamBackedCorpusView.__init__(self, corpus_file, startpos=head_len) _pagesize = 4096 def read_block(self, stream): block = stream.readlines(self._pagesize) block = concat(block) while (block.count("<text id") > block.count("</text>")) or block.count( "<text id" ) == 0: tmp = stream.readline() if len(tmp) <= 0: break block += tmp block = block.replace("\n", "") textids = TEXTID.findall(block) if self._textids: for tid in textids: if tid not in self._textids: beg = block.find(tid) - 1 end = block[beg:].find("</text>") + len("</text>") block = block[:beg] + block[beg + end :] output = [] for para_str in PARA.findall(block): para = [] for sent_str in SENT.findall(para_str): if not self._tagged: sent = WORD.findall(sent_str) else: sent = list(map(self._parse_tag, TAGGEDWORD.findall(sent_str))) if self._group_by_sent: para.append(sent) else: para.extend(sent) if self._group_by_para: output.append(para) else: output.extend(para) return output def _parse_tag(self, tag_word_tuple): (tag, word) = tag_word_tuple if tag.startswith("w"): tag = ANA.search(tag).group(1) else: tag = TYPE.search(tag).group(1) return word, tag class Pl196xCorpusReader(CategorizedCorpusReader, XMLCorpusReader): head_len = 2770 def __init__(self, *args, **kwargs): if "textid_file" in kwargs: self._textids = kwargs["textid_file"] else: self._textids = None XMLCorpusReader.__init__(self, *args) CategorizedCorpusReader.__init__(self, kwargs) self._init_textids() def _init_textids(self): self._f2t = defaultdict(list) self._t2f = defaultdict(list) if self._textids is not None: with open(self._textids) as fp: for line in fp: line = line.strip() file_id, text_ids = line.split(" ", 1) if file_id not in self.fileids(): raise ValueError( "In text_id mapping file %s: %s not found" % (self._textids, file_id) ) for text_id in text_ids.split(self._delimiter): self._add_textids(file_id, text_id) def _add_textids(self, file_id, text_id): self._f2t[file_id].append(text_id) self._t2f[text_id].append(file_id) def _resolve(self, fileids, categories, textids=None): tmp = None if ( len( list( filter( lambda accessor: accessor is None, (fileids, categories, textids), ) ) ) != 1 ): raise ValueError( "Specify exactly one of: fileids, " "categories or textids" ) if fileids is not None: return fileids, None if categories is not None: return self.fileids(categories), None if textids is not None: if isinstance(textids, str): textids = [textids] files = sum((self._t2f[t] for t in textids), []) tdict = dict() for f in files: tdict[f] = set(self._f2t[f]) & set(textids) return files, tdict def decode_tag(self, tag): return tag def textids(self, fileids=None, categories=None): fileids, _ = self._resolve(fileids, categories) if fileids is None: return sorted(self._t2f) if isinstance(fileids, str): fileids = [fileids] return sorted(sum((self._f2t[d] for d in fileids), [])) def 
words(self, fileids=None, categories=None, textids=None): fileids, textids = self._resolve(fileids, categories, textids) if fileids is None: fileids = self._fileids elif isinstance(fileids, str): fileids = [fileids] if textids: return concat( [ TEICorpusView( self.abspath(fileid), False, False, False, head_len=self.head_len, textids=textids[fileid], ) for fileid in fileids ] ) else: return concat( [ TEICorpusView( self.abspath(fileid), False, False, False, head_len=self.head_len, ) for fileid in fileids ] ) def sents(self, fileids=None, categories=None, textids=None): fileids, textids = self._resolve(fileids, categories, textids) if fileids is None: fileids = self._fileids elif isinstance(fileids, str): fileids = [fileids] if textids: return concat( [ TEICorpusView( self.abspath(fileid), False, True, False, head_len=self.head_len, textids=textids[fileid], ) for fileid in fileids ] ) else: return concat( [ TEICorpusView( self.abspath(fileid), False, True, False, head_len=self.head_len ) for fileid in fileids ] ) def paras(self, fileids=None, categories=None, textids=None): fileids, textids = self._resolve(fileids, categories, textids) if fileids is None: fileids = self._fileids elif isinstance(fileids, str): fileids = [fileids] if textids: return concat( [ TEICorpusView( self.abspath(fileid), False, True, True, head_len=self.head_len, textids=textids[fileid], ) for fileid in fileids ] ) else: return concat( [ TEICorpusView( self.abspath(fileid), False, True, True, head_len=self.head_len ) for fileid in fileids ] ) def tagged_words(self, fileids=None, categories=None, textids=None): fileids, textids = self._resolve(fileids, categories, textids) if fileids is None: fileids = self._fileids elif isinstance(fileids, str): fileids = [fileids] if textids: return concat( [ TEICorpusView( self.abspath(fileid), True, False, False, head_len=self.head_len, textids=textids[fileid], ) for fileid in fileids ] ) else: return concat( [ TEICorpusView( self.abspath(fileid), True, False, False, head_len=self.head_len ) for fileid in fileids ] ) def tagged_sents(self, fileids=None, categories=None, textids=None): fileids, textids = self._resolve(fileids, categories, textids) if fileids is None: fileids = self._fileids elif isinstance(fileids, str): fileids = [fileids] if textids: return concat( [ TEICorpusView( self.abspath(fileid), True, True, False, head_len=self.head_len, textids=textids[fileid], ) for fileid in fileids ] ) else: return concat( [ TEICorpusView( self.abspath(fileid), True, True, False, head_len=self.head_len ) for fileid in fileids ] ) def tagged_paras(self, fileids=None, categories=None, textids=None): fileids, textids = self._resolve(fileids, categories, textids) if fileids is None: fileids = self._fileids elif isinstance(fileids, str): fileids = [fileids] if textids: return concat( [ TEICorpusView( self.abspath(fileid), True, True, True, head_len=self.head_len, textids=textids[fileid], ) for fileid in fileids ] ) else: return concat( [ TEICorpusView( self.abspath(fileid), True, True, True, head_len=self.head_len ) for fileid in fileids ] ) def xml(self, fileids=None, categories=None): fileids, _ = self._resolve(fileids, categories) if len(fileids) == 1: return XMLCorpusReader.xml(self, fileids[0]) else: raise TypeError("Expected a single file")
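The reader above relies on module-level regexes over the corpus' pseudo-TEI markup. The following self-contained sketch shows what those patterns extract; the sample string is invented for illustration (it is not taken from the pl196x corpus), and only the patterns themselves are copied from the reader.

import re

# Patterns copied from the reader above.
PARA = re.compile(r"<p(?: [^>]*){0,1}>(.*?)</p>")
SENT = re.compile(r"<s(?: [^>]*){0,1}>(.*?)</s>")
TAGGEDWORD = re.compile(r"<([wc](?: [^>]*){0,1}>)(.*?)</[wc]>")
TYPE = re.compile(r'type="(.*?)"')
ANA = re.compile(r'ana="(.*?)"')

# Invented one-line sample in the markup style the regexes expect.
sample = '<p><s><w ana="subst">Ala</w><w ana="verb">ma</w><c type="interp">.</c></s></p>'

for para in PARA.findall(sample):
    for sent in SENT.findall(para):
        for tag, word in TAGGEDWORD.findall(sent):
            # Mirrors TEICorpusView._parse_tag: <w> carries ana=, <c> carries type=.
            if tag.startswith("w"):
                print(word, ANA.search(tag).group(1))
            else:
                print(word, TYPE.search(tag).group(1))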
Natural Language Toolkit: Plaintext Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Authors: Steven Bird <stevenbird1@gmail.com>, Edward Loper <edloper@gmail.com>, Nitin Madnani <nmadnani@umiacs.umd.edu>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

A reader for corpora that consist of plaintext documents.

PlaintextCorpusReader: reader for corpora that consist of plaintext documents. Paragraphs are assumed to be split using blank lines. Sentences and words can be tokenized using the default tokenizers, or by custom tokenizers specified as parameters to the constructor. This corpus reader can be customized (e.g. to skip preface sections of specific document formats) by creating a subclass and overriding the CorpusView class variable, which names the corpus view class used by this reader; subclasses may specify alternative corpus view classes, e.g. to skip the preface sections of documents.

The constructor builds a new plaintext corpus reader for a set of documents located at the given root directory. Example usage:

    root = '/usr/local/share/nltk_data/corpora/webtext/'
    reader = PlaintextCorpusReader(root, '.*\.txt')  # doctest: +SKIP

:param root: the root directory for this corpus
:param fileids: a list or regexp specifying the fileids in this corpus
:param word_tokenizer: tokenizer for breaking sentences or paragraphs into words
:param sent_tokenizer: tokenizer for breaking paragraphs into sentences
:param para_block_reader: the block reader used to divide the corpus into paragraph blocks

words() returns the given file(s) as a list of words and punctuation symbols (list(str)). sents() returns the given file(s) as a list of sentences or utterances, each encoded as a list of word strings (list(list(str))). paras() returns the given file(s) as a list of paragraphs, each encoded as a list of sentences, which are in turn encoded as lists of word strings (list(list(list(str)))). The word block reader reads 20 lines at a time.

CategorizedPlaintextCorpusReader: a reader for plaintext corpora whose documents are divided into categories based on their file identifiers. The categorization arguments (cat_pattern, cat_map, and cat_file) are passed to the CategorizedCorpusReader constructor; the remaining arguments are passed to the PlaintextCorpusReader constructor. (FIXME: is there a better way to avoid hardcoding this? Possibly add a language kwarg to CategorizedPlaintextCorpusReader to override the sent_tokenizer.)

EuroparlCorpusReader: reader for Europarl corpora that consist of plaintext documents. Documents are divided into chapters instead of paragraphs, as for regular plaintext documents; chapters are separated using blank lines. Everything is inherited from PlaintextCorpusReader, except that since the corpus is pre-processed and pre-tokenized, the word tokenizer should just split each line at whitespace, and for the same reason the sentence tokenizer should just split each paragraph at line breaks. There is a new chapters() method that returns chapters instead of paragraphs; the paras() method inherited from PlaintextCorpusReader is made non-functional, to remove any confusion between chapters and paragraphs for Europarl. chapters() returns the given file(s) as a list of chapters, each encoded as a list of sentences, which are in turn encoded as lists of word strings (list(list(list(str)))). Its word block reader also reads 20 lines at a time.
import nltk.data from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.tokenize import * class PlaintextCorpusReader(CorpusReader): CorpusView = StreamBackedCorpusView def __init__( self, root, fileids, word_tokenizer=WordPunctTokenizer(), sent_tokenizer=nltk.data.LazyLoader("tokenizers/punkt/english.pickle"), para_block_reader=read_blankline_block, encoding="utf8", ): r CorpusReader.__init__(self, root, fileids, encoding) self._word_tokenizer = word_tokenizer self._sent_tokenizer = sent_tokenizer self._para_block_reader = para_block_reader def words(self, fileids=None): return concat( [ self.CorpusView(path, self._read_word_block, encoding=enc) for (path, enc, fileid) in self.abspaths(fileids, True, True) ] ) def sents(self, fileids=None): if self._sent_tokenizer is None: raise ValueError("No sentence tokenizer for this corpus") return concat( [ self.CorpusView(path, self._read_sent_block, encoding=enc) for (path, enc, fileid) in self.abspaths(fileids, True, True) ] ) def paras(self, fileids=None): if self._sent_tokenizer is None: raise ValueError("No sentence tokenizer for this corpus") return concat( [ self.CorpusView(path, self._read_para_block, encoding=enc) for (path, enc, fileid) in self.abspaths(fileids, True, True) ] ) def _read_word_block(self, stream): words = [] for i in range(20): words.extend(self._word_tokenizer.tokenize(stream.readline())) return words def _read_sent_block(self, stream): sents = [] for para in self._para_block_reader(stream): sents.extend( [ self._word_tokenizer.tokenize(sent) for sent in self._sent_tokenizer.tokenize(para) ] ) return sents def _read_para_block(self, stream): paras = [] for para in self._para_block_reader(stream): paras.append( [ self._word_tokenizer.tokenize(sent) for sent in self._sent_tokenizer.tokenize(para) ] ) return paras class CategorizedPlaintextCorpusReader(CategorizedCorpusReader, PlaintextCorpusReader): def __init__(self, *args, **kwargs): CategorizedCorpusReader.__init__(self, kwargs) PlaintextCorpusReader.__init__(self, *args, **kwargs) class PortugueseCategorizedPlaintextCorpusReader(CategorizedPlaintextCorpusReader): def __init__(self, *args, **kwargs): CategorizedCorpusReader.__init__(self, kwargs) kwargs["sent_tokenizer"] = nltk.data.LazyLoader( "tokenizers/punkt/portuguese.pickle" ) PlaintextCorpusReader.__init__(self, *args, **kwargs) class EuroparlCorpusReader(PlaintextCorpusReader): def _read_word_block(self, stream): words = [] for i in range(20): words.extend(stream.readline().split()) return words def _read_sent_block(self, stream): sents = [] for para in self._para_block_reader(stream): sents.extend([sent.split() for sent in para.splitlines()]) return sents def _read_para_block(self, stream): paras = [] for para in self._para_block_reader(stream): paras.append([sent.split() for sent in para.splitlines()]) return paras def chapters(self, fileids=None): return concat( [ self.CorpusView(fileid, self._read_para_block, encoding=enc) for (fileid, enc) in self.abspaths(fileids, True) ] ) def paras(self, fileids=None): raise NotImplementedError( "The Europarl corpus reader does not support paragraphs. Please use chapters() instead." )
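A minimal, self-contained sketch of PlaintextCorpusReader over a throwaway directory; the file name and its contents are invented, and the punkt model used by the default sentence tokenizer is assumed to be installable via nltk.download('punkt').

import os
import tempfile

import nltk
from nltk.corpus.reader.plaintext import PlaintextCorpusReader

nltk.download("punkt")                 # model behind the default sent_tokenizer

# Hypothetical corpus: one plaintext document with two paragraphs.
root = tempfile.mkdtemp()
with open(os.path.join(root, "doc1.txt"), "w") as f:
    f.write("This is the first paragraph. It has two sentences.\n\n"
            "Second paragraph here.\n")

reader = PlaintextCorpusReader(root, r".*\.txt")
print(reader.fileids())                # ['doc1.txt']
print(reader.words()[:8])              # words and punctuation symbols
print(reader.sents()[1])               # ['It', 'has', 'two', 'sentences', '.']
print(len(reader.paras()))             # 2 (paragraphs split on blank lines)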
Natural Language Toolkit: PP Attachment Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Authors: Steven Bird <stevenbird1@gmail.com>, Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Read lines from the Prepositional Phrase Attachment Corpus.

The PP Attachment Corpus contains several files having the format:

    sentence_id verb noun1 preposition noun2 attachment

For example:

    42960 gives ity to administration V
    46742 gives inventors of microchip N

The PP attachment is to the verb phrase (V) or noun phrase (N), i.e.:

    (VP gives (NP ity) (PP to administration))
    (VP gives (NP inventors (PP of microchip)))

The corpus contains the following files:

    training:   training set
    devset:     development test set, used for algorithm development
    test:       test set, used to report results
    bitstrings: word classes derived from Mutual Information Clustering for the Wall Street Journal

Ratnaparkhi, Adwait (1994). A Maximum Entropy Model for Prepositional Phrase Attachment. Proceedings of the ARPA Human Language Technology Conference. http://www.cis.upenn.edu/~adwait/papers/hlt94.ps

The PP Attachment Corpus is distributed with NLTK with the permission of the author.
from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *


class PPAttachment:
    def __init__(self, sent, verb, noun1, prep, noun2, attachment):
        self.sent = sent
        self.verb = verb
        self.noun1 = noun1
        self.prep = prep
        self.noun2 = noun2
        self.attachment = attachment

    def __repr__(self):
        return (
            "PPAttachment(sent=%r, verb=%r, noun1=%r, prep=%r, "
            "noun2=%r, attachment=%r)"
            % (self.sent, self.verb, self.noun1, self.prep, self.noun2, self.attachment)
        )


class PPAttachmentCorpusReader(CorpusReader):
    def attachments(self, fileids):
        return concat(
            [
                StreamBackedCorpusView(fileid, self._read_obj_block, encoding=enc)
                for (fileid, enc) in self.abspaths(fileids, True)
            ]
        )

    def tuples(self, fileids):
        return concat(
            [
                StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc)
                for (fileid, enc) in self.abspaths(fileids, True)
            ]
        )

    def _read_tuple_block(self, stream):
        line = stream.readline()
        if line:
            return [tuple(line.split())]
        else:
            return []

    def _read_obj_block(self, stream):
        line = stream.readline()
        if line:
            return [PPAttachment(*line.split())]
        else:
            return []
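A usage sketch, assuming the ppattach data package is installed (nltk.download('ppattach')); 'training' is one of the fileids listed in the description above.

import nltk
from nltk.corpus import ppattach

nltk.download("ppattach")

# Each attachment records sent, verb, noun1, prep, noun2 and attachment (V or N).
inst = ppattach.attachments("training")[0]
print(inst.verb, inst.noun1, inst.prep, inst.noun2, inst.attachment)

# The same data as plain tuples.
print(ppattach.tuples("training")[:2])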
Natural Language Toolkit: Pros and Cons Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Author: Pierpaolo Pantone <24alsecondo@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

CorpusReader for the Pros and Cons dataset.

Pros and Cons dataset information:
Contact: Bing Liu, liub@cs.uic.edu, https://www.cs.uic.edu/~liub
Distributed with permission.

Related papers:
- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences". Proceedings of the 22nd International Conference on Computational Linguistics (COLING-2008), Manchester, 18-22 August, 2008.
- Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing and Comparing Opinions on the Web". Proceedings of the 14th International World Wide Web conference (WWW-2005), May 10-14, 2005, Chiba, Japan.

ProsConsCorpusReader: reader for the Pros and Cons sentence dataset.

    >>> from nltk.corpus import pros_cons
    >>> pros_cons.sents(categories='Cons')  # doctest: +NORMALIZE_WHITESPACE
    [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy', 'to',
    'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'], ...]
    >>> pros_cons.words('IntegratedPros.txt')
    ['Easy', 'to', 'use', ',', 'economical', '!', ...]

:param root: the root directory for the corpus
:param fileids: a list or regexp specifying the fileids in the corpus
:param word_tokenizer: a tokenizer for breaking sentences or paragraphs into words (default: WordPunctTokenizer, as in the constructor signature)
:param encoding: the encoding that should be used to read the corpus
:param kwargs: additional parameters passed to CategorizedCorpusReader

sents() returns all sentences in the corpus, or in the specified files/categories; fileids is a list or regexp specifying the ids of the files whose sentences have to be returned, and categories is a list specifying the categories whose sentences have to be returned. The given file(s) are returned as a list of sentences, each tokenized using the specified word_tokenizer (list(list(str))). words() returns all words and punctuation symbols in the corpus, or in the specified files/categories, as a list of words and punctuation symbols (list(str)). The sentence block reader reads 20 lines at a time.
import re

from nltk.corpus.reader.api import *
from nltk.tokenize import *


class ProsConsCorpusReader(CategorizedCorpusReader, CorpusReader):
    CorpusView = StreamBackedCorpusView

    def __init__(
        self, root, fileids, word_tokenizer=WordPunctTokenizer(), encoding="utf8", **kwargs
    ):
        CorpusReader.__init__(self, root, fileids, encoding)
        CategorizedCorpusReader.__init__(self, kwargs)
        self._word_tokenizer = word_tokenizer

    def sents(self, fileids=None, categories=None):
        fileids = self._resolve(fileids, categories)
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, str):
            fileids = [fileids]
        return concat(
            [
                self.CorpusView(path, self._read_sent_block, encoding=enc)
                for (path, enc, fileid) in self.abspaths(fileids, True, True)
            ]
        )

    def words(self, fileids=None, categories=None):
        fileids = self._resolve(fileids, categories)
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, str):
            fileids = [fileids]
        return concat(
            [
                self.CorpusView(path, self._read_word_block, encoding=enc)
                for (path, enc, fileid) in self.abspaths(fileids, True, True)
            ]
        )

    def _read_sent_block(self, stream):
        sents = []
        for i in range(20):  # Read 20 lines at a time.
            line = stream.readline()
            if not line:
                continue
            sent = re.match(r"^(?!\n)\s*<(Pros|Cons)>(.*)</(?:Pros|Cons)>", line)
            if sent:
                sents.append(self._word_tokenizer.tokenize(sent.group(2).strip()))
        return sents

    def _read_word_block(self, stream):
        words = []
        for sent in self._read_sent_block(stream):
            words.extend(sent)
        return words
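A usage sketch mirroring the doctest in the description, assuming the pros_cons data package is installed (nltk.download('pros_cons')).

import nltk
from nltk.corpus import pros_cons

nltk.download("pros_cons")

print(pros_cons.categories())                      # e.g. ['Cons', 'Pros']
print(pros_cons.sents(categories="Cons")[:2])      # tokenized 'Cons' sentences
print(pros_cons.words("IntegratedPros.txt")[:10])  # words from a single file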
Natural Language Toolkit: RTE Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Author: Ewan Klein <ewan@inf.ed.ac.uk>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.

The files were taken from the RTE1, RTE2 and RTE3 datasets and the files were regularized. Filenames are of the form rte*_dev.xml and rte*_test.xml; the latter are the gold standard annotated files.

Each entailment corpus is a list of "text"/"hypothesis" pairs. The following example is taken from RTE3:

    <pair id="1" entailment="YES" task="IE" length="short">
        <t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
        Yuganskneftegaz was originally sold for US$ 9.4 billion to a little
        known company Baikalfinansgroup which was later bought by the Russian
        state-owned oil company Rosneft.</t>
        <h>Baikalfinansgroup was sold to Rosneft.</h>
    </pair>

In order to provide globally unique IDs for each pair, a new attribute challenge has been added to the root element entailment-corpus of each file, taking values 1, 2 or 3. The gid is formatted 'm-n', where 'm' is the challenge number and 'n' is the pair ID.

norm(): normalize the string value in an RTE pair's value or entailment attribute as an integer (1, 0).
:param value_string: the label used to classify a text/hypothesis pair
:type value_string: str
:rtype: int

RTEPair: container for RTE text/hypothesis pairs. The entailment relation is signalled by the value attribute in RTE1, and by entailment in RTE2 and RTE3; these both get mapped onto the entailment attribute of this class.
:param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
:param id: identifier for the pair
:param text: the text component of the pair
:param hyp: the hypothesis component of the pair
:param value: classification label for the pair
:param task: attribute for the particular NLP task that the data was drawn from
:param length: attribute for the length of the text of the pair

RTECorpusReader: corpus reader for corpora in RTE challenges. This is just a wrapper around the XMLCorpusReader; see the module docstring above for the expected structure of input documents.

_read_etree(): map the XML input into an RTEPair. This uses the getiterator() method from the ElementTree package to find all the pair elements.
:param doc: a parsed XML document
:rtype: list(RTEPair)

pairs(): build a list of RTEPairs from an RTE corpus.
:param fileids: a list of RTE corpus fileids
:type: list
:rtype: list(RTEPair)
from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *
from nltk.corpus.reader.xmldocs import *


def norm(value_string):
    # Map the value/entailment label onto an integer (1, 0).
    valdict = {"TRUE": 1, "FALSE": 0, "YES": 1, "NO": 0}
    return valdict[value_string.upper()]


class RTEPair:
    def __init__(
        self,
        pair,
        challenge=None,
        id=None,
        text=None,
        hyp=None,
        value=None,
        task=None,
        length=None,
    ):
        self.challenge = challenge
        self.id = pair.attrib["id"]
        self.gid = f"{self.challenge}-{self.id}"
        self.text = pair[0].text
        self.hyp = pair[1].text

        if "value" in pair.attrib:
            self.value = norm(pair.attrib["value"])
        elif "entailment" in pair.attrib:
            self.value = norm(pair.attrib["entailment"])
        else:
            self.value = value
        if "task" in pair.attrib:
            self.task = pair.attrib["task"]
        else:
            self.task = task
        if "length" in pair.attrib:
            self.length = pair.attrib["length"]
        else:
            self.length = length

    def __repr__(self):
        if self.challenge:
            return f"<RTEPair: gid={self.challenge}-{self.id}>"
        else:
            return "<RTEPair: id=%s>" % self.id


class RTECorpusReader(XMLCorpusReader):
    def _read_etree(self, doc):
        try:
            challenge = doc.attrib["challenge"]
        except KeyError:
            challenge = None
        pairiter = doc.iter("pair")
        return [RTEPair(pair, challenge=challenge) for pair in pairiter]

    def pairs(self, fileids):
        if isinstance(fileids, str):
            fileids = [fileids]
        return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
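A usage sketch, assuming the rte data package is installed (nltk.download('rte')); the fileid follows the rte*_dev.xml naming described above.

import nltk
from nltk.corpus import rte

nltk.download("rte")

pairs = rte.pairs(["rte3_dev.xml"])
p = pairs[0]
print(p.gid, p.value, p.task)   # globally unique id, 1/0 entailment label, source task
print(p.text)                   # the text component
print(p.hyp)                    # the hypothesis component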
Natural Language Toolkit: SemCor Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Author: Nathan Schneider <nschneid@cs.cmu.edu>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Corpus reader for the SemCor Corpus.

SemcorCorpusReader: corpus reader for the SemCor Corpus. For access to the complete XML data structure, use the xml() method. For access to simple word lists and tagged word lists, use words(), sents(), tagged_words() and tagged_sents().

words() returns the given file(s) as a list of words and punctuation symbols (list(str)). chunks() returns the given file(s) as a list of chunks, each of which is a list of words and punctuation symbols that form a unit (list(list(str))). tagged_chunks() returns the given file(s) as a list of tagged chunks, represented in tree form (list(Tree)); its tag parameter is 'pos' (part of speech), 'sem' (semantic), or 'both', indicating the kind of tags to include: semantic tags consist of WordNet lemma IDs, plus an 'NE' node if the chunk is a named entity without a specific entry in WordNet (named entities of type 'other' have no lemma; other chunks not in WordNet have no semantic tag; punctuation tokens have None for their part of speech tag). sents() returns a list of sentences, each encoded as a list of word strings (list(list(str))). chunk_sents() returns a list of sentences, each encoded as a list of chunks (list(list(list(str)))). tagged_sents() returns a list of sentences, each represented as a list of tagged chunks in tree form (list(list(Tree))), with the same tag parameter as tagged_chunks().

Implementation notes: when the unit is a word and sentences are not bracketed, the result of the SemcorWordView may be a multiword unit, so a LazyConcatenation makes sure the sentence is flattened. The _items() helper is used to implement the view methods; it returns a list of tokens (segmented words, chunks, or sentences), and the tokens and chunks may optionally be tagged with POS and sense information.

:param fileid: the name of the underlying file
:param unit: one of 'token', 'word', or 'chunk'
:param bracket_sent: if True, include sentence bracketing
:param pos_tag: whether to include part-of-speech tags
:param sem_tag: whether to include semantic tags, namely WordNet lemma and OOV named entity status

Notes on parsing individual tokens (the _word() helper): an empty-text fix addresses issue #337; the lemma (or NE class) and the lex_sense (a locator for the lemma's sense, see http://wordnet.princeton.edu/man/senseidx.5WN.html) are combined into a sense key. A redefinition indicates that the lookup string does not exactly match the enclosed string, e.g. due to typographical adjustments or discontinuity of a multiword expression; if a redefinition has occurred, the rdf attribute holds its inflected form and lemma holds its lemma. For NEs, rdf, lemma, and pn all hold the same value (the NE class). wnsn is the WordNet sense number, and pn marks a personal name (NE) not in WordNet; pos is the part of speech for the whole chunk, None for punctuation (TODO: the case where punctuation intervenes in a multiword expression). When the WordNet Lemma object cannot be retrieved, possible reasons are (a) the WordNet corpus is not downloaded, or (b) a nonexistent sense is annotated, e.g. such.s.00 triggers nltk.corpus.reader.wordnet.WordNetError: No synset found for key u'such%5:00:01:specified:00'; the solution is to just use the lemma name as a string, e.g. 'reach.v.02' (the sense number may be something like '2;1'). Other NE chunks are returned as a list.

SemcorSentence: a list of words, augmented by an attribute num used to record the sentence identifier (the 'n' attribute from the XML).

SemcorWordView: a stream backed corpus view specialized for use with the BNC corpus, taking the same fileid, unit, bracket_sent, pos_tag, and sem_tag parameters as above.
__docformat__ = "epytext en" from nltk.corpus.reader.api import * from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView from nltk.tree import Tree class SemcorCorpusReader(XMLCorpusReader): def __init__(self, root, fileids, wordnet, lazy=True): XMLCorpusReader.__init__(self, root, fileids) self._lazy = lazy self._wordnet = wordnet def words(self, fileids=None): return self._items(fileids, "word", False, False, False) def chunks(self, fileids=None): return self._items(fileids, "chunk", False, False, False) def tagged_chunks(self, fileids=None, tag=("pos" or "sem" or "both")): return self._items(fileids, "chunk", False, tag != "sem", tag != "pos") def sents(self, fileids=None): return self._items(fileids, "word", True, False, False) def chunk_sents(self, fileids=None): return self._items(fileids, "chunk", True, False, False) def tagged_sents(self, fileids=None, tag=("pos" or "sem" or "both")): return self._items(fileids, "chunk", True, tag != "sem", tag != "pos") def _items(self, fileids, unit, bracket_sent, pos_tag, sem_tag): if unit == "word" and not bracket_sent: _ = lambda *args: LazyConcatenation( (SemcorWordView if self._lazy else self._words)(*args) ) else: _ = SemcorWordView if self._lazy else self._words return concat( [ _(fileid, unit, bracket_sent, pos_tag, sem_tag, self._wordnet) for fileid in self.abspaths(fileids) ] ) def _words(self, fileid, unit, bracket_sent, pos_tag, sem_tag): assert unit in ("token", "word", "chunk") result = [] xmldoc = ElementTree.parse(fileid).getroot() for xmlsent in xmldoc.findall(".//s"): sent = [] for xmlword in _all_xmlwords_in(xmlsent): itm = SemcorCorpusReader._word( xmlword, unit, pos_tag, sem_tag, self._wordnet ) if unit == "word": sent.extend(itm) else: sent.append(itm) if bracket_sent: result.append(SemcorSentence(xmlsent.attrib["snum"], sent)) else: result.extend(sent) assert None not in result return result @staticmethod def _word(xmlword, unit, pos_tag, sem_tag, wordnet): tkn = xmlword.text if not tkn: tkn = "" lemma = xmlword.get("lemma", tkn) lexsn = xmlword.get("lexsn") if lexsn is not None: sense_key = lemma + "%" + lexsn wnpos = ("n", "v", "a", "r", "s")[ int(lexsn.split(":")[0]) - 1 ] else: sense_key = wnpos = None redef = xmlword.get( "rdf", tkn ) sensenum = xmlword.get("wnsn") isOOVEntity = "pn" in xmlword.keys() pos = xmlword.get( "pos" ) if unit == "token": if not pos_tag and not sem_tag: itm = tkn else: itm = ( (tkn,) + ((pos,) if pos_tag else ()) + ((lemma, wnpos, sensenum, isOOVEntity) if sem_tag else ()) ) return itm else: ww = tkn.split("_") if unit == "word": return ww else: if sensenum is not None: try: sense = wordnet.lemma_from_key(sense_key) except Exception: try: sense = "%s.%s.%02d" % ( lemma, wnpos, int(sensenum), ) except ValueError: sense = ( lemma + "." + wnpos + "." 
+ sensenum ) bottom = [Tree(pos, ww)] if pos_tag else ww if sem_tag and isOOVEntity: if sensenum is not None: return Tree(sense, [Tree("NE", bottom)]) else: return Tree("NE", bottom) elif sem_tag and sensenum is not None: return Tree(sense, bottom) elif pos_tag: return bottom[0] else: return bottom def _all_xmlwords_in(elt, result=None): if result is None: result = [] for child in elt: if child.tag in ("wf", "punc"): result.append(child) else: _all_xmlwords_in(child, result) return result class SemcorSentence(list): def __init__(self, num, items): self.num = num list.__init__(self, items) class SemcorWordView(XMLCorpusView): def __init__(self, fileid, unit, bracket_sent, pos_tag, sem_tag, wordnet): if bracket_sent: tagspec = ".*/s" else: tagspec = ".*/s/(punc|wf)" self._unit = unit self._sent = bracket_sent self._pos_tag = pos_tag self._sem_tag = sem_tag self._wordnet = wordnet XMLCorpusView.__init__(self, fileid, tagspec) def handle_elt(self, elt, context): if self._sent: return self.handle_sent(elt) else: return self.handle_word(elt) def handle_word(self, elt): return SemcorCorpusReader._word( elt, self._unit, self._pos_tag, self._sem_tag, self._wordnet ) def handle_sent(self, elt): sent = [] for child in elt: if child.tag in ("wf", "punc"): itm = self.handle_word(child) if self._unit == "word": sent.extend(itm) else: sent.append(itm) else: raise ValueError("Unexpected element %s" % child.tag) return SemcorSentence(elt.attrib["snum"], sent)
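A usage sketch, assuming the semcor and wordnet data packages are installed; with tag='sem', chunks come back as Trees whose labels are WordNet Lemma objects where the sense key could be resolved (and plain strings otherwise, as described above).

import nltk
from nltk.corpus import semcor

nltk.download("semcor")
nltk.download("wordnet")        # needed to resolve sense keys to Lemma objects

print(semcor.words()[:10])      # plain word list
print(semcor.chunks()[:5])      # chunks (possibly multiword units)

# First sentence as semantically tagged chunks.
for chunk in semcor.tagged_sents(tag="sem")[0][:5]:
    print(chunk)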
Natural Language Toolkit: Senseval 2 Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Authors: Trevor Cohn <tacohn@cs.mu.oz.au>, Steven Bird <stevenbird1@gmail.com> (modifications)
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Read from the Senseval 2 Corpus.

SENSEVAL (http://www.senseval.org/): evaluation exercises for Word Sense Disambiguation, organized by ACL-SIGLEX (https://www.siglex.org/).

Prepared by Ted Pedersen <tpederse@umn.edu>, University of Minnesota, https://www.d.umn.edu/~tpederse/data.html. Distributed with permission.

The NLTK version of the Senseval 2 files uses well-formed XML. Each instance of the ambiguous words "hard", "interest", "line", and "serve" is tagged with a sense identifier, and supplied with context.

Notes from the corpus view: it keeps a list of stream positions and a list of lexelt names so it can decide which lexical element it is in; it handles the start of a lexical element (a lexelt may have no item), the start of an instance, the body of an instance, and the end of an instance ("is this ok to do?"), with some sanity checks; it records the position of the head and adds on the head word itself; <s> is a sentence boundary marker.

_fixXML() fixes the various issues with the Senseval pseudo-XML: <~> or <^> become ~ or ^; lone '&' is escaped; '"""' is fixed; <s snum=dd> becomes <s snum="dd"/>; the foreign word tag is fixed; <&I ...> is removed; <{word}> is fixed; <@>, <p> and </p> are removed; <&M ...>, <&T ...> and <&Ms ...> are removed; <!DOCTYPE ...> lines are removed; <[hi]>, <[/p]> etc. are removed; the entity is taken out of the brackets, e.g. <&hellip;>; the '&' is removed for those patterns that aren't regular XML; and 'abc <p="foo"/>'-style tags are rewritten as <wf pos="foo">abc</wf>.
import re from xml.etree import ElementTree from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.tokenize import * class SensevalInstance: def __init__(self, word, position, context, senses): self.word = word self.senses = tuple(senses) self.position = position self.context = context def __repr__(self): return "SensevalInstance(word=%r, position=%r, " "context=%r, senses=%r)" % ( self.word, self.position, self.context, self.senses, ) class SensevalCorpusReader(CorpusReader): def instances(self, fileids=None): return concat( [ SensevalCorpusView(fileid, enc) for (fileid, enc) in self.abspaths(fileids, True) ] ) def _entry(self, tree): elts = [] for lexelt in tree.findall("lexelt"): for inst in lexelt.findall("instance"): sense = inst[0].attrib["senseid"] context = [(w.text, w.attrib["pos"]) for w in inst[1]] elts.append((sense, context)) return elts class SensevalCorpusView(StreamBackedCorpusView): def __init__(self, fileid, encoding): StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) self._word_tokenizer = WhitespaceTokenizer() self._lexelt_starts = [0] self._lexelts = [None] def read_block(self, stream): lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell()) - 1 lexelt = self._lexelts[lexelt_num] instance_lines = [] in_instance = False while True: line = stream.readline() if line == "": assert instance_lines == [] return [] if line.lstrip().startswith("<lexelt"): lexelt_num += 1 m = re.search("item=(\"[^\"]+\"|'[^']+')", line) assert m is not None lexelt = m.group(1)[1:-1] if lexelt_num < len(self._lexelts): assert lexelt == self._lexelts[lexelt_num] else: self._lexelts.append(lexelt) self._lexelt_starts.append(stream.tell()) if line.lstrip().startswith("<instance"): assert instance_lines == [] in_instance = True if in_instance: instance_lines.append(line) if line.lstrip().startswith("</instance"): xml_block = "\n".join(instance_lines) xml_block = _fixXML(xml_block) inst = ElementTree.fromstring(xml_block) return [self._parse_instance(inst, lexelt)] def _parse_instance(self, instance, lexelt): senses = [] context = [] position = None for child in instance: if child.tag == "answer": senses.append(child.attrib["senseid"]) elif child.tag == "context": context += self._word_tokenizer.tokenize(child.text) for cword in child: if cword.tag == "compound": cword = cword[0] if cword.tag == "head": assert position is None, "head specified twice" assert cword.text.strip() or len(cword) == 1 assert not (cword.text.strip() and len(cword) == 1) position = len(context) if cword.text.strip(): context.append(cword.text.strip()) elif cword[0].tag == "wf": context.append((cword[0].text, cword[0].attrib["pos"])) if cword[0].tail: context += self._word_tokenizer.tokenize(cword[0].tail) else: assert False, "expected CDATA or wf in <head>" elif cword.tag == "wf": context.append((cword.text, cword.attrib["pos"])) elif cword.tag == "s": pass else: print("ACK", cword.tag) assert False, "expected CDATA or <wf> or <head>" if cword.tail: context += self._word_tokenizer.tokenize(cword.tail) else: assert False, "unexpected tag %s" % child.tag return SensevalInstance(lexelt, position, context, senses) def _fixXML(text): text = re.sub(r"<([~\^])>", r"\1", text) text = re.sub(r"(\s+)\&(\s+)", r"\1&amp;\2", text) text = re.sub(r'"""', "'\"'", text) text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text) text = re.sub(r"<\&frasl>\s*<p[^>]*>", "FRASL", text) text = re.sub(r"<\&I[^>]*>", "", text) text = re.sub(r"<{([^}]+)}>", r"\1", text) text = 
re.sub(r"<(@|/?p)>", r"", text) text = re.sub(r"<&\w+ \.>", r"", text) text = re.sub(r"<!DOCTYPE[^>]*>", r"", text) text = re.sub(r"<\[\/?[^>]+\]*>", r"", text) text = re.sub(r"<(\&\w+;)>", r"\1", text) text = re.sub(r"&(?!amp|gt|lt|apos|quot)", r"", text) text = re.sub( r'[ \t]*([^<>\s]+?)[ \t]*<p="([^"]*"?)"/>', r' <wf pos="\2">\1</wf>', text ) text = re.sub(r'\s*"\s*<p=\'"\'/>', " <wf pos='\"'>\"</wf>", text) return text
Natural Language Toolkit: String Category Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Authors: Steven Bird <stevenbird1@gmail.com>, Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Read tuples from a corpus consisting of categorized strings. For example, from the question classification corpus:

    NUM:dist How far is it from Denver to Aspen ?
    LOC:city What county is Modesto , California in ?
    HUM:desc Who was Galileo ?
    DESC:def What is an atom ?
    NUM:date When did Hawaii become a state ?

Based on PPAttachmentCorpusReader. [xx] Should the order of the tuple be reversed? In most other places in NLTK, we use the form (data, tag), e.g. tagged words and labeled texts for classifiers.

:param root: the root directory for this corpus
:param fileids: a list or regexp specifying the fileids in this corpus
:param delimiter: field delimiter
from nltk.corpus.reader.api import *
from nltk.corpus.reader.util import *


class StringCategoryCorpusReader(CorpusReader):
    def __init__(self, root, fileids, delimiter=" ", encoding="utf8"):
        CorpusReader.__init__(self, root, fileids, encoding)
        self._delimiter = delimiter

    def tuples(self, fileids=None):
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, str):
            fileids = [fileids]
        return concat(
            [
                StreamBackedCorpusView(fileid, self._read_tuple_block, encoding=enc)
                for (fileid, enc) in self.abspaths(fileids, True)
            ]
        )

    def _read_tuple_block(self, stream):
        line = stream.readline().strip()
        if line:
            return [tuple(line.split(self._delimiter, 1))]
        else:
            return []
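A usage sketch using the question classification corpus, which NLTK exposes through this reader as nltk.corpus.qc; the package name 'qc' and the fileid 'train.txt' are assumptions based on the standard distribution, so adjust them to your installation.

import nltk
from nltk.corpus import qc   # question classification corpus, a StringCategoryCorpusReader

nltk.download("qc")

# Each tuple is (category, string); the line is split on the first delimiter only.
for category, question in qc.tuples("train.txt")[:3]:
    print(category, "->", question)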
Natural Language Toolkit: Switchboard Corpus Reader
Copyright (C) 2001-2023 NLTK Project
Author: Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

SwitchboardTurn: a specialized list object used to encode switchboard utterances. The elements of the list are the words in the utterance, and two attributes, speaker and id, are provided to retrieve the speaker identifier and utterance id. Note that utterance ids are only unique within a given discourse.

SwitchboardCorpusReader: uses the 'tagged' file even for the non-tagged data methods, since it is tokenized. The discourse block reader returns at most one discourse at a time; the other block readers depend on this.
import re from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.tag import map_tag, str2tuple class SwitchboardTurn(list): def __init__(self, words, speaker, id): list.__init__(self, words) self.speaker = speaker self.id = int(id) def __repr__(self): if len(self) == 0: text = "" elif isinstance(self[0], tuple): text = " ".join("%s/%s" % w for w in self) else: text = " ".join(self) return f"<{self.speaker}.{self.id}: {text!r}>" class SwitchboardCorpusReader(CorpusReader): _FILES = ["tagged"] def __init__(self, root, tagset=None): CorpusReader.__init__(self, root, self._FILES) self._tagset = tagset def words(self): return StreamBackedCorpusView(self.abspath("tagged"), self._words_block_reader) def tagged_words(self, tagset=None): def tagged_words_block_reader(stream): return self._tagged_words_block_reader(stream, tagset) return StreamBackedCorpusView(self.abspath("tagged"), tagged_words_block_reader) def turns(self): return StreamBackedCorpusView(self.abspath("tagged"), self._turns_block_reader) def tagged_turns(self, tagset=None): def tagged_turns_block_reader(stream): return self._tagged_turns_block_reader(stream, tagset) return StreamBackedCorpusView(self.abspath("tagged"), tagged_turns_block_reader) def discourses(self): return StreamBackedCorpusView( self.abspath("tagged"), self._discourses_block_reader ) def tagged_discourses(self, tagset=False): def tagged_discourses_block_reader(stream): return self._tagged_discourses_block_reader(stream, tagset) return StreamBackedCorpusView( self.abspath("tagged"), tagged_discourses_block_reader ) def _discourses_block_reader(self, stream): return [ [ self._parse_utterance(u, include_tag=False) for b in read_blankline_block(stream) for u in b.split("\n") if u.strip() ] ] def _tagged_discourses_block_reader(self, stream, tagset=None): return [ [ self._parse_utterance(u, include_tag=True, tagset=tagset) for b in read_blankline_block(stream) for u in b.split("\n") if u.strip() ] ] def _turns_block_reader(self, stream): return self._discourses_block_reader(stream)[0] def _tagged_turns_block_reader(self, stream, tagset=None): return self._tagged_discourses_block_reader(stream, tagset)[0] def _words_block_reader(self, stream): return sum(self._discourses_block_reader(stream)[0], []) def _tagged_words_block_reader(self, stream, tagset=None): return sum(self._tagged_discourses_block_reader(stream, tagset)[0], []) _UTTERANCE_RE = re.compile(r"(\w+)\.(\d+)\:\s*(.*)") _SEP = "/" def _parse_utterance(self, utterance, include_tag, tagset=None): m = self._UTTERANCE_RE.match(utterance) if m is None: raise ValueError("Bad utterance %r" % utterance) speaker, id, text = m.groups() words = [str2tuple(s, self._SEP) for s in text.split()] if not include_tag: words = [w for (w, t) in words] elif tagset and tagset != self._tagset: words = [(w, map_tag(self._tagset, tagset, t)) for (w, t) in words] return SwitchboardTurn(words, speaker, id)
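A short usage sketch, assuming the NLTK "switchboard" sample corpus has been downloaded; nltk.corpus.switchboard is expected to be an instance of the reader defined above.

    from nltk.corpus import switchboard   # assumes nltk.download('switchboard') has been run

    # Each turn is a SwitchboardTurn: a list of word strings that also carries
    # the speaker identifier and the (per-discourse) utterance id.
    for turn in switchboard.turns()[:3]:
        print(turn.speaker, turn.id, " ".join(turn))

    # The tagged variants return (word, tag) tuples instead of bare words.
    print(switchboard.tagged_words()[:5])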
Natural Language Toolkit: Tagged Corpus Reader
(C) 2001-2023 NLTK Project
Authors: Edward Loper <edloper@gmail.com>, Steven Bird <stevenbird1@gmail.com>, Jacob Perkins <japerk@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

A reader for corpora whose documents contain part-of-speech-tagged words.

TaggedCorpusReader: reader for simple part-of-speech tagged corpora. Paragraphs are assumed to be split using blank lines. Sentences and words can be tokenized using the default tokenizers, or by custom tokenizers specified as parameters to the constructor. Words are parsed using nltk.tag.str2tuple. By default, '/' is used as the separator, i.e. words should have the form word1/tag1 word2/tag2 word3/tag3; custom separators may be specified as parameters to the constructor. Part-of-speech tags are case-normalized to upper case.

Construct a new tagged corpus reader for a set of documents located at the given root directory. Example usage:

    >>> root = '/path/to/corpus'
    >>> reader = TaggedCorpusReader(root, '.*\.txt')  # doctest: +SKIP

:param root: The root directory for this corpus.
:param fileids: A list or regexp specifying the fileids in this corpus.

Method return values:
- words(): the given file(s) as a list of words and punctuation symbols. :rtype: list(str)
- sents(): the given file(s) as a list of sentences or utterances, each encoded as a list of word strings. :rtype: list(list(str))
- paras(): the given file(s) as a list of paragraphs, each encoded as a list of sentences, which are in turn encoded as lists of word strings. :rtype: list(list(list(str)))
- tagged_words(): the given file(s) as a list of tagged words and punctuation symbols, encoded as tuples (word, tag). :rtype: list(tuple(str, str))
- tagged_sents(): the given file(s) as a list of sentences, each encoded as a list of (word, tag) tuples. :rtype: list(list(tuple(str, str)))
- tagged_paras(): the given file(s) as a list of paragraphs, each encoded as a list of sentences, which are in turn encoded as lists of (word, tag) tuples. :rtype: list(list(list(tuple(str, str))))

CategorizedTaggedCorpusReader: a reader for part-of-speech tagged corpora whose documents are divided into categories based on their file identifiers. When initializing, the categorization arguments (cat_pattern, cat_map, and cat_file) are passed to the CategorizedCorpusReader constructor; the remaining arguments are passed to TaggedCorpusReader.

TaggedCorpusView: a specialized corpus view for tagged documents. It can be customized via flags to divide the tagged corpus documents up by sentence or paragraph, and to include or omit part-of-speech tags. TaggedCorpusView objects are typically created by TaggedCorpusReader, not directly by NLTK users. Its read_block() reads one paragraph at a time. (Still needs to implement simplified tags.)

MacMorphoCorpusReader: a corpus reader for the MAC_MORPHO corpus. Each line contains a single tagged word, using '_' as a separator. Sentence boundaries are based on the end-sentence tag ('_.'). Paragraph information is not included in the corpus, so each paragraph returned by self.paras() and self.tagged_paras() contains a single sentence.

TimitTaggedCorpusReader: a corpus reader for tagged sentences that are included in the TIMIT corpus.
import os from nltk.corpus.reader.api import * from nltk.corpus.reader.timit import read_timit_block from nltk.corpus.reader.util import * from nltk.tag import map_tag, str2tuple from nltk.tokenize import * class TaggedCorpusReader(CorpusReader): def __init__( self, root, fileids, sep="/", word_tokenizer=WhitespaceTokenizer(), sent_tokenizer=RegexpTokenizer("\n", gaps=True), para_block_reader=read_blankline_block, encoding="utf8", tagset=None, ): CorpusReader.__init__(self, root, fileids, encoding) self._sep = sep self._word_tokenizer = word_tokenizer self._sent_tokenizer = sent_tokenizer self._para_block_reader = para_block_reader self._tagset = tagset def words(self, fileids=None): return concat( [ TaggedCorpusView( fileid, enc, False, False, False, self._sep, self._word_tokenizer, self._sent_tokenizer, self._para_block_reader, None, ) for (fileid, enc) in self.abspaths(fileids, True) ] ) def sents(self, fileids=None): return concat( [ TaggedCorpusView( fileid, enc, False, True, False, self._sep, self._word_tokenizer, self._sent_tokenizer, self._para_block_reader, None, ) for (fileid, enc) in self.abspaths(fileids, True) ] ) def paras(self, fileids=None): return concat( [ TaggedCorpusView( fileid, enc, False, True, True, self._sep, self._word_tokenizer, self._sent_tokenizer, self._para_block_reader, None, ) for (fileid, enc) in self.abspaths(fileids, True) ] ) def tagged_words(self, fileids=None, tagset=None): if tagset and tagset != self._tagset: tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) else: tag_mapping_function = None return concat( [ TaggedCorpusView( fileid, enc, True, False, False, self._sep, self._word_tokenizer, self._sent_tokenizer, self._para_block_reader, tag_mapping_function, ) for (fileid, enc) in self.abspaths(fileids, True) ] ) def tagged_sents(self, fileids=None, tagset=None): if tagset and tagset != self._tagset: tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) else: tag_mapping_function = None return concat( [ TaggedCorpusView( fileid, enc, True, True, False, self._sep, self._word_tokenizer, self._sent_tokenizer, self._para_block_reader, tag_mapping_function, ) for (fileid, enc) in self.abspaths(fileids, True) ] ) def tagged_paras(self, fileids=None, tagset=None): if tagset and tagset != self._tagset: tag_mapping_function = lambda t: map_tag(self._tagset, tagset, t) else: tag_mapping_function = None return concat( [ TaggedCorpusView( fileid, enc, True, True, True, self._sep, self._word_tokenizer, self._sent_tokenizer, self._para_block_reader, tag_mapping_function, ) for (fileid, enc) in self.abspaths(fileids, True) ] ) class CategorizedTaggedCorpusReader(CategorizedCorpusReader, TaggedCorpusReader): def __init__(self, *args, **kwargs): CategorizedCorpusReader.__init__(self, kwargs) TaggedCorpusReader.__init__(self, *args, **kwargs) def tagged_words(self, fileids=None, categories=None, tagset=None): return super().tagged_words(self._resolve(fileids, categories), tagset) def tagged_sents(self, fileids=None, categories=None, tagset=None): return super().tagged_sents(self._resolve(fileids, categories), tagset) def tagged_paras(self, fileids=None, categories=None, tagset=None): return super().tagged_paras(self._resolve(fileids, categories), tagset) class TaggedCorpusView(StreamBackedCorpusView): def __init__( self, corpus_file, encoding, tagged, group_by_sent, group_by_para, sep, word_tokenizer, sent_tokenizer, para_block_reader, tag_mapping_function=None, ): self._tagged = tagged self._group_by_sent = group_by_sent 
self._group_by_para = group_by_para self._sep = sep self._word_tokenizer = word_tokenizer self._sent_tokenizer = sent_tokenizer self._para_block_reader = para_block_reader self._tag_mapping_function = tag_mapping_function StreamBackedCorpusView.__init__(self, corpus_file, encoding=encoding) def read_block(self, stream): block = [] for para_str in self._para_block_reader(stream): para = [] for sent_str in self._sent_tokenizer.tokenize(para_str): sent = [ str2tuple(s, self._sep) for s in self._word_tokenizer.tokenize(sent_str) ] if self._tag_mapping_function: sent = [(w, self._tag_mapping_function(t)) for (w, t) in sent] if not self._tagged: sent = [w for (w, t) in sent] if self._group_by_sent: para.append(sent) else: para.extend(sent) if self._group_by_para: block.append(para) else: block.extend(para) return block class MacMorphoCorpusReader(TaggedCorpusReader): def __init__(self, root, fileids, encoding="utf8", tagset=None): TaggedCorpusReader.__init__( self, root, fileids, sep="_", word_tokenizer=LineTokenizer(), sent_tokenizer=RegexpTokenizer(".*\n"), para_block_reader=self._read_block, encoding=encoding, tagset=tagset, ) def _read_block(self, stream): return read_regexp_block(stream, r".*", r".*_\.") class TimitTaggedCorpusReader(TaggedCorpusReader): def __init__(self, *args, **kwargs): TaggedCorpusReader.__init__( self, para_block_reader=read_timit_block, *args, **kwargs ) def paras(self): raise NotImplementedError("use sents() instead") def tagged_paras(self): raise NotImplementedError("use tagged_sents() instead")
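A small self-contained sketch of the plain TaggedCorpusReader above, writing a two-paragraph word/TAG file to a temporary directory; the file name and tags are made up for illustration.

    import os
    import tempfile

    from nltk.corpus.reader import TaggedCorpusReader

    # One sentence per line, a blank line between paragraphs, word/TAG tokens.
    root = tempfile.mkdtemp()
    with open(os.path.join(root, "sample.pos"), "w") as f:
        f.write("The/DT cat/NN sat/VBD ./.\n\nIt/PRP purred/VBD ./.\n")

    reader = TaggedCorpusReader(root, r".*\.pos")
    print(list(reader.words()))        # all tokens, tags stripped
    print(reader.tagged_sents()[0])    # [('The', 'DT'), ('cat', 'NN'), ...]
    print(len(reader.paras()))         # 2 paragraphs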
Natural Language Toolkit: TIMIT Corpus Reader
(C) 2001-2007 NLTK Project
Authors: Haejoong Lee <haejoong@ldc.upenn.edu>, Steven Bird <stevenbird1@gmail.com>, Jacob Perkins <japerk@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

XX: this docstring is out-of-date.

Read tokens, phonemes and audio data from the NLTK TIMIT Corpus. This corpus contains a selected portion of the TIMIT corpus:
- 16 speakers from 8 dialect regions
- 1 male and 1 female from each dialect region
- total 130 sentences (10 sentences per speaker; note that some sentences are shared among other speakers, especially sa1 and sa2, which are spoken by all speakers)
- total 160 recordings of sentences (10 recordings per speaker)
- audio format: NIST Sphere, single channel, 16kHz sampling, 16 bit sample, PCM encoding

Module contents: the TIMIT corpus reader provides 4 functions and 4 data items.

Data items:
- utterances: a list of utterances in the corpus. There are a total of 160 utterances, each of which corresponds to a unique utterance of a speaker. An utterance identifier such as dr1-fvmh0:sx206 encodes the dialect region (1..8), sex (m: male, f: female), speaker id, sentence type (a: all, i: shared, x: exclusive) and sentence number.
- speakers: a list of speaker ids, e.g. dr1-fvmh0. Note that if you split an item id with a colon and take the first element of the result, you get a speaker id; the second element of the result is a sentence id:

    >>> itemid = 'dr1-fvmh0:sx206'
    >>> spkrid, sentid = itemid.split(':')
    >>> spkrid
    'dr1-fvmh0'

- dictionary: the phonetic dictionary of words contained in this corpus; a Python dictionary from words to phoneme lists.
- spkrinfo: the speaker information table; a Python dictionary from speaker ids (the same as the ones in timit.speakers) to records of 10 fields. Each record is a dictionary from field names to values, with the fields: id (speaker id as defined in the original TIMIT speaker info table); sex (m: male, f: female); dr (dialect region: 1 New England, 2 Northern, 3 North Midland, 4 South Midland, 5 Southern, 6 New York City, 7 Western, 8 Army Brat (moved around)); use (corpus type: trn training, tst test -- in this sample corpus only trn is available); recdate (recording date); birthdate (speaker birth date); ht (speaker height); race (wht White, blk Black, amr American Indian, spn Spanish-American, orn Oriental, ???: unknown); edu (speaker education level: hs high school, as associate degree, bs bachelor's degree (BS or BA), ms master's degree (MS or MA), phd doctorate degree (PhD, JD, MD), ???: unknown); comments (comments by the recorder).

The 4 functions are as follows:
- tokenized(sentences=items, offset=False): given a list of items, returns an iterator of a list of word lists, each of which corresponds to an item (sentence). If offset is set to True, each element of the word list is a tuple of word (string), start offset and end offset, where offset is represented as a number of 16kHz samples.
- phonetic(sentences=items, offset=False): given a list of items, returns an iterator of a list of phoneme lists, each of which corresponds to an item (sentence). If offset is set to True, each element of the phoneme list is a tuple of word (string), start offset and end offset, where offset is represented as a number of 16kHz samples.
- audiodata(item, start=0, end=None): given an item, returns a chunk of audio samples formatted into a string. If start and end are omitted, the entire samples of the recording are returned; if only end is omitted, samples from the start offset to the end of the recording are returned.
- play(data): play the given audio samples; the audio samples can be obtained from the timit audiodata function.

TimitCorpusReader: reader for the TIMIT corpus (or any other corpus with the same file layout and use of file formats). The corpus root directory should contain the following files:
- timitdic.txt: dictionary of standard transcriptions
- spkrinfo.txt: table of speaker information
In addition, the root directory should contain one subdirectory for each speaker, containing three files for each utterance:
- <utterance-id>.txt: text content of utterances
- <utterance-id>.wrd: tokenized text content of utterances
- <utterance-id>.phn: phonetic transcription of utterances
- <utterance-id>.wav: utterance sound file

Implementation notes for the class below:
- _FILE_RE is a regexp matching fileids that are used by this corpus reader; _UTTERANCE_RE matches utterance text files.
- The constructor takes the root directory for this corpus and ensures that wave files don't get treated as unicode data (no encoding is applied to .wav files). It also builds a list of the utterance identifiers for all utterances in the corpus.
- fileids(filetype): a list of file identifiers for the files that make up this corpus. If filetype is specified, only the files that have the given type are returned; accepted values are 'txt', 'wrd', 'phn', 'wav', or 'metadata'.
- utteranceids(...): a list of the utterance identifiers for all utterances in this corpus, or for the given speaker, dialect region, gender, sentence type, or sentence number, if specified.
- transcription_dict(): a dictionary giving the standard transcription for each word.
- spkrutteranceids(speaker): a list of all utterances associated with a given speaker.
- spkrinfo(speaker): a dictionary mapping .. something.
- In phone_times(), word_times() and sent_times(), offsets are represented as a number of 16kHz samples (TODO: check this).
- wav(): XX: NOTE: this is currently broken -- we're assuming that the fileids are WAV fileids (aka RIFF), but they're actually NIST SPHERE fileids. The wave module is obtained via import_from_stdlib because nltk.chunk conflicts with the stdlib module chunk. The method skips past frames before start, then reads the frames we want, opens a new temporary file (the wave module requires an actual file and won't work with StringIO), writes the parameters and data to the new file, then reads the data back from the file and returns it; the file is automatically deleted on return.
- play(utterance, start=0, end=None): play the given audio sample, where utterance is the utterance id of the sample to play. Method 1: ossaudiodev; method 2: pygame (FIXME: this won't work under Python 3); method 3: complain.
- read_timit_block(): block reader for TIMIT tagged sentences, which are preceded by a sentence number that will be ignored.
import sys import time from nltk.corpus.reader.api import * from nltk.internals import import_from_stdlib from nltk.tree import Tree class TimitCorpusReader(CorpusReader): _FILE_RE = r"(\w+-\w+/\w+\.(phn|txt|wav|wrd))|" + r"timitdic\.txt|spkrinfo\.txt" _UTTERANCE_RE = r"\w+-\w+/\w+\.txt" def __init__(self, root, encoding="utf8"): if isinstance(encoding, str): encoding = [(r".*\.wav", None), (".*", encoding)] CorpusReader.__init__( self, root, find_corpus_fileids(root, self._FILE_RE), encoding=encoding ) self._utterances = [ name[:-4] for name in find_corpus_fileids(root, self._UTTERANCE_RE) ] self._speakerinfo = None self._root = root self.speakers = sorted({u.split("/")[0] for u in self._utterances}) def fileids(self, filetype=None): if filetype is None: return CorpusReader.fileids(self) elif filetype in ("txt", "wrd", "phn", "wav"): return [f"{u}.{filetype}" for u in self._utterances] elif filetype == "metadata": return ["timitdic.txt", "spkrinfo.txt"] else: raise ValueError("Bad value for filetype: %r" % filetype) def utteranceids( self, dialect=None, sex=None, spkrid=None, sent_type=None, sentid=None ): if isinstance(dialect, str): dialect = [dialect] if isinstance(sex, str): sex = [sex] if isinstance(spkrid, str): spkrid = [spkrid] if isinstance(sent_type, str): sent_type = [sent_type] if isinstance(sentid, str): sentid = [sentid] utterances = self._utterances[:] if dialect is not None: utterances = [u for u in utterances if u[2] in dialect] if sex is not None: utterances = [u for u in utterances if u[4] in sex] if spkrid is not None: utterances = [u for u in utterances if u[:9] in spkrid] if sent_type is not None: utterances = [u for u in utterances if u[11] in sent_type] if sentid is not None: utterances = [u for u in utterances if u[10:] in spkrid] return utterances def transcription_dict(self): _transcriptions = {} with self.open("timitdic.txt") as fp: for line in fp: if not line.strip() or line[0] == ";": continue m = re.match(r"\s*(\S+)\s+/(.*)/\s*$", line) if not m: raise ValueError("Bad line: %r" % line) _transcriptions[m.group(1)] = m.group(2).split() return _transcriptions def spkrid(self, utterance): return utterance.split("/")[0] def sentid(self, utterance): return utterance.split("/")[1] def utterance(self, spkrid, sentid): return f"{spkrid}/{sentid}" def spkrutteranceids(self, speaker): return [ utterance for utterance in self._utterances if utterance.startswith(speaker + "/") ] def spkrinfo(self, speaker): if speaker in self._utterances: speaker = self.spkrid(speaker) if self._speakerinfo is None: self._speakerinfo = {} with self.open("spkrinfo.txt") as fp: for line in fp: if not line.strip() or line[0] == ";": continue rec = line.strip().split(None, 9) key = f"dr{rec[2]}-{rec[1].lower()}{rec[0].lower()}" self._speakerinfo[key] = SpeakerInfo(*rec) return self._speakerinfo[speaker] def phones(self, utterances=None): results = [] for fileid in self._utterance_fileids(utterances, ".phn"): with self.open(fileid) as fp: for line in fp: if line.strip(): results.append(line.split()[-1]) return results def phone_times(self, utterances=None): results = [] for fileid in self._utterance_fileids(utterances, ".phn"): with self.open(fileid) as fp: for line in fp: if line.strip(): results.append( ( line.split()[2], int(line.split()[0]), int(line.split()[1]), ) ) return results def words(self, utterances=None): results = [] for fileid in self._utterance_fileids(utterances, ".wrd"): with self.open(fileid) as fp: for line in fp: if line.strip(): results.append(line.split()[-1]) return 
results def word_times(self, utterances=None): results = [] for fileid in self._utterance_fileids(utterances, ".wrd"): with self.open(fileid) as fp: for line in fp: if line.strip(): results.append( ( line.split()[2], int(line.split()[0]), int(line.split()[1]), ) ) return results def sents(self, utterances=None): results = [] for fileid in self._utterance_fileids(utterances, ".wrd"): with self.open(fileid) as fp: results.append([line.split()[-1] for line in fp if line.strip()]) return results def sent_times(self, utterances=None): return [ ( line.split(None, 2)[-1].strip(), int(line.split()[0]), int(line.split()[1]), ) for fileid in self._utterance_fileids(utterances, ".txt") for line in self.open(fileid) if line.strip() ] def phone_trees(self, utterances=None): if utterances is None: utterances = self._utterances if isinstance(utterances, str): utterances = [utterances] trees = [] for utterance in utterances: word_times = self.word_times(utterance) phone_times = self.phone_times(utterance) sent_times = self.sent_times(utterance) while sent_times: (sent, sent_start, sent_end) = sent_times.pop(0) trees.append(Tree("S", [])) while ( word_times and phone_times and phone_times[0][2] <= word_times[0][1] ): trees[-1].append(phone_times.pop(0)[0]) while word_times and word_times[0][2] <= sent_end: (word, word_start, word_end) = word_times.pop(0) trees[-1].append(Tree(word, [])) while phone_times and phone_times[0][2] <= word_end: trees[-1][-1].append(phone_times.pop(0)[0]) while phone_times and phone_times[0][2] <= sent_end: trees[-1].append(phone_times.pop(0)[0]) return trees def wav(self, utterance, start=0, end=None): wave = import_from_stdlib("wave") w = wave.open(self.open(utterance + ".wav"), "rb") if end is None: end = w.getnframes() w.readframes(start) frames = w.readframes(end - start) tf = tempfile.TemporaryFile() out = wave.open(tf, "w") out.setparams(w.getparams()) out.writeframes(frames) out.close() tf.seek(0) return tf.read() def audiodata(self, utterance, start=0, end=None): assert end is None or end > start headersize = 44 with self.open(utterance + ".wav") as fp: if end is None: data = fp.read() else: data = fp.read(headersize + end * 2) return data[headersize + start * 2 :] def _utterance_fileids(self, utterances, extension): if utterances is None: utterances = self._utterances if isinstance(utterances, str): utterances = [utterances] return [f"{u}{extension}" for u in utterances] def play(self, utterance, start=0, end=None): try: import ossaudiodev try: dsp = ossaudiodev.open("w") dsp.setfmt(ossaudiodev.AFMT_S16_LE) dsp.channels(1) dsp.speed(16000) dsp.write(self.audiodata(utterance, start, end)) dsp.close() except OSError as e: print( ( "can't acquire the audio device; please " "activate your audio device." 
), file=sys.stderr, ) print("system error message:", str(e), file=sys.stderr) return except ImportError: pass try: import pygame.mixer import StringIO pygame.mixer.init(16000) f = StringIO.StringIO(self.wav(utterance, start, end)) pygame.mixer.Sound(f).play() while pygame.mixer.get_busy(): time.sleep(0.01) return except ImportError: pass print( ("you must install pygame or ossaudiodev " "for audio playback."), file=sys.stderr, ) class SpeakerInfo: def __init__( self, id, sex, dr, use, recdate, birthdate, ht, race, edu, comments=None ): self.id = id self.sex = sex self.dr = dr self.use = use self.recdate = recdate self.birthdate = birthdate self.ht = ht self.race = race self.edu = edu self.comments = comments def __repr__(self): attribs = "id sex dr use recdate birthdate ht race edu comments" args = [f"{attr}={getattr(self, attr)!r}" for attr in attribs.split()] return "SpeakerInfo(%s)" % (", ".join(args)) def read_timit_block(stream): line = stream.readline() if not line: return [] n, sent = line.split(" ", 1) return [sent]
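A brief usage sketch, assuming the NLTK TIMIT sample corpus is installed and that nltk.corpus.timit is an instance of this reader.

    from nltk.corpus import timit   # assumes nltk.download('timit') has been run

    # Utterance ids look like 'dr1-fvmh0/sx206': speaker id, then sentence id.
    utt = timit.utteranceids()[0]
    print(utt, "->", timit.spkrid(utt), timit.sentid(utt))

    # Word and phone transcriptions for that single utterance.
    print(timit.words(utt)[:8])
    print(timit.phones(utt)[:10])

    # Speaker metadata parsed from spkrinfo.txt.
    print(timit.spkrinfo(timit.spkrid(utt)))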
Natural Language Toolkit: Toolbox Reader
(C) 2001-2023 NLTK Project
Authors: Greg Aumann <greg_aumann@sil.org>, Stuart Robinson <stuart.robinson@mpi.nl>, Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Module for reading, writing and manipulating Toolbox databases and settings fileids. (Building the list of entries should probably be done lazily.) The default key in MDF is 'lx'.
from nltk.corpus.reader.api import * from nltk.corpus.reader.util import * from nltk.toolbox import ToolboxData class ToolboxCorpusReader(CorpusReader): def xml(self, fileids, key=None): return concat( [ ToolboxData(path, enc).parse(key=key) for (path, enc) in self.abspaths(fileids, True) ] ) def fields( self, fileids, strip=True, unwrap=True, encoding="utf8", errors="strict", unicode_fields=None, ): return concat( [ list( ToolboxData(fileid, enc).fields( strip, unwrap, encoding, errors, unicode_fields ) ) for (fileid, enc) in self.abspaths(fileids, include_encoding=True) ] ) def entries(self, fileids, **kwargs): if "key" in kwargs: key = kwargs["key"] del kwargs["key"] else: key = "lx" entries = [] for marker, contents in self.fields(fileids, **kwargs): if marker == key: entries.append((contents, [])) else: try: entries[-1][-1].append((marker, contents)) except IndexError: pass return entries def words(self, fileids, key="lx"): return [contents for marker, contents in self.fields(fileids) if marker == key] def demo(): pass if __name__ == "__main__": demo()
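A usage sketch for the Toolbox reader, assuming the NLTK "toolbox" sample corpus (which ships a Rotokas dictionary, rotokas.dic) has been downloaded; adjust the file name if your copy differs.

    from nltk.corpus import toolbox   # assumes nltk.download('toolbox') has been run

    # entries() groups fields under the default key 'lx' (the MDF lexeme marker).
    entries = toolbox.entries("rotokas.dic")
    lexeme, fields = entries[0]
    print(lexeme)
    for marker, value in fields[:5]:
        print(marker, "=", value)

    # words() returns just the headwords.
    print(toolbox.words("rotokas.dic")[:10])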
Natural Language Toolkit: Twitter Corpus Reader
(C) 2001-2023 NLTK Project
Author: Ewan Klein <ewan@inf.ed.ac.uk>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

A reader for corpora that consist of Tweets. It is assumed that the Tweets have been serialised into line-delimited JSON.

TwitterCorpusReader: reader for corpora that consist of Tweets represented as a list of line-delimited JSON. Individual Tweets can be tokenized using the default tokenizer, or by a custom tokenizer specified as a parameter to the constructor. CorpusView is the corpus view class used by this reader.

Construct a new Tweet corpus reader for a set of documents located at the given root directory. If you made your own tweet collection in a directory called twitter-files, then you can initialise the reader as:

    from nltk.corpus import TwitterCorpusReader
    reader = TwitterCorpusReader(root='/path/to/twitter-files', fileids='.*\.json')

However, the recommended approach is to set the relevant directory as the value of the environmental variable TWITTER, and then invoke the reader as follows:

    root = os.environ['TWITTER']
    reader = TwitterCorpusReader(root, '.*\.json')

If you want to work directly with the raw Tweets, the json library can be used:

    import json
    for tweet in reader.docs():
        print(json.dumps(tweet, indent=1, sort_keys=True))

:param root: The root directory for this corpus.
:param fileids: A list or regexp specifying the fileids in this corpus.
:param word_tokenizer: Tokenizer for breaking the text of Tweets into smaller units, including but not limited to words.

The constructor checks that all user-created corpus files are non-empty. docs() returns the full Tweet objects, as specified by Twitter documentation on Tweets (https://dev.twitter.com/docs/platform-objects/tweets), i.e. the given file(s) as a list of dictionaries deserialised from JSON (:rtype: list(dict)). strings() returns only the text content of Tweets in the file(s), i.e. the given file(s) as a list of tweets (:rtype: list(str)). tokenized() returns the given file(s) as a list of the text content of Tweets, each as a list of words, screen names, hashtags, URLs, and punctuation symbols (:rtype: list(list(str))). The internal block reader assumes that each line in the stream is a JSON-serialised object.
import json import os from nltk.corpus.reader.api import CorpusReader from nltk.corpus.reader.util import StreamBackedCorpusView, ZipFilePathPointer, concat from nltk.tokenize import TweetTokenizer class TwitterCorpusReader(CorpusReader): r CorpusView = StreamBackedCorpusView def __init__( self, root, fileids=None, word_tokenizer=TweetTokenizer(), encoding="utf8" ): CorpusReader.__init__(self, root, fileids, encoding) for path in self.abspaths(self._fileids): if isinstance(path, ZipFilePathPointer): pass elif os.path.getsize(path) == 0: raise ValueError(f"File {path} is empty") self._word_tokenizer = word_tokenizer def docs(self, fileids=None): return concat( [ self.CorpusView(path, self._read_tweets, encoding=enc) for (path, enc, fileid) in self.abspaths(fileids, True, True) ] ) def strings(self, fileids=None): fulltweets = self.docs(fileids) tweets = [] for jsono in fulltweets: try: text = jsono["text"] if isinstance(text, bytes): text = text.decode(self.encoding) tweets.append(text) except KeyError: pass return tweets def tokenized(self, fileids=None): tweets = self.strings(fileids) tokenizer = self._word_tokenizer return [tokenizer.tokenize(t) for t in tweets] def _read_tweets(self, stream): tweets = [] for i in range(10): line = stream.readline() if not line: return tweets tweet = json.loads(line) tweets.append(tweet) return tweets
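A usage sketch assuming the NLTK "twitter_samples" corpus has been downloaded; it is an instance of the TwitterCorpusReader above, and the file name used here is one of the files it is expected to ship with.

    from nltk.corpus import twitter_samples   # assumes nltk.download('twitter_samples') has been run

    print(twitter_samples.fileids())

    # docs() gives full tweet dicts, strings() just the text, and tokenized()
    # the text split by the TweetTokenizer into words, hashtags, URLs, etc.
    tweet = twitter_samples.docs("positive_tweets.json")[0]
    print(sorted(tweet.keys())[:5])
    print(twitter_samples.strings("positive_tweets.json")[:2])
    print(twitter_samples.tokenized("positive_tweets.json")[0])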
UDHR corpus reader. It mostly deals with encodings.

Notes on the encoding table and skip list below:
- The following files are not fully decodable because they were truncated at wrong bytes.
- Unfortunately, encodings required for reading the following files are not supported by Python (e.g. latin3 raises an exception).
- The following files are encoded for specific fonts. (What are these?)
- The following files are unintended. (Yeah!)
from nltk.corpus.reader.plaintext import PlaintextCorpusReader from nltk.corpus.reader.util import find_corpus_fileids class UdhrCorpusReader(PlaintextCorpusReader): ENCODINGS = [ (".*-Latin1$", "latin-1"), (".*-Hebrew$", "hebrew"), (".*-Arabic$", "cp1256"), ("Czech_Cesky-UTF8", "cp1250"), ("Polish-Latin2", "cp1250"), ("Polish_Polski-Latin2", "cp1250"), (".*-Cyrillic$", "cyrillic"), (".*-SJIS$", "SJIS"), (".*-GB2312$", "GB2312"), (".*-Latin2$", "ISO-8859-2"), (".*-Greek$", "greek"), (".*-UTF8$", "utf-8"), ("Hungarian_Magyar-Unicode", "utf-16-le"), ("Amahuaca", "latin1"), ("Turkish_Turkce-Turkish", "latin5"), ("Lithuanian_Lietuviskai-Baltic", "latin4"), ("Japanese_Nihongo-EUC", "EUC-JP"), ("Japanese_Nihongo-JIS", "iso2022_jp"), ("Chinese_Mandarin-HZ", "hz"), (r"Abkhaz\-Cyrillic\+Abkh", "cp1251"), ] SKIP = { "Burmese_Myanmar-UTF8", "Japanese_Nihongo-JIS", "Chinese_Mandarin-HZ", "Chinese_Mandarin-UTF8", "Gujarati-UTF8", "Hungarian_Magyar-Unicode", "Lao-UTF8", "Magahi-UTF8", "Marathi-UTF8", "Tamil-UTF8", "Vietnamese-VPS", "Vietnamese-VIQR", "Vietnamese-TCVN", "Magahi-Agra", "Bhojpuri-Agra", "Esperanto-T61", "Burmese_Myanmar-WinResearcher", "Armenian-DallakHelv", "Tigrinya_Tigrigna-VG2Main", "Amharic-Afenegus6..60375", "Navaho_Dine-Navajo-Navaho-font", "Azeri_Azerbaijani_Cyrillic-Az.Times.Cyr.Normal0117", "Azeri_Azerbaijani_Latin-Az.Times.Lat0117", "Czech-Latin2-err", "Russian_Russky-UTF8~", } def __init__(self, root="udhr"): fileids = find_corpus_fileids(root, r"(?!README|\.).*") super().__init__( root, [fileid for fileid in fileids if fileid not in self.SKIP], encoding=self.ENCODINGS, )
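A usage sketch for the UDHR reader, assuming the NLTK "udhr" corpus is installed and that nltk.corpus.udhr is an instance of this reader; fileids encode both the language and the original encoding.

    from nltk.corpus import udhr   # assumes nltk.download('udhr') has been run

    # Fileids look like '<Language>-<Encoding>'.
    print([f for f in udhr.fileids() if f.startswith("English")])

    # The encoding table above is applied transparently when reading.
    print(udhr.words("English-Latin1")[:12])
    print(udhr.words("French_Francais-Latin1")[:12])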
Natural Language Toolkit: VerbNet Corpus Reader
(C) 2001-2023 NLTK Project
Author: Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

An NLTK interface to the VerbNet verb lexicon. For details about VerbNet see: https://verbs.colorado.edu/~mpalmer/projects/verbnet.html

VerbnetCorpusReader: an NLTK interface to the VerbNet verb lexicon. From the VerbNet site: "VerbNet (VN) (Kipper-Schuler 2006) is the largest on-line verb lexicon currently available for English. It is a hierarchical, domain-independent, broad-coverage verb lexicon with mappings to other lexical resources such as WordNet (Miller, 1990; Fellbaum, 1998), XTAG (XTAG Research Group, 2001), and FrameNet (Baker et al., 1998)."

The constructor takes no unicode encoding parameter, since the data files are all XML. It builds: _lemma_to_class, a dictionary mapping from verb lemma strings to lists of VerbNet class identifiers; _wordnet_to_class, a dictionary mapping from WordNet identifier strings to lists of VerbNet class identifiers; _class_to_fileid, a dictionary mapping from class identifiers to corresponding file identifiers (the keys of this dictionary provide a complete list of all classes and subclasses); and _shortid_to_longid. To initialize the dictionaries it uses the quick regexp-based method instead of the slow XML-based method, because it runs 2-30 times faster. _LONGID_RE is a regular expression that matches (and decomposes) longids; _SHORTID_RE matches shortids; _INDEX_RE is the regular expression used by _quick_index to quickly scan the corpus for basic information.

Methods:
- lemmas(vnclass=None): a list of all verb lemmas that appear in any class, or in the classid if specified. (XX: should this include subclass members?)
- wordnetids(vnclass=None): a list of all WordNet identifiers that appear in any class, or in classid if specified. (XX: should this include subclass members?)
- classids(...): a list of the VerbNet class identifiers. If a file identifier is specified, return only the VerbNet class identifiers for classes (and subclasses) defined by that file. If a lemma is specified, return only VerbNet class identifiers for classes that contain that lemma as a member. If a wordnetid is specified, return only identifiers for classes that contain that wordnetid as a member. If a classid is specified, return only identifiers for subclasses of the specified VerbNet class. If nothing is specified, return all classids within VerbNet.
- vnclass(fileid_or_classid): returns the VerbNet class ElementTree, i.e. an ElementTree containing the XML for the specified VerbNet class. :param fileid_or_classid: an identifier specifying which class should be returned; can be a file identifier (such as 'put-9.1.xml'), a VerbNet class identifier (such as 'put-9.1'), or a short VerbNet class identifier (such as '9.1'). For a file identifier, just return the XML; for a class identifier, get the XML and find the right element (we saw it during _index).
- fileids(vnclass_ids=None): a list of fileids that make up this corpus; if vnclass_ids is specified, return the fileids that make up the specified VerbNet class(es).
- frames(vnclass): given a VerbNet class, returns the VerbNet frames. The members returned are: 1) example 2) description 3) syntax 4) semantics. :param vnclass: a VerbNet class identifier or an ElementTree containing the XML contents of a VerbNet class. :return: frames, a list of frame dictionaries.
- subclasses(vnclass): returns subclass ids, if any exist, as a list of strings.
- themroles(vnclass): returns the thematic roles participating in a VerbNet class; members returned as part of the roles are 1) type 2) modifiers. :return: themroles, a list of thematic roles in the VerbNet class.

Index initialization: _index() initializes the indexes _lemma_to_class, _wordnet_to_class, and _class_to_fileid by scanning through the corpus fileids. This is fast if ElementTree uses the C implementation (<0.1 secs), but quite slow (>10 secs) if only the Python implementation is available. _index_helper() is a helper for _index(). _quick_index() initializes the same indexes without doing proper XML parsing, which is good enough to find everything in the standard VerbNet corpus and runs about 30 times faster than XML parsing with the Python ElementTree (only 2-3 times faster if ElementTree uses the C implementation); NB, if we got rid of _wordnet_to_class this would run 2-3 times faster still. It strips the '.xml' suffix to obtain the class id, scans member elements, and asserts "unexpected match condition" otherwise.

Identifier conversion: longid(shortid) maps a short VerbNet class identifier (e.g. '37.10') to a long id (e.g. 'confess-37.10'); if shortid is already a long id, it is returned as-is. shortid(longid) maps a long VerbNet class identifier (e.g. 'confess-37.10') to a short id (e.g. '37.10'); if longid is already a short id, it is returned as-is; otherwise raise ValueError("vnclass identifier %r not found").

Frame access utility functions:
- _get_semantics_within_frame(vnframe): returns the semantics within a single frame. Members of the semantics dictionary: 1) predicate value 2) arguments (each with a type and value), plus a negated flag. :return: the semantics dictionary list.
- _get_example_within_frame(vnframe): returns the example sentence for this particular frame.
- _get_description_within_frame(vnframe): returns the member description within the frame, a dictionary with members 'primary' and 'secondary'.
- _get_syntactic_list_within_frame(vnframe): returns the syntax within a frame. Members of the syntactic dictionary: 1) POS tag 2) modifiers (value, selrestrs, synrestrs).
In each case, :param vnframe: is an ElementTree containing the XML contents of a VerbNet frame.

Pretty printing:
- pprint(vnclass): a string containing a pretty-printed representation of the given VerbNet class (its id, subclasses, members, thematic roles and frames).
- pprint_subclasses(vnclass, indent=''): a pretty-printed representation of the given VerbNet class's subclasses.
- pprint_members(vnclass, indent=''): a pretty-printed representation of the given VerbNet class's member verbs.
- pprint_themroles(vnclass, indent=''): a pretty-printed representation of the given VerbNet class's thematic roles.
- pprint_frames(vnclass, indent=''): a pretty-printed representation of the list of frames within the VerbNet class.
- pprint_single_frame(vnframe, indent=''), pprint_example_within_frame(), pprint_description_within_frame(), pprint_syntax_within_frame(), pprint_semantics_within_frame(): pretty-printed representations of a single frame and of its example, description, syntax, and semantics.
In each case, :param vnclass: is a VerbNet class identifier or an ElementTree containing the XML contents of a VerbNet class, and :param vnframe: is an ElementTree containing the XML contents of a VerbNet frame.
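Before the implementation below, a brief usage sketch assuming the NLTK "verbnet" corpus is installed and that nltk.corpus.verbnet is an instance of this reader; the class id 'give-13.1' is used as an illustrative example.

    from nltk.corpus import verbnet   # assumes nltk.download('verbnet') has been run

    # Map a lemma to its classes, then inspect one class.
    print(verbnet.classids(lemma="give"))        # e.g. ['give-13.1']
    vn = verbnet.vnclass("give-13.1")
    print(verbnet.lemmas(vn)[:5])                # member verbs
    print([role["type"] for role in verbnet.themroles(vn)])
    print(verbnet.pprint(vn)[:300])              # human-readable summary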
import re import textwrap from collections import defaultdict from nltk.corpus.reader.xmldocs import XMLCorpusReader class VerbnetCorpusReader(XMLCorpusReader): def __init__(self, root, fileids, wrap_etree=False): XMLCorpusReader.__init__(self, root, fileids, wrap_etree) self._lemma_to_class = defaultdict(list) self._wordnet_to_class = defaultdict(list) self._class_to_fileid = {} self._shortid_to_longid = {} self._quick_index() _LONGID_RE = re.compile(r"([^\-\.]*)-([\d+.\-]+)$") _SHORTID_RE = re.compile(r"[\d+.\-]+$") _INDEX_RE = re.compile( r'<MEMBER name="\??([^"]+)" wn="([^"]*)"[^>]+>|' r'<VNSUBCLASS ID="([^"]+)"/?>' ) def lemmas(self, vnclass=None): if vnclass is None: return sorted(self._lemma_to_class.keys()) else: if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) return [member.get("name") for member in vnclass.findall("MEMBERS/MEMBER")] def wordnetids(self, vnclass=None): if vnclass is None: return sorted(self._wordnet_to_class.keys()) else: if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) return sum( ( member.get("wn", "").split() for member in vnclass.findall("MEMBERS/MEMBER") ), [], ) def classids(self, lemma=None, wordnetid=None, fileid=None, classid=None): if fileid is not None: return [c for (c, f) in self._class_to_fileid.items() if f == fileid] elif lemma is not None: return self._lemma_to_class[lemma] elif wordnetid is not None: return self._wordnet_to_class[wordnetid] elif classid is not None: xmltree = self.vnclass(classid) return [ subclass.get("ID") for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS") ] else: return sorted(self._class_to_fileid.keys()) def vnclass(self, fileid_or_classid): if fileid_or_classid in self._fileids: return self.xml(fileid_or_classid) classid = self.longid(fileid_or_classid) if classid in self._class_to_fileid: fileid = self._class_to_fileid[self.longid(classid)] tree = self.xml(fileid) if classid == tree.get("ID"): return tree else: for subclass in tree.findall(".//VNSUBCLASS"): if classid == subclass.get("ID"): return subclass else: assert False else: raise ValueError(f"Unknown identifier {fileid_or_classid}") def fileids(self, vnclass_ids=None): if vnclass_ids is None: return self._fileids elif isinstance(vnclass_ids, str): return [self._class_to_fileid[self.longid(vnclass_ids)]] else: return [ self._class_to_fileid[self.longid(vnclass_id)] for vnclass_id in vnclass_ids ] def frames(self, vnclass): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) frames = [] vnframes = vnclass.findall("FRAMES/FRAME") for vnframe in vnframes: frames.append( { "example": self._get_example_within_frame(vnframe), "description": self._get_description_within_frame(vnframe), "syntax": self._get_syntactic_list_within_frame(vnframe), "semantics": self._get_semantics_within_frame(vnframe), } ) return frames def subclasses(self, vnclass): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) subclasses = [ subclass.get("ID") for subclass in vnclass.findall("SUBCLASSES/VNSUBCLASS") ] return subclasses def themroles(self, vnclass): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) themroles = [] for trole in vnclass.findall("THEMROLES/THEMROLE"): themroles.append( { "type": trole.get("type"), "modifiers": [ {"value": restr.get("Value"), "type": restr.get("type")} for restr in trole.findall("SELRESTRS/SELRESTR") ], } ) return themroles def _index(self): for fileid in self._fileids: self._index_helper(self.xml(fileid), fileid) def _index_helper(self, xmltree, fileid): vnclass = xmltree.get("ID") 
self._class_to_fileid[vnclass] = fileid self._shortid_to_longid[self.shortid(vnclass)] = vnclass for member in xmltree.findall("MEMBERS/MEMBER"): self._lemma_to_class[member.get("name")].append(vnclass) for wn in member.get("wn", "").split(): self._wordnet_to_class[wn].append(vnclass) for subclass in xmltree.findall("SUBCLASSES/VNSUBCLASS"): self._index_helper(subclass, fileid) def _quick_index(self): for fileid in self._fileids: vnclass = fileid[:-4] self._class_to_fileid[vnclass] = fileid self._shortid_to_longid[self.shortid(vnclass)] = vnclass with self.open(fileid) as fp: for m in self._INDEX_RE.finditer(fp.read()): groups = m.groups() if groups[0] is not None: self._lemma_to_class[groups[0]].append(vnclass) for wn in groups[1].split(): self._wordnet_to_class[wn].append(vnclass) elif groups[2] is not None: self._class_to_fileid[groups[2]] = fileid vnclass = groups[2] self._shortid_to_longid[self.shortid(vnclass)] = vnclass else: assert False, "unexpected match condition" def longid(self, shortid): if self._LONGID_RE.match(shortid): return shortid elif not self._SHORTID_RE.match(shortid): raise ValueError("vnclass identifier %r not found" % shortid) try: return self._shortid_to_longid[shortid] except KeyError as e: raise ValueError("vnclass identifier %r not found" % shortid) from e def shortid(self, longid): if self._SHORTID_RE.match(longid): return longid m = self._LONGID_RE.match(longid) if m: return m.group(2) else: raise ValueError("vnclass identifier %r not found" % longid) def _get_semantics_within_frame(self, vnframe): semantics_within_single_frame = [] for pred in vnframe.findall("SEMANTICS/PRED"): arguments = [ {"type": arg.get("type"), "value": arg.get("value")} for arg in pred.findall("ARGS/ARG") ] semantics_within_single_frame.append( { "predicate_value": pred.get("value"), "arguments": arguments, "negated": pred.get("bool") == "!", } ) return semantics_within_single_frame def _get_example_within_frame(self, vnframe): example_element = vnframe.find("EXAMPLES/EXAMPLE") if example_element is not None: example_text = example_element.text else: example_text = "" return example_text def _get_description_within_frame(self, vnframe): description_element = vnframe.find("DESCRIPTION") return { "primary": description_element.attrib["primary"], "secondary": description_element.get("secondary", ""), } def _get_syntactic_list_within_frame(self, vnframe): syntax_within_single_frame = [] for elt in vnframe.find("SYNTAX"): pos_tag = elt.tag modifiers = dict() modifiers["value"] = elt.get("value") if "value" in elt.attrib else "" modifiers["selrestrs"] = [ {"value": restr.get("Value"), "type": restr.get("type")} for restr in elt.findall("SELRESTRS/SELRESTR") ] modifiers["synrestrs"] = [ {"value": restr.get("Value"), "type": restr.get("type")} for restr in elt.findall("SYNRESTRS/SYNRESTR") ] syntax_within_single_frame.append( {"pos_tag": pos_tag, "modifiers": modifiers} ) return syntax_within_single_frame def pprint(self, vnclass): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) s = vnclass.get("ID") + "\n" s += self.pprint_subclasses(vnclass, indent=" ") + "\n" s += self.pprint_members(vnclass, indent=" ") + "\n" s += " Thematic roles:\n" s += self.pprint_themroles(vnclass, indent=" ") + "\n" s += " Frames:\n" s += self.pprint_frames(vnclass, indent=" ") return s def pprint_subclasses(self, vnclass, indent=""): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) subclasses = self.subclasses(vnclass) if not subclasses: subclasses = ["(none)"] s = "Subclasses: " + " 
".join(subclasses) return textwrap.fill( s, 70, initial_indent=indent, subsequent_indent=indent + " " ) def pprint_members(self, vnclass, indent=""): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) members = self.lemmas(vnclass) if not members: members = ["(none)"] s = "Members: " + " ".join(members) return textwrap.fill( s, 70, initial_indent=indent, subsequent_indent=indent + " " ) def pprint_themroles(self, vnclass, indent=""): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) pieces = [] for themrole in self.themroles(vnclass): piece = indent + "* " + themrole.get("type") modifiers = [ modifier["value"] + modifier["type"] for modifier in themrole["modifiers"] ] if modifiers: piece += "[{}]".format(" ".join(modifiers)) pieces.append(piece) return "\n".join(pieces) def pprint_frames(self, vnclass, indent=""): if isinstance(vnclass, str): vnclass = self.vnclass(vnclass) pieces = [] for vnframe in self.frames(vnclass): pieces.append(self._pprint_single_frame(vnframe, indent)) return "\n".join(pieces) def _pprint_single_frame(self, vnframe, indent=""): frame_string = self._pprint_description_within_frame(vnframe, indent) + "\n" frame_string += self._pprint_example_within_frame(vnframe, indent + " ") + "\n" frame_string += ( self._pprint_syntax_within_frame(vnframe, indent + " Syntax: ") + "\n" ) frame_string += indent + " Semantics:\n" frame_string += self._pprint_semantics_within_frame(vnframe, indent + " ") return frame_string def _pprint_example_within_frame(self, vnframe, indent=""): if vnframe["example"]: return indent + " Example: " + vnframe["example"] def _pprint_description_within_frame(self, vnframe, indent=""): description = indent + vnframe["description"]["primary"] if vnframe["description"]["secondary"]: description += " ({})".format(vnframe["description"]["secondary"]) return description def _pprint_syntax_within_frame(self, vnframe, indent=""): pieces = [] for element in vnframe["syntax"]: piece = element["pos_tag"] modifier_list = [] if "value" in element["modifiers"] and element["modifiers"]["value"]: modifier_list.append(element["modifiers"]["value"]) modifier_list += [ "{}{}".format(restr["value"], restr["type"]) for restr in ( element["modifiers"]["selrestrs"] + element["modifiers"]["synrestrs"] ) ] if modifier_list: piece += "[{}]".format(" ".join(modifier_list)) pieces.append(piece) return indent + " ".join(pieces) def _pprint_semantics_within_frame(self, vnframe, indent=""): pieces = [] for predicate in vnframe["semantics"]: arguments = [argument["value"] for argument in predicate["arguments"]] pieces.append( f"{'¬' if predicate['negated'] else ''}{predicate['predicate_value']}({', '.join(arguments)})" ) return "\n".join(f"{indent}* {piece}" for piece in pieces)
Natural Language Toolkit: XML Corpus Reader
(C) 2001-2023 NLTK Project
Author: Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Corpus reader for corpora whose documents are XML files. (The module is not named 'xml', to avoid conflicting with the standard xml package.)

XMLCorpusReader: corpus reader for corpora whose documents are XML files. Note that the constructor does not take an encoding argument, because the unicode encoding is specified by the XML files themselves (see the XML specs for more info). xml() makes sure exactly one file was specified (there is no concatenating XML), reads the XML in using ElementTree, wraps it if requested, and returns the ElementTree element. words() returns all of the words and punctuation symbols in the specified file that were in text nodes (i.e. tags are ignored); like the xml() method, fileid can only specify one file. It returns the given file's text nodes as a list of words and punctuation symbols (list of str).

XMLCorpusView: a corpus view that selects out specified elements from an XML file and provides a flat list-like interface for accessing them. (Note: XMLCorpusView is not used by XMLCorpusReader itself, but may be used by subclasses of XMLCorpusReader.) Every XML corpus view has a "tag specification", indicating what XML elements should be included in the view; each non-nested element that matches this specification corresponds to one item in the view. Tag specifications are regular expressions over tag paths, where a tag path is a list of element tag names, separated by '/', indicating the ancestry of the element. Some examples:

- 'foo': a top-level element whose tag is foo.
- 'foo/bar': an element whose tag is bar and whose parent is a top-level element whose tag is foo.
- '.*/foo': an element whose tag is foo, appearing anywhere in the XML tree.
- '.*/(foo|bar)': an element whose tag is foo or bar, appearing anywhere in the XML tree.

The view items are generated from the selected XML elements via the method handle_elt(). By default this method returns the element as-is (i.e. as an ElementTree object), but it can be overridden, either via subclassing or via the elt_handler constructor parameter.

_DEBUG: if true, display debugging output to stdout when reading blocks. _BLOCK_SIZE: the number of characters read at a time by this corpus reader.

The constructor creates a new corpus view based on a specified XML file (again without an encoding argument, because the encoding is specified by the XML file itself). tagspec (str) is a tag specification as described above: each non-nested element matching this specification corresponds to one item in the view. elt_handler is a function used to transform each element into a value for the view; if no handler is specified, self.handle_elt is called, which returns the element as an ElementTree object. The signature of elt_handler is elt_handler(elt, tagspec) -> value. The compiled tag specification is kept in self._tagspec, and self._tag_context is a dictionary mapping from file positions (as returned by stream.seek()) to XML contexts; an XML context is a tuple of XML tag names, indicating which tags have not yet been closed. _detect_encoding() inspects the file's BOM or XML declaration; if no encoding is found, a default is used (what should the default be?).

handle_elt(elt, context): convert an element into an appropriate value for inclusion in the view. Unless overridden by a subclass or by the elt_handler constructor argument, this method simply returns elt. context is a string composed of element tags separated by forward slashes, indicating the XML context of the given element; for example, the string 'foo/bar/baz' indicates that the element is a baz element whose parent is a bar element and whose grandparent is a top-level foo element.

_VALID_XML_RE: a regular expression that matches XML fragments that do not contain any un-closed tags (comments, raw character data, doctype declarations, and tags or processing instructions). _XML_TAG_NAME: a regular expression used to extract the tag name from a start tag, end tag, or empty-element tag string. _XML_PIECE: a regular expression used to find all start tags, end tags, and empty-element tags in an XML file; this regexp is more lenient than the XML spec (e.g. it allows spaces in some places where the spec does not). Comments, CDATA sections, processing instructions and doctype declarations are included only so they can be skipped; the empty-element tags, start tags and end tags are the pieces the reader actually cares about.

_read_xml_fragment(stream): read a string from the given stream that does not contain any un-closed tags. In particular, this function first reads a block from the stream of size self._BLOCK_SIZE; it then checks whether that block contains an un-closed tag. If it does, the function either backtracks to the last '<' or reads another block.

read_block(): read from the stream until at least one element matching tagspec has been found, and return the result of applying elt_handler to each element found. A stack of strings keeps track of the current context. The method repeatedly reads a fragment, processes each tag in it, keeps the context up to date, checks whether each element is one of those being looked for, performs sanity checks on end tags, and records an element when its end is reached. If no elements have been found yet, it keeps looping until one is; if at least one element has been found but the stream is in the middle of another element, it backtracks to the start of that element (taking back the last start tag) and returns what has been collected so far. Finally, the _tag_context dict is updated for the new stream position.
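Since a tag specification is just a regular expression matched against a '/'-separated tag path (the code below compiles it with a trailing \Z and matches it against "/".join(context)), the matching behaviour can be checked without any corpus file. The tag paths used here are made up purely for illustration.

import re

def matches(tagspec, tag_path):
    # Same test XMLCorpusView performs: anchor the spec and match the full path.
    return re.match(tagspec + r"\Z", tag_path) is not None

print(matches("foo", "foo"))                 # True: top-level <foo>
print(matches("foo/bar", "foo/bar"))         # True: <bar> inside a top-level <foo>
print(matches("foo", "doc/foo"))             # False: bare 'foo' only matches at top level
print(matches(".*/foo", "doc/section/foo"))  # True: <foo> anywhere in the tree
print(matches(".*/(foo|bar)", "doc/bar"))    # True: <foo> or <bar> anywhere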
import codecs from xml.etree import ElementTree from nltk.corpus.reader.api import CorpusReader from nltk.corpus.reader.util import * from nltk.data import SeekableUnicodeStreamReader from nltk.internals import ElementWrapper from nltk.tokenize import WordPunctTokenizer class XMLCorpusReader(CorpusReader): def __init__(self, root, fileids, wrap_etree=False): self._wrap_etree = wrap_etree CorpusReader.__init__(self, root, fileids) def xml(self, fileid=None): if fileid is None and len(self._fileids) == 1: fileid = self._fileids[0] if not isinstance(fileid, str): raise TypeError("Expected a single file identifier string") with self.abspath(fileid).open() as fp: elt = ElementTree.parse(fp).getroot() if self._wrap_etree: elt = ElementWrapper(elt) return elt def words(self, fileid=None): elt = self.xml(fileid) encoding = self.encoding(fileid) word_tokenizer = WordPunctTokenizer() try: iterator = elt.getiterator() except: iterator = elt.iter() out = [] for node in iterator: text = node.text if text is not None: if isinstance(text, bytes): text = text.decode(encoding) toks = word_tokenizer.tokenize(text) out.extend(toks) return out class XMLCorpusView(StreamBackedCorpusView): _DEBUG = False _BLOCK_SIZE = 1024 def __init__(self, fileid, tagspec, elt_handler=None): if elt_handler: self.handle_elt = elt_handler self._tagspec = re.compile(tagspec + r"\Z") self._tag_context = {0: ()} encoding = self._detect_encoding(fileid) StreamBackedCorpusView.__init__(self, fileid, encoding=encoding) def _detect_encoding(self, fileid): if isinstance(fileid, PathPointer): try: infile = fileid.open() s = infile.readline() finally: infile.close() else: with open(fileid, "rb") as infile: s = infile.readline() if s.startswith(codecs.BOM_UTF16_BE): return "utf-16-be" if s.startswith(codecs.BOM_UTF16_LE): return "utf-16-le" if s.startswith(codecs.BOM_UTF32_BE): return "utf-32-be" if s.startswith(codecs.BOM_UTF32_LE): return "utf-32-le" if s.startswith(codecs.BOM_UTF8): return "utf-8" m = re.match(rb'\s*<\?xml\b.*\bencoding="([^"]+)"', s) if m: return m.group(1).decode() m = re.match(rb"\s*<\?xml\b.*\bencoding='([^']+)'", s) if m: return m.group(1).decode() return "utf-8" def handle_elt(self, elt, context): return elt _VALID_XML_RE = re.compile( r, re.DOTALL | re.VERBOSE, ) _XML_TAG_NAME = re.compile(r"<\s*(?:/\s*)?([^\s>]+)") _XML_PIECE = re.compile( r, re.DOTALL | re.VERBOSE, ) def _read_xml_fragment(self, stream): fragment = "" if isinstance(stream, SeekableUnicodeStreamReader): startpos = stream.tell() while True: xml_block = stream.read(self._BLOCK_SIZE) fragment += xml_block if self._VALID_XML_RE.match(fragment): return fragment if re.search("[<>]", fragment).group(0) == ">": pos = stream.tell() - ( len(fragment) - re.search("[<>]", fragment).end() ) raise ValueError('Unexpected ">" near char %s' % pos) if not xml_block: raise ValueError("Unexpected end of file: tag not closed") last_open_bracket = fragment.rfind("<") if last_open_bracket > 0: if self._VALID_XML_RE.match(fragment[:last_open_bracket]): if isinstance(stream, SeekableUnicodeStreamReader): stream.seek(startpos) stream.char_seek_forward(last_open_bracket) else: stream.seek(-(len(fragment) - last_open_bracket), 1) return fragment[:last_open_bracket] def read_block(self, stream, tagspec=None, elt_handler=None): if tagspec is None: tagspec = self._tagspec if elt_handler is None: elt_handler = self.handle_elt context = list(self._tag_context.get(stream.tell())) assert context is not None elts = [] elt_start = None elt_depth = None elt_text = "" while elts == 
[] or elt_start is not None: if isinstance(stream, SeekableUnicodeStreamReader): startpos = stream.tell() xml_fragment = self._read_xml_fragment(stream) if not xml_fragment: if elt_start is None: break else: raise ValueError("Unexpected end of file") for piece in self._XML_PIECE.finditer(xml_fragment): if self._DEBUG: print("{:>25} {}".format("/".join(context)[-20:], piece.group())) if piece.group("START_TAG"): name = self._XML_TAG_NAME.match(piece.group()).group(1) context.append(name) if elt_start is None: if re.match(tagspec, "/".join(context)): elt_start = piece.start() elt_depth = len(context) elif piece.group("END_TAG"): name = self._XML_TAG_NAME.match(piece.group()).group(1) if not context: raise ValueError("Unmatched tag </%s>" % name) if name != context[-1]: raise ValueError(f"Unmatched tag <{context[-1]}>...</{name}>") if elt_start is not None and elt_depth == len(context): elt_text += xml_fragment[elt_start : piece.end()] elts.append((elt_text, "/".join(context))) elt_start = elt_depth = None elt_text = "" context.pop() elif piece.group("EMPTY_ELT_TAG"): name = self._XML_TAG_NAME.match(piece.group()).group(1) if elt_start is None: if re.match(tagspec, "/".join(context) + "/" + name): elts.append((piece.group(), "/".join(context) + "/" + name)) if elt_start is not None: if elts == []: elt_text += xml_fragment[elt_start:] elt_start = 0 else: if self._DEBUG: print(" " * 36 + "(backtrack)") if isinstance(stream, SeekableUnicodeStreamReader): stream.seek(startpos) stream.char_seek_forward(elt_start) else: stream.seek(-(len(xml_fragment) - elt_start), 1) context = context[: elt_depth - 1] elt_start = elt_depth = None elt_text = "" pos = stream.tell() if pos in self._tag_context: assert tuple(context) == self._tag_context[pos] else: self._tag_context[pos] = tuple(context) return [ elt_handler( ElementTree.fromstring(elt.encode("ascii", "xmlcharrefreplace")), context, ) for (elt, context) in elts ]
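In the XMLCorpusView code above, the pattern strings for _VALID_XML_RE and _XML_PIECE have been lost — only the bare r prefix survives in the two re.compile(r, ...) calls — so the class will not run as printed. Below is a hedged reconstruction, based on the comment fragments in the description (comment, CDATA, processing instruction, doctype declaration, empty-element/start/end tags) and on the group names that read_block() actually consumes (START_TAG, END_TAG, EMPTY_ELT_TAG); the patterns in the released NLTK source may differ in detail. As a stop-gap they could be assigned onto the class (XMLCorpusView._VALID_XML_RE = _VALID_XML_RE, and likewise for _XML_PIECE).

import re

# Reconstruction of XMLCorpusView._VALID_XML_RE: matches XML fragments that
# do not contain any un-closed tags.
_VALID_XML_RE = re.compile(
    r"""
    [^<]*
    (
      (   (<!--.*?-->)                              # comment
        | (<!\[CDATA\[.*?\]\]>)                     # raw character data
        | (<!DOCTYPE\s+[^\[>]*(\[[^\]]*\])?\s*>)    # doctype declaration
        | (<[^!>][^>]*>)                            # tag or processing instruction
      )
      [^<]*
    )*
    \Z""",
    re.DOTALL | re.VERBOSE,
)

# Reconstruction of XMLCorpusView._XML_PIECE: finds start tags, end tags and
# empty-element tags.  The first four groups exist only so those pieces can be
# skipped; read_block() only checks EMPTY_ELT_TAG, START_TAG and END_TAG.
_XML_PIECE = re.compile(
    r"""
    (?P<COMMENT>        <!--.*?-->                          )|
    (?P<CDATA>          <!\[CDATA\[.*?\]\]>                 )|
    (?P<PI>             <\?.*?\?>                           )|
    (?P<DOCTYPE>        <!DOCTYPE\s+[^\[>]*(\[[^\]]*\])?\s*>)|
    (?P<EMPTY_ELT_TAG>  <\s*[^>/?!\s][^>]*/\s*>             )|
    (?P<START_TAG>      <\s*[^>/?!\s][^>]*>                 )|
    (?P<END_TAG>        <\s*/[^>]*>                         )""",
    re.DOTALL | re.VERBOSE,
)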
Natural Language Toolkit: York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE)
(C) 2001-2015 NLTK Project
Author: Selina Dennis <selina@tranzfusion.net>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Corpus reader for the York-Toronto-Helsinki Parsed Corpus of Old English Prose (YCOE), a 1.5 million word syntactically-annotated corpus of Old English prose texts. The corpus is distributed by the Oxford Text Archive: http://www.ota.ahds.ac.uk/. It is not included with NLTK.

The YCOE corpus is divided into 100 files, each representing an Old English prose text. Tags used within each text comply with the YCOE standard: https://www-users.york.ac.uk/~lang22/YCOE/YcoeHome.htm

YCOECorpusReader: corpus reader for the YCOE corpus. The constructor makes sure the "psd" and "pos" subdirectories contain a consistent set of items. documents() returns a list of document identifiers for all documents in this corpus, or for the documents with the given file(s) if specified (stripping off the .pos and .psd extensions). fileids() returns a list of file identifiers for the files that make up this corpus, or that store the given document(s) if specified. _getfileids() is a helper that selects the appropriate fileids for a given set of documents from a given subcorpus ("pos" or "psd"); the word, sentence, paragraph and parse accessors delegate to one of the two sub-readers.

YCOEParseCorpusReader: a specialized version of the standard bracket-parse corpus reader that strips out (CODE ...) and (ID ...) nodes.

YCOETaggedCorpusReader: a tagged corpus reader for the "pos" files, using an underscore separator and a sentence tokenizer whose gap pattern skips CODE and ID tokens.

A list of all 100 documents and their titles in YCOE (e.g. 'coadrian.o34': 'Adrian and Ritheus') is given by the documents mapping at the end of the module below.
import os import re from nltk.corpus.reader.api import * from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader from nltk.corpus.reader.tagged import TaggedCorpusReader from nltk.corpus.reader.util import * from nltk.tokenize import RegexpTokenizer class YCOECorpusReader(CorpusReader): def __init__(self, root, encoding="utf8"): CorpusReader.__init__(self, root, [], encoding) self._psd_reader = YCOEParseCorpusReader( self.root.join("psd"), ".*", ".psd", encoding=encoding ) self._pos_reader = YCOETaggedCorpusReader(self.root.join("pos"), ".*", ".pos") documents = {f[:-4] for f in self._psd_reader.fileids()} if {f[:-4] for f in self._pos_reader.fileids()} != documents: raise ValueError('Items in "psd" and "pos" ' "subdirectories do not match.") fileids = sorted( ["%s.psd" % doc for doc in documents] + ["%s.pos" % doc for doc in documents] ) CorpusReader.__init__(self, root, fileids, encoding) self._documents = sorted(documents) def documents(self, fileids=None): if fileids is None: return self._documents if isinstance(fileids, str): fileids = [fileids] for f in fileids: if f not in self._fileids: raise KeyError("File id %s not found" % fileids) return sorted({f[:-4] for f in fileids}) def fileids(self, documents=None): if documents is None: return self._fileids elif isinstance(documents, str): documents = [documents] return sorted( set( ["%s.pos" % doc for doc in documents] + ["%s.psd" % doc for doc in documents] ) ) def _getfileids(self, documents, subcorpus): if documents is None: documents = self._documents else: if isinstance(documents, str): documents = [documents] for document in documents: if document not in self._documents: if document[-4:] in (".pos", ".psd"): raise ValueError( "Expected a document identifier, not a file " "identifier. (Use corpus.documents() to get " "a list of document identifiers." 
) else: raise ValueError("Document identifier %s not found" % document) return [f"{d}.{subcorpus}" for d in documents] def words(self, documents=None): return self._pos_reader.words(self._getfileids(documents, "pos")) def sents(self, documents=None): return self._pos_reader.sents(self._getfileids(documents, "pos")) def paras(self, documents=None): return self._pos_reader.paras(self._getfileids(documents, "pos")) def tagged_words(self, documents=None): return self._pos_reader.tagged_words(self._getfileids(documents, "pos")) def tagged_sents(self, documents=None): return self._pos_reader.tagged_sents(self._getfileids(documents, "pos")) def tagged_paras(self, documents=None): return self._pos_reader.tagged_paras(self._getfileids(documents, "pos")) def parsed_sents(self, documents=None): return self._psd_reader.parsed_sents(self._getfileids(documents, "psd")) class YCOEParseCorpusReader(BracketParseCorpusReader): def _parse(self, t): t = re.sub(r"(?u)\((CODE|ID)[^\)]*\)", "", t) if re.match(r"\s*\(\s*\)\s*$", t): return None return BracketParseCorpusReader._parse(self, t) class YCOETaggedCorpusReader(TaggedCorpusReader): def __init__(self, root, items, encoding="utf8"): gaps_re = r"(?u)(?<=/\.)\s+|\s*\S*_CODE\s*|\s*\S*_ID\s*" sent_tokenizer = RegexpTokenizer(gaps_re, gaps=True) TaggedCorpusReader.__init__( self, root, items, sep="_", sent_tokenizer=sent_tokenizer ) documents = { "coadrian.o34": "Adrian and Ritheus", "coaelhom.o3": "Ælfric, Supplemental Homilies", "coaelive.o3": "Ælfric's Lives of Saints", "coalcuin": "Alcuin De virtutibus et vitiis", "coalex.o23": "Alexander's Letter to Aristotle", "coapollo.o3": "Apollonius of Tyre", "coaugust": "Augustine", "cobede.o2": "Bede's History of the English Church", "cobenrul.o3": "Benedictine Rule", "coblick.o23": "Blickling Homilies", "coboeth.o2": "Boethius' Consolation of Philosophy", "cobyrhtf.o3": "Byrhtferth's Manual", "cocanedgD": "Canons of Edgar (D)", "cocanedgX": "Canons of Edgar (X)", "cocathom1.o3": "Ælfric's Catholic Homilies I", "cocathom2.o3": "Ælfric's Catholic Homilies II", "cochad.o24": "Saint Chad", "cochdrul": "Chrodegang of Metz, Rule", "cochristoph": "Saint Christopher", "cochronA.o23": "Anglo-Saxon Chronicle A", "cochronC": "Anglo-Saxon Chronicle C", "cochronD": "Anglo-Saxon Chronicle D", "cochronE.o34": "Anglo-Saxon Chronicle E", "cocura.o2": "Cura Pastoralis", "cocuraC": "Cura Pastoralis (Cotton)", "codicts.o34": "Dicts of Cato", "codocu1.o1": "Documents 1 (O1)", "codocu2.o12": "Documents 2 (O1/O2)", "codocu2.o2": "Documents 2 (O2)", "codocu3.o23": "Documents 3 (O2/O3)", "codocu3.o3": "Documents 3 (O3)", "codocu4.o24": "Documents 4 (O2/O4)", "coeluc1": "Honorius of Autun, Elucidarium 1", "coeluc2": "Honorius of Autun, Elucidarium 1", "coepigen.o3": "Ælfric's Epilogue to Genesis", "coeuphr": "Saint Euphrosyne", "coeust": "Saint Eustace and his companions", "coexodusP": "Exodus (P)", "cogenesiC": "Genesis (C)", "cogregdC.o24": "Gregory's Dialogues (C)", "cogregdH.o23": "Gregory's Dialogues (H)", "coherbar": "Pseudo-Apuleius, Herbarium", "coinspolD.o34": "Wulfstan's Institute of Polity (D)", "coinspolX": "Wulfstan's Institute of Polity (X)", "cojames": "Saint James", "colacnu.o23": "Lacnunga", "colaece.o2": "Leechdoms", "colaw1cn.o3": "Laws, Cnut I", "colaw2cn.o3": "Laws, Cnut II", "colaw5atr.o3": "Laws, Æthelred V", "colaw6atr.o3": "Laws, Æthelred VI", "colawaf.o2": "Laws, Alfred", "colawafint.o2": "Alfred's Introduction to Laws", "colawger.o34": "Laws, Gerefa", "colawine.ox2": "Laws, Ine", "colawnorthu.o3": "Northumbra 
Preosta Lagu", "colawwllad.o4": "Laws, William I, Lad", "coleofri.o4": "Leofric", "colsigef.o3": "Ælfric's Letter to Sigefyrth", "colsigewB": "Ælfric's Letter to Sigeweard (B)", "colsigewZ.o34": "Ælfric's Letter to Sigeweard (Z)", "colwgeat": "Ælfric's Letter to Wulfgeat", "colwsigeT": "Ælfric's Letter to Wulfsige (T)", "colwsigeXa.o34": "Ælfric's Letter to Wulfsige (Xa)", "colwstan1.o3": "Ælfric's Letter to Wulfstan I", "colwstan2.o3": "Ælfric's Letter to Wulfstan II", "comargaC.o34": "Saint Margaret (C)", "comargaT": "Saint Margaret (T)", "comart1": "Martyrology, I", "comart2": "Martyrology, II", "comart3.o23": "Martyrology, III", "comarvel.o23": "Marvels of the East", "comary": "Mary of Egypt", "coneot": "Saint Neot", "conicodA": "Gospel of Nicodemus (A)", "conicodC": "Gospel of Nicodemus (C)", "conicodD": "Gospel of Nicodemus (D)", "conicodE": "Gospel of Nicodemus (E)", "coorosiu.o2": "Orosius", "cootest.o3": "Heptateuch", "coprefcath1.o3": "Ælfric's Preface to Catholic Homilies I", "coprefcath2.o3": "Ælfric's Preface to Catholic Homilies II", "coprefcura.o2": "Preface to the Cura Pastoralis", "coprefgen.o3": "Ælfric's Preface to Genesis", "copreflives.o3": "Ælfric's Preface to Lives of Saints", "coprefsolilo": "Preface to Augustine's Soliloquies", "coquadru.o23": "Pseudo-Apuleius, Medicina de quadrupedibus", "corood": "History of the Holy Rood-Tree", "cosevensl": "Seven Sleepers", "cosolilo": "St. Augustine's Soliloquies", "cosolsat1.o4": "Solomon and Saturn I", "cosolsat2": "Solomon and Saturn II", "cotempo.o3": "Ælfric's De Temporibus Anni", "coverhom": "Vercelli Homilies", "coverhomE": "Vercelli Homilies (E)", "coverhomL": "Vercelli Homilies (L)", "covinceB": "Saint Vincent (Bodley 343)", "covinsal": "Vindicta Salvatoris", "cowsgosp.o3": "West-Saxon Gospels", "cowulf.o34": "Wulfstan's Homilies", }
Natural Language Toolkit: Corpus Reader Utility Functions
(C) 2001-2023 NLTK Project
Author: Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Lazy corpus loader. To see the API documentation for a lazily loaded corpus, first run corpus.ensure_loaded() and then run help(this_corpus).

LazyCorpusLoader is a proxy object which is used to stand in for a corpus object before the corpus is loaded. This allows NLTK to create an object for each corpus, but defer the costs associated with loading those corpora until the first time that they're actually accessed. The first time this object is accessed in any way, it will load the corresponding corpus and transform itself into that corpus (by modifying its own __class__ and __dict__ attributes). If the corpus cannot be found, then accessing this object will raise an exception, displaying installation instructions for the NLTK data package. Once the user has properly installed the data package (or modified nltk.data.path to point to its location), the corpus object can be used without restarting Python.

Constructor parameters: name, the name of the corpus (str); reader_cls, the specific CorpusReader class, e.g. PlaintextCorpusReader or WordListCorpusReader; nltk_data_subdir (str), the subdirectory where the corpus is stored; *args and **kwargs, any other arguments that reader_cls might need. If nltk_data_subdir is set explicitly, the specified subdirectory path is used (the argument is then popped, since it is not needed any more); otherwise 'corpora' under nltk_data is used.

__load() finds the corpus root directory and loads the corpus; this is where the magic happens: the proxy transforms itself into the corpus by modifying its own __dict__ and __class__ to match those of the corpus. For _unload support, __dict__ and __class__ are assigned back and garbage collection is run afterwards; after reassigning __dict__ there shouldn't be any references to corpus data, so the memory should be deallocated after gc.collect().

__getattr__ contains a fix for inspect.isclass under Python 2.6 (see https://bugs.python.org/issue1225107); without this fix, tests may take an extra 1.5 GB of RAM because all corpora get loaded during test collection. The call to __load() looks circular, but it is not, since __load() changes our __class__ to something new. If an exception occurs during corpus loading, the _unload method may be unattached, so __getattr__ can be called; corpus loading should not be triggered again in this case. _make_bound_method is magic for creating bound methods, used for _unload.
import gc import re import nltk TRY_ZIPFILE_FIRST = False class LazyCorpusLoader: def __init__(self, name, reader_cls, *args, **kwargs): from nltk.corpus.reader.api import CorpusReader assert issubclass(reader_cls, CorpusReader) self.__name = self.__name__ = name self.__reader_cls = reader_cls if "nltk_data_subdir" in kwargs: self.subdir = kwargs["nltk_data_subdir"] kwargs.pop("nltk_data_subdir", None) else: self.subdir = "corpora" self.__args = args self.__kwargs = kwargs def __load(self): zip_name = re.sub(r"(([^/]+)(/.*)?)", r"\2.zip/\1/", self.__name) if TRY_ZIPFILE_FIRST: try: root = nltk.data.find(f"{self.subdir}/{zip_name}") except LookupError as e: try: root = nltk.data.find(f"{self.subdir}/{self.__name}") except LookupError: raise e else: try: root = nltk.data.find(f"{self.subdir}/{self.__name}") except LookupError as e: try: root = nltk.data.find(f"{self.subdir}/{zip_name}") except LookupError: raise e corpus = self.__reader_cls(root, *self.__args, **self.__kwargs) args, kwargs = self.__args, self.__kwargs name, reader_cls = self.__name, self.__reader_cls self.__dict__ = corpus.__dict__ self.__class__ = corpus.__class__ def _unload(self): lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs) self.__dict__ = lazy_reader.__dict__ self.__class__ = lazy_reader.__class__ gc.collect() self._unload = _make_bound_method(_unload, self) def __getattr__(self, attr): if attr == "__bases__": raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'") self.__load() return getattr(self, attr) def __repr__(self): return "<{} in {!r} (not loaded yet)>".format( self.__reader_cls.__name__, ".../corpora/" + self.__name, ) def _unload(self): pass def _make_bound_method(func, self): class Foo: def meth(self): pass f = Foo() bound_method = type(f.meth) try: return bound_method(func, self, self.__class__) except TypeError: return bound_method(func, self)
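The proxy behaviour can be observed with any of the corpora defined in nltk.corpus, all of which are wrapped in LazyCorpusLoader. This assumes the Brown corpus data has been downloaded (nltk.download('brown')) and has not yet been touched in the current session.

from nltk.corpus import brown

# Something like "<CategorizedTaggedCorpusReader in '.../corpora/brown' (not loaded yet)>"
print(repr(brown))

brown.fileids()     # first real access: __getattr__ triggers __load()
print(type(brown))  # the proxy has transformed itself into the concrete reader class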
Decorator module by Michele Simionato <michelesimionato@libero.it>. Copyright Michele Simionato, distributed under the terms of the BSD license (see below). http://www.phyast.pitt.edu/~micheles/python/documentation.html

Included in NLTK for its support of a nice memoization decorator.

The basic trick is to generate the source code for the decorated function with the right signature and to evaluate it. Uncomment the statement 'print >> sys.stderr, func_src' in _decorator to understand what is going on. (The sys.path manipulation at the top is a hack to keep NLTK's tokenize module from colliding with the tokenize module in the Python standard library while inspect is imported.)

__legacysignature(): for retro-compatibility reasons we don't use a standard Signature; instead we use the string generated by this method: basically, from a signature we create a string and remove the default values.

getinfo(func): returns an info dictionary containing: name (the name of the function, str), argnames (the names of the arguments, list), defaults (the values of the default arguments, tuple), signature (the signature, str), fullsignature (the full Signature), doc (the docstring, str), module (the module name, str), dict (the function __dict__). For example, given def f(self, x=1, y=2, *args, **kw): pass, getinfo(f) gives name 'f', argnames ['self', 'x', 'y', 'args', 'kw'], defaults (1, 2), signature 'self, x, y, *args, **kw' and fullsignature '(self, x=1, y=2, *args, **kw)'. The signature is converted to str for PyPy compatibility.

update_wrapper(): akin to functools.update_wrapper. new_wrapper(wrapper, model): an improvement over functools.update_wrapper; the wrapper is a generic callable object, and it works by generating a copy of the wrapper with the right signature and by updating the copy, not the original. Moreover, model can be a dictionary with keys 'name', 'doc', 'module', 'dict', 'defaults' (otherwise it is assumed to be a function).

__call__ is a helper used in decorator_factory. decorator_factory(cls) takes a class with a .call method and returns a callable decorator object; it works by adding a suitable __call__ method to the class, and it raises a TypeError if the class already has a nontrivial __call__ method.

decorator(caller): general-purpose decorator factory. It takes a caller function as input and returns a decorator with the same attributes. A caller function is any function like this:

def caller(func, *args, **kw):
    # do something
    return func(*args, **kw)

Here is an example of usage:

@decorator
def chatty(f, *args, **kw):
    print("Calling %r" % f.__name__)
    return f(*args, **kw)

chatty.__name__ is 'chatty'; decorating a function f with @chatty and calling f() prints "Calling 'f'".

decorator() can also take as input a class with a .call method; in this case it converts the class into a factory of callable decorator objects (see the documentation for an example). The real meat is in _decorator, which evaluates the generated source ('print >> sys.stderr, src' can be uncommented for debugging purposes). In memoize, the memoize_dic cache is created at the first call.

LEGALESE: Redistributions of source code must retain the above notice, this list of conditions and the following disclaimer. Redistributions in bytecode form must reproduce the above notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. This software is provided by the holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the holders or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
__docformat__ = "restructuredtext en" __all__ = ["decorator", "new_wrapper", "getinfo"] import sys OLD_SYS_PATH = sys.path[:] sys.path = [p for p in sys.path if p and "nltk" not in str(p)] import inspect sys.path = OLD_SYS_PATH def __legacysignature(signature): listsignature = str(signature)[1:-1].split(",") for counter, param in enumerate(listsignature): if param.count("=") > 0: listsignature[counter] = param[0 : param.index("=")].strip() else: listsignature[counter] = param.strip() return ", ".join(listsignature) def getinfo(func): assert inspect.ismethod(func) or inspect.isfunction(func) argspec = inspect.getfullargspec(func) regargs, varargs, varkwargs = argspec[:3] argnames = list(regargs) if varargs: argnames.append(varargs) if varkwargs: argnames.append(varkwargs) fullsignature = inspect.signature(func) signature = __legacysignature(fullsignature) if hasattr(func, "__closure__"): _closure = func.__closure__ _globals = func.__globals__ else: _closure = func.func_closure _globals = func.func_globals return dict( name=func.__name__, argnames=argnames, signature=signature, fullsignature=fullsignature, defaults=func.__defaults__, doc=func.__doc__, module=func.__module__, dict=func.__dict__, globals=_globals, closure=_closure, ) def update_wrapper(wrapper, model, infodict=None): "akin to functools.update_wrapper" infodict = infodict or getinfo(model) wrapper.__name__ = infodict["name"] wrapper.__doc__ = infodict["doc"] wrapper.__module__ = infodict["module"] wrapper.__dict__.update(infodict["dict"]) wrapper.__defaults__ = infodict["defaults"] wrapper.undecorated = model return wrapper def new_wrapper(wrapper, model): if isinstance(model, dict): infodict = model else: infodict = getinfo(model) assert ( not "_wrapper_" in infodict["argnames"] ), '"_wrapper_" is a reserved argument name!' src = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict funcopy = eval(src, dict(_wrapper_=wrapper)) return update_wrapper(funcopy, model, infodict) def __call__(self, func): return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func) def decorator_factory(cls): attrs = set(dir(cls)) if "__call__" in attrs: raise TypeError( "You cannot decorate a class with a nontrivial " "__call__ method" ) if "call" not in attrs: raise TypeError("You cannot decorate a class without a " ".call method") cls.__call__ = __call__ return cls def decorator(caller): if inspect.isclass(caller): return decorator_factory(caller) def _decorator(func): infodict = getinfo(func) argnames = infodict["argnames"] assert not ( "_call_" in argnames or "_func_" in argnames ), "You cannot use _call_ or _func_ as argument names!" src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict dec_func = eval(src, dict(_func_=func, _call_=caller)) return update_wrapper(dec_func, func, infodict) return update_wrapper(_decorator, caller) def getattr_(obj, name, default_thunk): "Similar to .setdefault in dictionaries." try: return getattr(obj, name) except AttributeError: default = default_thunk() setattr(obj, name, default) return default @decorator def memoize(func, *args): dic = getattr_(func, "memoize_dic", dict) if args in dic: return dic[args] result = func(*args) dic[args] = result return result
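A small usage sketch for the helpers defined above, assuming this file is importable as nltk.decorators (its usual location in the package). The memoization cache is stored on the undecorated function as a memoize_dic attribute, keyed by the positional arguments.

from nltk.decorators import getinfo, memoize

@memoize
def fib(n):
    # Naive recursion; memoization serves repeated sub-calls from the cache.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))                           # 832040
print(len(fib.undecorated.memoize_dic))  # one cache entry per distinct argument

def g(x, y=2, *args, **kw):
    pass

info = getinfo(g)
print(info["name"], info["argnames"])    # g ['x', 'y', 'args', 'kw']
print(info["signature"])                 # x, y, *args, **kw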
Natural Language Toolkit: graphical representations package
(C) 2001-2023 NLTK Project
Authors: Edward Loper <edloper@gmail.com>, Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Import Tkinter-based modules if Tkinter is installed.
try: import tkinter except ImportError: import warnings warnings.warn("nltk.draw package not loaded (please install Tkinter library).") else: from nltk.draw.cfg import ProductionList, CFGEditor, CFGDemo from nltk.draw.tree import ( TreeSegmentWidget, tree_to_treesegment, TreeWidget, TreeView, draw_trees, ) from nltk.draw.table import Table from nltk.draw.dispersion import dispersion_plot
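A small sketch of the re-exported drawing helpers, assuming a working Tkinter installation; the call opens a GUI window and blocks until it is closed.

# Sketch only: requires Tkinter and opens a GUI window.
from nltk import Tree
from nltk.draw import draw_trees

t = Tree.fromstring("(S (NP (DT the) (NN dog)) (VP (VBD barked)))")
draw_trees(t)   # blocks until the tree window is closed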
natural language toolkit dispersion plots c 20012023 nltk project steven bird stevenbird1gmail com url https www nltk org for license information see license txt a utility for displaying lexical dispersion generate a lexical dispersion plot param text the source text type text liststr or iterstr param words the target words type words list of str param ignorecase flag to set if case should be ignored when searching text type ignorecase bool return a matplotlib axes object that may still be modified before plotting rtype axes natural language toolkit dispersion plots c 2001 2023 nltk project steven bird stevenbird1 gmail com url https www nltk org for license information see license txt a utility for displaying lexical dispersion generate a lexical dispersion plot param text the source text type text list str or iter str param words the target words type words list of str param ignore_case flag to set if case should be ignored when searching text type ignore_case bool return a matplotlib axes object that may still be modified before plotting rtype axes
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"): try: import matplotlib.pyplot as plt except ImportError as e: raise ImportError( "The plot function requires matplotlib to be installed. " "See https://matplotlib.org/" ) from e word2y = { word.casefold() if ignore_case else word: y for y, word in enumerate(reversed(words)) } xs, ys = [], [] for x, token in enumerate(text): token = token.casefold() if ignore_case else token y = word2y.get(token) if y is not None: xs.append(x) ys.append(y) words = words[::-1] _, ax = plt.subplots() ax.plot(xs, ys, "|") ax.set_yticks(list(range(len(words))), words, color="C0") ax.set_ylim(-1, len(words)) ax.set_title(title) ax.set_xlabel("Word Offset") return ax if __name__ == "__main__": import matplotlib.pyplot as plt from nltk.corpus import gutenberg words = ["Elinor", "Marianne", "Edward", "Willoughby"] dispersion_plot(gutenberg.words("austen-sense.txt"), words) plt.show()
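A usage sketch for dispersion_plot with a hand-made token list (the tokens are illustrative only); matplotlib must be installed, and the returned Axes can still be customised before plotting.

# Sketch only; the token list is made up.
import matplotlib.pyplot as plt
from nltk.draw.dispersion import dispersion_plot

tokens = ["The", "cat", "sat", "on", "the", "mat", "while", "the", "dog", "sat", "too"]
ax = dispersion_plot(tokens, ["cat", "dog", "sat"], ignore_case=True,
                     title="Toy Dispersion Plot")
ax.set_xlabel("Token offset")   # the returned Axes may still be modified
plt.show()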
natural language toolkit nltk help c 20012023 nltk project s steven bird stevenbird1gmail com url https www nltk org for license information see license txt provide structured access to documentation utilities natural language toolkit nltk help c 2001 2023 nltk project s steven bird stevenbird1 gmail com url https www nltk org for license information see license txt provide structured access to documentation utilities
import re from textwrap import wrap from nltk.data import load def brown_tagset(tagpattern=None): _format_tagset("brown_tagset", tagpattern) def claws5_tagset(tagpattern=None): _format_tagset("claws5_tagset", tagpattern) def upenn_tagset(tagpattern=None): _format_tagset("upenn_tagset", tagpattern) def _print_entries(tags, tagdict): for tag in tags: entry = tagdict[tag] defn = [tag + ": " + entry[0]] examples = wrap( entry[1], width=75, initial_indent=" ", subsequent_indent=" " ) print("\n".join(defn + examples)) def _format_tagset(tagset, tagpattern=None): tagdict = load("help/tagsets/" + tagset + ".pickle") if not tagpattern: _print_entries(sorted(tagdict), tagdict) elif tagpattern in tagdict: _print_entries([tagpattern], tagdict) else: tagpattern = re.compile(tagpattern) tags = [tag for tag in sorted(tagdict) if tagpattern.match(tag)] if tags: _print_entries(tags, tagdict) else: print("No matching tags found.") if __name__ == "__main__": brown_tagset(r"NN.*") upenn_tagset(r".*\$") claws5_tagset("UNDEFINED") brown_tagset(r"NN")
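A brief sketch of querying the tagset help tables; this assumes the 'tagsets' data package has already been fetched, e.g. via nltk.download('tagsets').

# Sketch only; requires the 'tagsets' data package (nltk.download('tagsets')).
from nltk.help import brown_tagset, upenn_tagset

upenn_tagset("NN")           # definition and examples for the single tag NN
upenn_tagset(r"NN.*")        # regexp match: NN, NNP, NNPS, NNS
brown_tagset(r"VB.?")        # Brown verb tags
upenn_tagset("no-such-tag")  # prints "No matching tags found."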
natural language toolkit inference c 20012023 nltk project dan garrette dhgarrettegmail com ewan klein ewaninf ed ac uk url https www nltk org for license information see license txt classes and interfaces for theorem proving and model building natural language toolkit inference c 2001 2023 nltk project dan garrette dhgarrette gmail com ewan klein ewan inf ed ac uk url https www nltk org for license information see license txt classes and interfaces for theorem proving and model building
from nltk.inference.api import ParallelProverBuilder, ParallelProverBuilderCommand from nltk.inference.discourse import ( CfgReadingCommand, DiscourseTester, DrtGlueReadingCommand, ReadingCommand, ) from nltk.inference.mace import Mace, MaceCommand from nltk.inference.prover9 import Prover9, Prover9Command from nltk.inference.resolution import ResolutionProver, ResolutionProverCommand from nltk.inference.tableau import TableauProver, TableauProverCommand
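A small sketch using two of the re-exported provers. TableauProver and ResolutionProver are pure Python, so unlike Mace/Prover9 they need no external binaries; the premises are illustrative.

# Sketch: no external theorem-prover binaries required for these two provers.
from nltk.sem import Expression
from nltk.inference import ResolutionProver, TableauProver

read_expr = Expression.fromstring
premises = [read_expr("man(socrates)"),
            read_expr("all x.(man(x) -> mortal(x))")]
goal = read_expr("mortal(socrates)")

print(TableauProver().prove(goal, premises))     # True
print(ResolutionProver().prove(goal, premises))  # True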
natural language toolkit interface to the mace4 model builder dan garrette dhgarrettegmail com ewan klein ewaninf ed ac uk url https www nltk org for license information see license txt a model builder that makes use of the external mace4 package a macecommand specific to the mace model builder it contains a printassumptions method that is used to print the list of assumptions in multiple formats param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions listsem expression param maxmodels the maximum number of models that mace will try before simply returning false use 0 for no maximum type maxmodels int transform the output file into an nltkstyle valuation return a model if one is generated none otherwise rtype sem valuation find the number of entities in the model replace the integer identifier with a corresponding alphabetic character relation is not nullary relation is nullary convert a mace4style relation table into a dictionary param numentities the number of entities in the model determines the row length in the table type numentities int param values a list of 1 s and 0 s that represent whether a relation holds in a mace4 model type values list of int pick an alphabetic character as identifier for an entity in the model param value where to index into the list of characters type value int print out a mace4 model using any mace4 interpformat format see https www cs unm edumccunemace4manual for details param valuationstr str with the model builder s output param format str indicating the format for displaying models defaults to standard format return str transform the output file into any mace4 interpformat format param format output format for displaying models type format str call the interpformat binary with the given input param inputstr a string whose contents are used as stdin param args a list of commandline arguments return a tuple stdout returncode see configprover9 the maximum model size that mace will try before simply returning false use 1 for no maximum def buildmodelself goalnone assumptionsnone verbosefalse if not assumptions assumptions stdout returncode self callmace4 self prover9inputgoal assumptions verboseverbose return returncode 0 stdout def callmace4self inputstr args verbosefalse if self mace4bin is none self mace4bin self findbinarymace4 verbose updatedinputstr if self endsize 0 updatedinputstr assignendsize d nn self endsize updatedinputstr inputstr return self callupdatedinputstr self mace4bin args verbose def spacernum30 print num def decoderesultfound return true countermodel found false no countermodel found none none found def testmodelfoundarguments for goal assumptions in arguments g expression fromstringgoal alist lp parsea for a in assumptions m macecommandg assumptionsalist maxmodels50 found m buildmodel for a in alist print s a printf g decoderesultfoundn def testbuildmodelarguments g expression fromstringall x manx alist expression fromstringa for a in manjohn mansocrates manbill some x x john manx seesjohn x some x x bill manx all x some y manx givessocrates x y m macecommandg assumptionsalist m buildmodel spacer printassumptions and goal spacer for a in alist print s a printf g decoderesultm buildmodeln spacer printm model standard printm model cooked printvaluation spacer printm valuation n def testtransformoutputargumentpair g expression fromstringargumentpair0 alist lp parsea for a in argumentpair1 m macecommandg assumptionsalist m buildmodel for a in 
alist print s a printf g m buildmodeln for format in standard portable xml cooked spacer printusing s format format spacer printm modelformatformat def testmakerelationset print macecommand makerelationsetnumentities3 values1 0 1 c a print macecommand makerelationset numentities3 values0 0 0 0 0 0 1 0 0 c a print macecommand makerelationsetnumentities2 values0 0 1 0 0 0 1 0 a b a b b a arguments mortalsocrates all x manx mortalx mansocrates not mortalsocrates all x manx mortalx mansocrates def demo testmodelfoundarguments testbuildmodelarguments testtransformoutputarguments1 if name main demo natural language toolkit interface to the mace4 model builder dan garrette dhgarrette gmail com ewan klein ewan inf ed ac uk url https www nltk org for license information see license txt a model builder that makes use of the external mace4 package a macecommand specific to the mace model builder it contains a print_assumptions method that is used to print the list of assumptions in multiple formats param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions list sem expression param max_models the maximum number of models that mace will try before simply returning false use 0 for no maximum type max_models int transform the output file into an nltk style valuation return a model if one is generated none otherwise rtype sem valuation find the number of entities in the model replace the integer identifier with a corresponding alphabetic character relation is not nullary relation is nullary convert a mace4 style relation table into a dictionary param num_entities the number of entities in the model determines the row length in the table type num_entities int param values a list of 1 s and 0 s that represent whether a relation holds in a mace4 model type values list of int pick an alphabetic character as identifier for an entity in the model param value where to index into the list of characters type value int print out a mace4 model using any mace4 interpformat format see https www cs unm edu mccune mace4 manual for details param valuation_str str with the model builder s output param format str indicating the format for displaying models defaults to standard format return str transform the output file into any mace4 interpformat format param format output format for displaying models type format str call the interpformat binary with the given input param input_str a string whose contents are used as stdin param args a list of command line arguments return a tuple stdout returncode see config_prover9 the maximum model size that mace will try before simply returning false use 1 for no maximum use mace4 to build a first order model return true if a model was found i e mace returns value of 0 else false call the mace4 binary with the given input param input_str a string whose contents are used as stdin param args a list of command line arguments return a tuple stdout returncode see config_prover9 decode the result of model_found param found the output of model_found type found bool try some proofs and exhibit the results try to build a nltk sem valuation print m model standard print m model cooked transform the model into various mace4 interpformat formats
import os import tempfile from nltk.inference.api import BaseModelBuilderCommand, ModelBuilder from nltk.inference.prover9 import Prover9CommandParent, Prover9Parent from nltk.sem import Expression, Valuation from nltk.sem.logic import is_indvar class MaceCommand(Prover9CommandParent, BaseModelBuilderCommand): _interpformat_bin = None def __init__(self, goal=None, assumptions=None, max_models=500, model_builder=None): if model_builder is not None: assert isinstance(model_builder, Mace) else: model_builder = Mace(max_models) BaseModelBuilderCommand.__init__(self, model_builder, goal, assumptions) @property def valuation(mbc): return mbc.model("valuation") def _convert2val(self, valuation_str): valuation_standard_format = self._transform_output(valuation_str, "standard") val = [] for line in valuation_standard_format.splitlines(False): l = line.strip() if l.startswith("interpretation"): num_entities = int(l[l.index("(") + 1 : l.index(",")].strip()) elif l.startswith("function") and l.find("_") == -1: name = l[l.index("(") + 1 : l.index(",")].strip() if is_indvar(name): name = name.upper() value = int(l[l.index("[") + 1 : l.index("]")].strip()) val.append((name, MaceCommand._make_model_var(value))) elif l.startswith("relation"): l = l[l.index("(") + 1 :] if "(" in l: name = l[: l.index("(")].strip() values = [ int(v.strip()) for v in l[l.index("[") + 1 : l.index("]")].split(",") ] val.append( (name, MaceCommand._make_relation_set(num_entities, values)) ) else: name = l[: l.index(",")].strip() value = int(l[l.index("[") + 1 : l.index("]")].strip()) val.append((name, value == 1)) return Valuation(val) @staticmethod def _make_relation_set(num_entities, values): r = set() for position in [pos for (pos, v) in enumerate(values) if v == 1]: r.add( tuple(MaceCommand._make_relation_tuple(position, values, num_entities)) ) return r @staticmethod def _make_relation_tuple(position, values, num_entities): if len(values) == 1: return [] else: sublist_size = len(values) // num_entities sublist_start = position // sublist_size sublist_position = int(position % sublist_size) sublist = values[ sublist_start * sublist_size : (sublist_start + 1) * sublist_size ] return [ MaceCommand._make_model_var(sublist_start) ] + MaceCommand._make_relation_tuple( sublist_position, sublist, num_entities ) @staticmethod def _make_model_var(value): letter = [ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", ][value] num = value // 26 return letter + str(num) if num > 0 else letter def _decorate_model(self, valuation_str, format): if not format: return valuation_str elif format == "valuation": return self._convert2val(valuation_str) else: return self._transform_output(valuation_str, format) def _transform_output(self, valuation_str, format): if format in [ "standard", "standard2", "portable", "tabular", "raw", "cooked", "xml", "tex", ]: return self._call_interpformat(valuation_str, [format])[0] else: raise LookupError("The specified format does not exist") def _call_interpformat(self, input_str, args=[], verbose=False): if self._interpformat_bin is None: self._interpformat_bin = self._modelbuilder._find_binary( "interpformat", verbose ) return self._modelbuilder._call( input_str, self._interpformat_bin, args, verbose ) class Mace(Prover9Parent, ModelBuilder): _mace4_bin = None def __init__(self, end_size=500): self._end_size = end_size def _build_model(self, goal=None, assumptions=None, verbose=False): if not assumptions: assumptions = [] stdout, 
returncode = self._call_mace4( self.prover9_input(goal, assumptions), verbose=verbose ) return (returncode == 0, stdout) def _call_mace4(self, input_str, args=[], verbose=False): if self._mace4_bin is None: self._mace4_bin = self._find_binary("mace4", verbose) updated_input_str = "" if self._end_size > 0: updated_input_str += "assign(end_size, %d).\n\n" % self._end_size updated_input_str += input_str return self._call(updated_input_str, self._mace4_bin, args, verbose) def spacer(num=30): print("-" * num) def decode_result(found): return {True: "Countermodel found", False: "No countermodel found", None: "None"}[ found ] def test_model_found(arguments): for (goal, assumptions) in arguments: g = Expression.fromstring(goal) alist = [lp.parse(a) for a in assumptions] m = MaceCommand(g, assumptions=alist, max_models=50) found = m.build_model() for a in alist: print(" %s" % a) print(f"|- {g}: {decode_result(found)}\n") def test_build_model(arguments): g = Expression.fromstring("all x.man(x)") alist = [ Expression.fromstring(a) for a in [ "man(John)", "man(Socrates)", "man(Bill)", "some x.(-(x = John) & man(x) & sees(John,x))", "some x.(-(x = Bill) & man(x))", "all x.some y.(man(x) -> gives(Socrates,x,y))", ] ] m = MaceCommand(g, assumptions=alist) m.build_model() spacer() print("Assumptions and Goal") spacer() for a in alist: print(" %s" % a) print(f"|- {g}: {decode_result(m.build_model())}\n") spacer() print("Valuation") spacer() print(m.valuation, "\n") def test_transform_output(argument_pair): g = Expression.fromstring(argument_pair[0]) alist = [lp.parse(a) for a in argument_pair[1]] m = MaceCommand(g, assumptions=alist) m.build_model() for a in alist: print(" %s" % a) print(f"|- {g}: {m.build_model()}\n") for format in ["standard", "portable", "xml", "cooked"]: spacer() print("Using '%s' format" % format) spacer() print(m.model(format=format)) def test_make_relation_set(): print( MaceCommand._make_relation_set(num_entities=3, values=[1, 0, 1]) == {("c",), ("a",)} ) print( MaceCommand._make_relation_set( num_entities=3, values=[0, 0, 0, 0, 0, 0, 1, 0, 0] ) == {("c", "a")} ) print( MaceCommand._make_relation_set(num_entities=2, values=[0, 0, 1, 0, 0, 0, 1, 0]) == {("a", "b", "a"), ("b", "b", "a")} ) arguments = [ ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), ("(not mortal(Socrates))", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), ] def demo(): test_model_found(arguments) test_build_model(arguments) test_transform_output(arguments[1]) if __name__ == "__main__": demo()
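A usage sketch for MaceCommand, assuming the external Mace4 binary (from the LADR/Prover9 distribution) is installed and discoverable, for instance through the PROVER9 environment variable; the goal is deliberately chosen so that a countermodel exists.

# Sketch only; requires the Mace4 binary to be installed and findable.
from nltk.sem import Expression
from nltk.inference.mace import MaceCommand

read_expr = Expression.fromstring
goal = read_expr("mortal(socrates)")          # not entailed by the assumption below
assumptions = [read_expr("man(socrates)")]

mc = MaceCommand(goal, assumptions=assumptions, max_models=50)
if mc.build_model():                  # True: Mace found a countermodel
    print(mc.valuation)               # the model as an nltk.sem.Valuation
    print(mc.model(format="cooked"))  # or any other interpformat output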
natural language toolkit nonmonotonic reasoning daniel h garrette dhgarrettegmail com c 20012023 nltk project url https www nltk org for license information see license txt a module to perform nonmonotonic reasoning the ideas and demonstrations in this module are based on logical foundations of artificial intelligence by michael r genesereth and nils j nilsson this is a prover decorator that adds domain closure assumptions before proving apply the closed domain assumption to the expression domain unione freee constants for e in allexpressions translate exists x p to zd1 zd2 p replacex z or p replacex d1 p replacex d2 translate all x p to p replacex d1 p replacex d2 param ex expression param domain set of variables return expression this is a prover decorator that adds unique names assumptions before proving domain unione freee constants for e in allexpressions if d1 d2 cannot be proven from the premises then add d1 d2 build a dictionary of obvious equalities put a and b in the same set if a and b are not already in the same equality set we can prove that the names are the same entity remember that they are equal so we don t recheck we can t prove it so assume unique names a list of sets of variables param item variable return the set containing item item is not found in any existing set so create a new set this is a prover decorator that completes predicates before proving if the assumptions contain pa then all x px xa is the completion of p if the assumptions contain all x ostrichx birdx then all x birdx ostrichx is the completion of bird if the assumptions don t contain anything that are p then all x px is the completion of p walksocrates socrates bill all x walkx xsocrates walkbill seesocrates john seejohn mary socrates john john mary all x all y seex y xsocrates yjohn xjohn ymary seesocrates mary all x ostrichx birdx birdtweety ostrichsam sam tweety all x birdx ostrichx xtweety all x ostrichx birdsam turn the signatures into disjuncts turn the properties into disjuncts replace variables from the signature with new sig variables make the assumption disjuncts exist so make an implication nothing has property p quantify the implication this method figures out how many arguments the predicate takes and returns a tuple containing that number of unique variables return an application expression with predicate as the predicate and signature as the list of arguments create a dictionary of predicates from the assumptions param assumptions a list of expressions return dict mapping abstractvariableexpression to predholder collect all the universally quantified variables this class will be used by a dictionary that will store information about predicates to be used by the closedworldprover the signatures property is a list of tuples defining signatures for which the predicate is true for instance seejohn mary would be result in the signature john mary for see the second element of the pair is a list of pairs such that the first element of the pair is a tuple of variables and the second element is an expression of those variables that makes the predicate true for instance all x all y seex y knowx y would result in x y seex y for know define taxonomy default properties specify abnormal entities define entities print the assumptions natural language toolkit nonmonotonic reasoning daniel h garrette dhgarrette gmail com c 2001 2023 nltk project url https www nltk org for license information see license txt a module to perform nonmonotonic reasoning the ideas and demonstrations in this module are based 
on logical foundations of artificial intelligence by michael r genesereth and nils j nilsson this is a prover decorator that adds domain closure assumptions before proving apply the closed domain assumption to the expression domain union e free e constants for e in all_expressions translate exists x p to z d1 z d2 p replace x z or p replace x d1 p replace x d2 translate all x p to p replace x d1 p replace x d2 param ex expression param domain set of variable s return expression this is a prover decorator that adds unique names assumptions before proving domain union e free e constants for e in all_expressions if d1 d2 cannot be proven from the premises then add d1 d2 build a dictionary of obvious equalities put a and b in the same set if a and b are not already in the same equality set we can prove that the names are the same entity remember that they are equal so we don t re check we can t prove it so assume unique names a list of sets of variables param item variable return the set containing item item is not found in any existing set so create a new set this is a prover decorator that completes predicates before proving if the assumptions contain p a then all x p x x a is the completion of p if the assumptions contain all x ostrich x bird x then all x bird x ostrich x is the completion of bird if the assumptions don t contain anything that are p then all x p x is the completion of p walk socrates socrates bill all x walk x x socrates walk bill see socrates john see john mary socrates john john mary all x all y see x y x socrates y john x john y mary see socrates mary all x ostrich x bird x bird tweety ostrich sam sam tweety all x bird x ostrich x x tweety all x ostrich x bird sam turn the signatures into disjuncts turn the properties into disjuncts replace variables from the signature with new sig variables make the assumption disjuncts exist so make an implication nothing has property p quantify the implication this method figures out how many arguments the predicate takes and returns a tuple containing that number of unique variables return an application expression with predicate as the predicate and signature as the list of arguments create a dictionary of predicates from the assumptions param assumptions a list of expression s return dict mapping abstractvariableexpression to predholder collect all the universally quantified variables this class will be used by a dictionary that will store information about predicates to be used by the closedworldprover the signatures property is a list of tuples defining signatures for which the predicate is true for instance see john mary would be result in the signature john mary for see the second element of the pair is a list of pairs such that the first element of the pair is a tuple of variables and the second element is an expression of those variables that makes the predicate true for instance all x all y see x y know x y would result in x y see x y for know define taxonomy default properties normal animals don t fly normal birds fly normal ostriches don t fly specify abnormal entities flight non flying bird flying ostrich define entities print the assumptions
from collections import defaultdict from functools import reduce from nltk.inference.api import Prover, ProverCommandDecorator from nltk.inference.prover9 import Prover9, Prover9Command from nltk.sem.logic import ( AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, BooleanExpression, EqualityExpression, ExistsExpression, Expression, ImpExpression, NegatedExpression, Variable, VariableExpression, operator, unique_variable, ) class ProverParseError(Exception): pass def get_domain(goal, assumptions): if goal is None: all_expressions = assumptions else: all_expressions = assumptions + [-goal] return reduce(operator.or_, (a.constants() for a in all_expressions), set()) class ClosedDomainProver(ProverCommandDecorator): def assumptions(self): assumptions = [a for a in self._command.assumptions()] goal = self._command.goal() domain = get_domain(goal, assumptions) return [self.replace_quants(ex, domain) for ex in assumptions] def goal(self): goal = self._command.goal() domain = get_domain(goal, self._command.assumptions()) return self.replace_quants(goal, domain) def replace_quants(self, ex, domain): if isinstance(ex, AllExpression): conjuncts = [ ex.term.replace(ex.variable, VariableExpression(d)) for d in domain ] conjuncts = [self.replace_quants(c, domain) for c in conjuncts] return reduce(lambda x, y: x & y, conjuncts) elif isinstance(ex, BooleanExpression): return ex.__class__( self.replace_quants(ex.first, domain), self.replace_quants(ex.second, domain), ) elif isinstance(ex, NegatedExpression): return -self.replace_quants(ex.term, domain) elif isinstance(ex, ExistsExpression): disjuncts = [ ex.term.replace(ex.variable, VariableExpression(d)) for d in domain ] disjuncts = [self.replace_quants(d, domain) for d in disjuncts] return reduce(lambda x, y: x | y, disjuncts) else: return ex class UniqueNamesProver(ProverCommandDecorator): def assumptions(self): assumptions = self._command.assumptions() domain = list(get_domain(self._command.goal(), assumptions)) eq_sets = SetHolder() for a in assumptions: if isinstance(a, EqualityExpression): av = a.first.variable bv = a.second.variable eq_sets[av].add(bv) new_assumptions = [] for i, a in enumerate(domain): for b in domain[i + 1 :]: if b not in eq_sets[a]: newEqEx = EqualityExpression( VariableExpression(a), VariableExpression(b) ) if Prover9().prove(newEqEx, assumptions): eq_sets[a].add(b) else: new_assumptions.append(-newEqEx) return assumptions + new_assumptions class SetHolder(list): def __getitem__(self, item): assert isinstance(item, Variable) for s in self: if item in s: return s new = {item} self.append(new) return new class ClosedWorldProver(ProverCommandDecorator): def assumptions(self): assumptions = self._command.assumptions() predicates = self._make_predicate_dict(assumptions) new_assumptions = [] for p in predicates: predHolder = predicates[p] new_sig = self._make_unique_signature(predHolder) new_sig_exs = [VariableExpression(v) for v in new_sig] disjuncts = [] for sig in predHolder.signatures: equality_exs = [] for v1, v2 in zip(new_sig_exs, sig): equality_exs.append(EqualityExpression(v1, v2)) disjuncts.append(reduce(lambda x, y: x & y, equality_exs)) for prop in predHolder.properties: bindings = {} for v1, v2 in zip(new_sig_exs, prop[0]): bindings[v2] = v1 disjuncts.append(prop[1].substitute_bindings(bindings)) if disjuncts: antecedent = self._make_antecedent(p, new_sig) consequent = reduce(lambda x, y: x | y, disjuncts) accum = ImpExpression(antecedent, consequent) else: accum = 
NegatedExpression(self._make_antecedent(p, new_sig)) for new_sig_var in new_sig[::-1]: accum = AllExpression(new_sig_var, accum) new_assumptions.append(accum) return assumptions + new_assumptions def _make_unique_signature(self, predHolder): return tuple(unique_variable() for i in range(predHolder.signature_len)) def _make_antecedent(self, predicate, signature): antecedent = predicate for v in signature: antecedent = antecedent(VariableExpression(v)) return antecedent def _make_predicate_dict(self, assumptions): predicates = defaultdict(PredHolder) for a in assumptions: self._map_predicates(a, predicates) return predicates def _map_predicates(self, expression, predDict): if isinstance(expression, ApplicationExpression): func, args = expression.uncurry() if isinstance(func, AbstractVariableExpression): predDict[func].append_sig(tuple(args)) elif isinstance(expression, AndExpression): self._map_predicates(expression.first, predDict) self._map_predicates(expression.second, predDict) elif isinstance(expression, AllExpression): sig = [expression.variable] term = expression.term while isinstance(term, AllExpression): sig.append(term.variable) term = term.term if isinstance(term, ImpExpression): if isinstance(term.first, ApplicationExpression) and isinstance( term.second, ApplicationExpression ): func1, args1 = term.first.uncurry() func2, args2 = term.second.uncurry() if ( isinstance(func1, AbstractVariableExpression) and isinstance(func2, AbstractVariableExpression) and sig == [v.variable for v in args1] and sig == [v.variable for v in args2] ): predDict[func2].append_prop((tuple(sig), term.first)) predDict[func1].validate_sig_len(sig) class PredHolder: def __init__(self): self.signatures = [] self.properties = [] self.signature_len = None def append_sig(self, new_sig): self.validate_sig_len(new_sig) self.signatures.append(new_sig) def append_prop(self, new_prop): self.validate_sig_len(new_prop[0]) self.properties.append(new_prop) def validate_sig_len(self, new_sig): if self.signature_len is None: self.signature_len = len(new_sig) elif self.signature_len != len(new_sig): raise Exception("Signature lengths do not match") def __str__(self): return f"({self.signatures},{self.properties},{self.signature_len})" def __repr__(self): return "%s" % self def closed_domain_demo(): lexpr = Expression.fromstring p1 = lexpr(r"exists x.walk(x)") p2 = lexpr(r"man(Socrates)") c = lexpr(r"walk(Socrates)") prover = Prover9Command(c, [p1, p2]) print(prover.prove()) cdp = ClosedDomainProver(prover) print("assumptions:") for a in cdp.assumptions(): print(" ", a) print("goal:", cdp.goal()) print(cdp.prove()) p1 = lexpr(r"exists x.walk(x)") p2 = lexpr(r"man(Socrates)") p3 = lexpr(r"-walk(Bill)") c = lexpr(r"walk(Socrates)") prover = Prover9Command(c, [p1, p2, p3]) print(prover.prove()) cdp = ClosedDomainProver(prover) print("assumptions:") for a in cdp.assumptions(): print(" ", a) print("goal:", cdp.goal()) print(cdp.prove()) p1 = lexpr(r"exists x.walk(x)") p2 = lexpr(r"man(Socrates)") p3 = lexpr(r"-walk(Bill)") c = lexpr(r"walk(Socrates)") prover = Prover9Command(c, [p1, p2, p3]) print(prover.prove()) cdp = ClosedDomainProver(prover) print("assumptions:") for a in cdp.assumptions(): print(" ", a) print("goal:", cdp.goal()) print(cdp.prove()) p1 = lexpr(r"walk(Socrates)") p2 = lexpr(r"walk(Bill)") c = lexpr(r"all x.walk(x)") prover = Prover9Command(c, [p1, p2]) print(prover.prove()) cdp = ClosedDomainProver(prover) print("assumptions:") for a in cdp.assumptions(): print(" ", a) print("goal:", cdp.goal()) 
print(cdp.prove()) p1 = lexpr(r"girl(mary)") p2 = lexpr(r"dog(rover)") p3 = lexpr(r"all x.(girl(x) -> -dog(x))") p4 = lexpr(r"all x.(dog(x) -> -girl(x))") p5 = lexpr(r"chase(mary, rover)") c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))") prover = Prover9Command(c, [p1, p2, p3, p4, p5]) print(prover.prove()) cdp = ClosedDomainProver(prover) print("assumptions:") for a in cdp.assumptions(): print(" ", a) print("goal:", cdp.goal()) print(cdp.prove()) def unique_names_demo(): lexpr = Expression.fromstring p1 = lexpr(r"man(Socrates)") p2 = lexpr(r"man(Bill)") c = lexpr(r"exists x.exists y.(x != y)") prover = Prover9Command(c, [p1, p2]) print(prover.prove()) unp = UniqueNamesProver(prover) print("assumptions:") for a in unp.assumptions(): print(" ", a) print("goal:", unp.goal()) print(unp.prove()) p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))") p2 = lexpr(r"Bill = William") p3 = lexpr(r"Bill = Billy") c = lexpr(r"-walk(William)") prover = Prover9Command(c, [p1, p2, p3]) print(prover.prove()) unp = UniqueNamesProver(prover) print("assumptions:") for a in unp.assumptions(): print(" ", a) print("goal:", unp.goal()) print(unp.prove()) def closed_world_demo(): lexpr = Expression.fromstring p1 = lexpr(r"walk(Socrates)") p2 = lexpr(r"(Socrates != Bill)") c = lexpr(r"-walk(Bill)") prover = Prover9Command(c, [p1, p2]) print(prover.prove()) cwp = ClosedWorldProver(prover) print("assumptions:") for a in cwp.assumptions(): print(" ", a) print("goal:", cwp.goal()) print(cwp.prove()) p1 = lexpr(r"see(Socrates, John)") p2 = lexpr(r"see(John, Mary)") p3 = lexpr(r"(Socrates != John)") p4 = lexpr(r"(John != Mary)") c = lexpr(r"-see(Socrates, Mary)") prover = Prover9Command(c, [p1, p2, p3, p4]) print(prover.prove()) cwp = ClosedWorldProver(prover) print("assumptions:") for a in cwp.assumptions(): print(" ", a) print("goal:", cwp.goal()) print(cwp.prove()) p1 = lexpr(r"all x.(ostrich(x) -> bird(x))") p2 = lexpr(r"bird(Tweety)") p3 = lexpr(r"-ostrich(Sam)") p4 = lexpr(r"Sam != Tweety") c = lexpr(r"-bird(Sam)") prover = Prover9Command(c, [p1, p2, p3, p4]) print(prover.prove()) cwp = ClosedWorldProver(prover) print("assumptions:") for a in cwp.assumptions(): print(" ", a) print("goal:", cwp.goal()) print(cwp.prove()) def combination_prover_demo(): lexpr = Expression.fromstring p1 = lexpr(r"see(Socrates, John)") p2 = lexpr(r"see(John, Mary)") c = lexpr(r"-see(Socrates, Mary)") prover = Prover9Command(c, [p1, p2]) print(prover.prove()) command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover))) for a in command.assumptions(): print(a) print(command.prove()) def default_reasoning_demo(): lexpr = Expression.fromstring premises = [] premises.append(lexpr(r"all x.(elephant(x) -> animal(x))")) premises.append(lexpr(r"all x.(bird(x) -> animal(x))")) premises.append(lexpr(r"all x.(dove(x) -> bird(x))")) premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))")) premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))")) premises.append( lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))") ) premises.append( lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))") ) premises.append( lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))") ) premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))")) premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))")) premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))")) premises.append(lexpr(r"elephant(E)")) premises.append(lexpr(r"dove(D)")) premises.append(lexpr(r"ostrich(O)")) prover = Prover9Command(None, premises) command = 
UniqueNamesProver(ClosedWorldProver(prover)) for a in command.assumptions(): print(a) print_proof("-fly(E)", premises) print_proof("fly(D)", premises) print_proof("-fly(O)", premises) def print_proof(goal, premises): lexpr = Expression.fromstring prover = Prover9Command(lexpr(goal), premises) command = UniqueNamesProver(ClosedWorldProver(prover)) print(goal, prover.prove(), command.prove()) def demo(): closed_domain_demo() unique_names_demo() closed_world_demo() combination_prover_demo() default_reasoning_demo() if __name__ == "__main__": demo()
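A sketch of stacking the nonmonotonic decorators around a Prover9Command, mirroring the demos above; it assumes the external Prover9 binary is available, and the premises and expected outcome are illustrative only.

# Sketch only; requires the Prover9 binary. Premises are illustrative.
from nltk.sem import Expression
from nltk.inference.prover9 import Prover9Command
from nltk.inference.nonmonotonic import ClosedWorldProver, UniqueNamesProver

read_expr = Expression.fromstring
premises = [read_expr("bird(tweety)"),
            read_expr("ostrich(sam)"),
            read_expr("all x.(ostrich(x) -> bird(x))")]
goal = read_expr("-bird(polly)")

base = Prover9Command(goal, premises)
command = UniqueNamesProver(ClosedWorldProver(base))
for a in command.assumptions():   # inspect the completed/augmented premises
    print(a)
print(command.prove())            # expected True under the closed-world reading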
natural language toolkit interface to the prover9 theorem prover c 20012023 nltk project dan garrette dhgarrettegmail com ewan klein ewaninf ed ac uk url https www nltk org for license information see license txt a theorem prover that makes use of the external prover9 package following is not yet used return code for 2 actually realized as 512 sos list exhausted a common base class used by both prover9command and macecommand which is responsible for maintaining a goal and a set of assumptions and generating prover9style input files from them print the list of the current assumptions a provercommand specific to the prover9 prover it contains the a printassumptions method that is used to print the list of assumptions in multiple formats param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions listsem expression param timeout number of seconds before timeout set to 0 for no timeout type timeout int param prover a prover if not set one will be created type prover prover9 see baseprovercommand decorateproof a common class extended by both prover9 and mace mace mace it contains the functionality required to convert nltkstyle expressions into prover9style expressions return the input string that should be provided to the prover9 binary this string is formed based on the goal assumptions and timeout value of this object a list of directories that should be searched for the prover9 executables this list is used by configprover9 when searching for the prover9 executables call the binary with the given input param inputstr a string whose contents are used as stdin param binary the location of the binary to call param args a list of commandline arguments return a tuple stdout returncode see configprover9 call prover9 via a subprocess convert a logic expression to prover9 format convert logic expression to prover9 formatted string the timeout value for prover9 if a proof can not be found in this amount of time then prover9 will return false use 0 for no timeout def proveself goalnone assumptionsnone verbosefalse if not assumptions assumptions stdout returncode self callprover9 self prover9inputgoal assumptions verboseverbose return returncode 0 stdout def prover9inputself goal assumptions s clearautodenials n only one proof required return s prover9parent prover9inputself goal assumptions def callprover9self inputstr args verbosefalse if self prover9bin is none self prover9bin self findbinaryprover9 verbose updatedinputstr if self timeout 0 updatedinputstr assignmaxseconds d nn self timeout updatedinputstr inputstr stdout returncode self call updatedinputstr self prover9bin args verbose if returncode not in 0 2 errormsgprefix error if errormsgprefix in stdout msgstart stdout indexerrormsgprefix errormsg stdoutmsgstart strip else errormsg none if returncode in 3 4 5 6 raise prover9limitexceededexceptionreturncode errormsg else raise prover9fatalexceptionreturncode errormsg return stdout returncode def callprooftransself inputstr args verbosefalse if self prooftransbin is none self prooftransbin self findbinaryprooftrans verbose return self callinputstr self prooftransbin args verbose class prover9exceptionexception def initself returncode message msg p9returncodesreturncode if message msg ns message exception initself msg class prover9fatalexceptionprover9exception pass class prover9limitexceededexceptionprover9exception pass tests and demos def testconfig a expression fromstringwalkj singj g expression 
fromstringwalkj p prover9commandg assumptionsa p executablepath none p prover9search p prove configprover9 usrlocalbin printp prove printp proof def testconverttoprover9expr for t in expr e expression fromstringt printconverttoprover9e def testprovearguments for goal assumptions in arguments g expression fromstringgoal alist expression fromstringa for a in assumptions p prover9commandg assumptionsalist prove for a in alist print s a printf g pn arguments manx not not manx not manx not manx manx not manx manx not manx manx manx not manx not manx manx not manx manx manx manx manx not manx not manx mortalsocrates all x manx mortalx mansocrates all x manx walksx mansocrates some y walksy all x manx all x manx some x all y seesx y some e3 walke3 subje3 mary some e1 seee1 subje1 john some e2 prede1 e2 walke2 subje2 mary some x e1 seee1 subje1 x some e2 prede1 e2 walke2 subje2 mary some e1 seee1 subje1 john some e2 prede1 e2 walke2 subje2 mary expressions rsome x y seesx y rsome x manx walksx rx manx walksx rx y seesx y rwalksjohn rx bigx y mousey rwalksx runsx threesx foursx rwalksx runsx rsome x prox seesjohn x rsome x manx not walksx rall x manx walksx def spacernum45 print num def demo printtesting configuration spacer testconfig print printtesting conversion to prover9 format spacer testconverttoprover9expressions print printtesting proofs spacer testprovearguments if name main demo natural language toolkit interface to the prover9 theorem prover c 2001 2023 nltk project dan garrette dhgarrette gmail com ewan klein ewan inf ed ac uk url https www nltk org for license information see license txt a theorem prover that makes use of the external prover9 package following is not yet used return code for 2 actually realized as 512 a fatal error occurred user s syntax error sos_empty prover9 ran out of things to do sos list exhausted the max_megs memory limit parameter was exceeded the max_seconds parameter was exceeded the max_given parameter was exceeded the max_kept parameter was exceeded a prover9 action terminated the search prover9 crashed most probably due to a bug a common base class used by both prover9command and macecommand which is responsible for maintaining a goal and a set of assumptions and generating prover9 style input files from them print the list of the current assumptions a provercommand specific to the prover9 prover it contains the a print_assumptions method that is used to print the list of assumptions in multiple formats param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions list sem expression param timeout number of seconds before timeout set to 0 for no timeout type timeout int param prover a prover if not set one will be created type prover prover9 see baseprovercommand decorate_proof a common class extended by both prover9 and mace mace mace it contains the functionality required to convert nltk style expressions into prover9 style expressions return the input string that should be provided to the prover9 binary this string is formed based on the goal assumptions and timeout value of this object a list of directories that should be searched for the prover9 executables this list is used by config_prover9 when searching for the prover9 executables call the binary with the given input param input_str a string whose contents are used as stdin param binary the location of the binary to call param args a list of command line arguments return a tuple stdout returncode see 
config_prover9 call prover9 via a subprocess convert a logic expression to prover9 format convert logic expression to prover9 formatted string the timeout value for prover9 if a proof can not be found in this amount of time then prover9 will return false use 0 for no timeout use prover9 to prove a theorem return a pair whose first element is a boolean indicating if the proof was successful i e returns value of 0 and whose second element is the output of the prover see prover9parent prover9_input only one proof required call the prover9 binary with the given input param input_str a string whose contents are used as stdin param args a list of command line arguments return a tuple stdout returncode see config_prover9 call the prooftrans binary with the given input param input_str a string whose contents are used as stdin param args a list of command line arguments return a tuple stdout returncode see config_prover9 tests and demos config_prover9 usr local bin test that parsing works ok try some proofs and exhibit the results
import os import subprocess import nltk from nltk.inference.api import BaseProverCommand, Prover from nltk.sem.logic import ( AllExpression, AndExpression, EqualityExpression, ExistsExpression, Expression, IffExpression, ImpExpression, NegatedExpression, OrExpression, ) p9_return_codes = { 0: True, 1: "(FATAL)", 2: False, 3: "(MAX_MEGS)", 4: "(MAX_SECONDS)", 5: "(MAX_GIVEN)", 6: "(MAX_KEPT)", 7: "(ACTION)", 101: "(SIGSEGV)", } class Prover9CommandParent: def print_assumptions(self, output_format="nltk"): if output_format.lower() == "nltk": for a in self.assumptions(): print(a) elif output_format.lower() == "prover9": for a in convert_to_prover9(self.assumptions()): print(a) else: raise NameError( "Unrecognized value for 'output_format': %s" % output_format ) class Prover9Command(Prover9CommandParent, BaseProverCommand): def __init__(self, goal=None, assumptions=None, timeout=60, prover=None): if not assumptions: assumptions = [] if prover is not None: assert isinstance(prover, Prover9) else: prover = Prover9(timeout) BaseProverCommand.__init__(self, prover, goal, assumptions) def decorate_proof(self, proof_string, simplify=True): if simplify: return self._prover._call_prooftrans(proof_string, ["striplabels"])[ 0 ].rstrip() else: return proof_string.rstrip() class Prover9Parent: _binary_location = None def config_prover9(self, binary_location, verbose=False): if binary_location is None: self._binary_location = None self._prover9_bin = None else: name = "prover9" self._prover9_bin = nltk.internals.find_binary( name, path_to_bin=binary_location, env_vars=["PROVER9"], url="https://www.cs.unm.edu/~mccune/prover9/", binary_names=[name, name + ".exe"], verbose=verbose, ) self._binary_location = self._prover9_bin.rsplit(os.path.sep, 1) def prover9_input(self, goal, assumptions): s = "" if assumptions: s += "formulas(assumptions).\n" for p9_assumption in convert_to_prover9(assumptions): s += " %s.\n" % p9_assumption s += "end_of_list.\n\n" if goal: s += "formulas(goals).\n" s += " %s.\n" % convert_to_prover9(goal) s += "end_of_list.\n\n" return s def binary_locations(self): return [ "/usr/local/bin/prover9", "/usr/local/bin/prover9/bin", "/usr/local/bin", "/usr/bin", "/usr/local/prover9", "/usr/local/share/prover9", ] def _find_binary(self, name, verbose=False): binary_locations = self.binary_locations() if self._binary_location is not None: binary_locations += [self._binary_location] return nltk.internals.find_binary( name, searchpath=binary_locations, env_vars=["PROVER9"], url="https://www.cs.unm.edu/~mccune/prover9/", binary_names=[name, name + ".exe"], verbose=verbose, ) def _call(self, input_str, binary, args=[], verbose=False): if verbose: print("Calling:", binary) print("Args:", args) print("Input:\n", input_str, "\n") cmd = [binary] + args try: input_str = input_str.encode("utf8") except AttributeError: pass p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE ) (stdout, stderr) = p.communicate(input=input_str) if verbose: print("Return code:", p.returncode) if stdout: print("stdout:\n", stdout, "\n") if stderr: print("stderr:\n", stderr, "\n") return (stdout.decode("utf-8"), p.returncode) def convert_to_prover9(input): if isinstance(input, list): result = [] for s in input: try: result.append(_convert_to_prover9(s.simplify())) except: print("input %s cannot be converted to Prover9 input syntax" % input) raise return result else: try: return _convert_to_prover9(input.simplify()) except: print("input %s cannot be converted to Prover9 input 
syntax" % input) raise def _convert_to_prover9(expression): if isinstance(expression, ExistsExpression): return ( "exists " + str(expression.variable) + " " + _convert_to_prover9(expression.term) ) elif isinstance(expression, AllExpression): return ( "all " + str(expression.variable) + " " + _convert_to_prover9(expression.term) ) elif isinstance(expression, NegatedExpression): return "-(" + _convert_to_prover9(expression.term) + ")" elif isinstance(expression, AndExpression): return ( "(" + _convert_to_prover9(expression.first) + " & " + _convert_to_prover9(expression.second) + ")" ) elif isinstance(expression, OrExpression): return ( "(" + _convert_to_prover9(expression.first) + " | " + _convert_to_prover9(expression.second) + ")" ) elif isinstance(expression, ImpExpression): return ( "(" + _convert_to_prover9(expression.first) + " -> " + _convert_to_prover9(expression.second) + ")" ) elif isinstance(expression, IffExpression): return ( "(" + _convert_to_prover9(expression.first) + " <-> " + _convert_to_prover9(expression.second) + ")" ) elif isinstance(expression, EqualityExpression): return ( "(" + _convert_to_prover9(expression.first) + " = " + _convert_to_prover9(expression.second) + ")" ) else: return str(expression) class Prover9(Prover9Parent, Prover): _prover9_bin = None _prooftrans_bin = None def __init__(self, timeout=60): self._timeout = timeout def _prove(self, goal=None, assumptions=None, verbose=False): if not assumptions: assumptions = [] stdout, returncode = self._call_prover9( self.prover9_input(goal, assumptions), verbose=verbose ) return (returncode == 0, stdout) def prover9_input(self, goal, assumptions): s = "clear(auto_denials).\n" return s + Prover9Parent.prover9_input(self, goal, assumptions) def _call_prover9(self, input_str, args=[], verbose=False): if self._prover9_bin is None: self._prover9_bin = self._find_binary("prover9", verbose) updated_input_str = "" if self._timeout > 0: updated_input_str += "assign(max_seconds, %d).\n\n" % self._timeout updated_input_str += input_str stdout, returncode = self._call( updated_input_str, self._prover9_bin, args, verbose ) if returncode not in [0, 2]: errormsgprefix = "%%ERROR:" if errormsgprefix in stdout: msgstart = stdout.index(errormsgprefix) errormsg = stdout[msgstart:].strip() else: errormsg = None if returncode in [3, 4, 5, 6]: raise Prover9LimitExceededException(returncode, errormsg) else: raise Prover9FatalException(returncode, errormsg) return stdout, returncode def _call_prooftrans(self, input_str, args=[], verbose=False): if self._prooftrans_bin is None: self._prooftrans_bin = self._find_binary("prooftrans", verbose) return self._call(input_str, self._prooftrans_bin, args, verbose) class Prover9Exception(Exception): def __init__(self, returncode, message): msg = p9_return_codes[returncode] if message: msg += "\n%s" % message Exception.__init__(self, msg) class Prover9FatalException(Prover9Exception): pass class Prover9LimitExceededException(Prover9Exception): pass def test_config(): a = Expression.fromstring("(walk(j) & sing(j))") g = Expression.fromstring("walk(j)") p = Prover9Command(g, assumptions=[a]) p._executable_path = None p.prover9_search = [] p.prove() print(p.prove()) print(p.proof()) def test_convert_to_prover9(expr): for t in expr: e = Expression.fromstring(t) print(convert_to_prover9(e)) def test_prove(arguments): for (goal, assumptions) in arguments: g = Expression.fromstring(goal) alist = [Expression.fromstring(a) for a in assumptions] p = Prover9Command(g, assumptions=alist).prove() for a in 
alist: print(" %s" % a) print(f"|- {g}: {p}\n") arguments = [ ("(man(x) <-> (not (not man(x))))", []), ("(not (man(x) & (not man(x))))", []), ("(man(x) | (not man(x)))", []), ("(man(x) & (not man(x)))", []), ("(man(x) -> man(x))", []), ("(not (man(x) & (not man(x))))", []), ("(man(x) | (not man(x)))", []), ("(man(x) -> man(x))", []), ("(man(x) <-> man(x))", []), ("(not (man(x) <-> (not man(x))))", []), ("mortal(Socrates)", ["all x.(man(x) -> mortal(x))", "man(Socrates)"]), ("((all x.(man(x) -> walks(x)) & man(Socrates)) -> some y.walks(y))", []), ("(all x.man(x) -> all x.man(x))", []), ("some x.all y.sees(x,y)", []), ( "some e3.(walk(e3) & subj(e3, mary))", [ "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" ], ), ( "some x e1.(see(e1) & subj(e1, x) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))", [ "some e1.(see(e1) & subj(e1, john) & some e2.(pred(e1, e2) & walk(e2) & subj(e2, mary)))" ], ), ] expressions = [ r"some x y.sees(x,y)", r"some x.(man(x) & walks(x))", r"\x.(man(x) & walks(x))", r"\x y.sees(x,y)", r"walks(john)", r"\x.big(x, \y.mouse(y))", r"(walks(x) & (runs(x) & (threes(x) & fours(x))))", r"(walks(x) -> runs(x))", r"some x.(PRO(x) & sees(John, x))", r"some x.(man(x) & (not walks(x)))", r"all x.(man(x) -> walks(x))", ] def spacer(num=45): print("-" * num) def demo(): print("Testing configuration") spacer() test_config() print() print("Testing conversion to Prover9 format") spacer() test_convert_to_prover9(expressions) print() print("Testing proofs") spacer() test_prove(arguments) if __name__ == "__main__": demo()
natural language toolkit firstorder resolutionbased theorem prover dan garrette dhgarrettegmail com c 20012023 nltk project url https www nltk org for license information see license txt module for a resolutionbased first order theorem prover param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions listsem expression map indices to lists of indices to store attempted unifications since we try clauses in order we should start after the last index tried don t 1 unify a clause with itself 2 use tautologies param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions listsem expression perform the actual proof store the result to prevent unnecessary reproving decorate the proof output attempt to unify this clause with the other returning a list of resulting unified clauses param other clause with which to unify param bindings bindingdict containing bindings that should be used during the unification param used tuple of two lists of atoms the first lists the atoms from self that were successfully unified with atoms from other the second lists the atoms from other that were successfully unified with atoms from self param skipped tuple of two clause objects the first is a list of all the atoms from the self clause that have not been unified with anything on the path the second is same thing for the other clause param debug bool indicating whether debug statements should print return list containing all the resulting clause objects that could be obtained by unification remove subsumed clauses make a list of all indices of subsumed clauses and then remove them from the list return true iff every term in self is a term in other param other clause return bool return true iff self subsumes other this is if there is a substitution such that every term in self can be unified with a term in other param other clause return bool self is a tautology if it contains ground terms p and p the ground term p must be an exact match ie not using unification replace every instance of variable with expression across every atom in the clause param variable variable param expression expression replace every binding param bindings a list of tuples mapping variable expressions to the expressions to which they are bound return clause this method facilitates movement through the terms of self explore this self atom skip this possible self atom unification found so progress with this line of unification put skipped and unused terms back into play for later unification the atoms could not be unified this method facilitates movement through the terms of other skip this possible pairing and move to the next unification found so progress with this line of unification put skipped and unused terms back into play for later unification the atoms could not be unified this method attempts to unify two terms two expressions are unifiable if there exists a substitution function s such that sa sb param a expression param b expression param bindings bindingdict a starting set of bindings with which the unification must be consistent return bindingdict a dictionary of the bindings required to unify raise bindingexception if the terms cannot be unified use resolution use demodulation if there are no skipped terms and no terms left in first then all of the terms in the original self were unified with terms in other therefore there exists a binding this one such 
that every term in self can be unified with a term in other which is the definition of subsumption skolemize clausify and standardize the variables apart param expression a skolemized expression in cnf param bindinglist list of abstractvariableexpression atomicexpression to initialize the dictionary a binding is consistent with the dict if its variable is not already bound or if its variable is already bound to its argument param variable variable the variable to bind param binding expression the atomic to which variable should be bound raise bindingexception if the variable cannot be bound in this dictionary since variable is already bound try to bind binding to variable return the expression to which variable is bound param other bindingdict the dict with which to combine self return bindingdict a new dict containing all the elements of both parameters raise bindingexception if the parameter dictionaries are not consistent with each other find the most general unification of the two given expressions param a expression param b expression param bindings bindingdict a starting set of bindings with which the unification must be consistent return a list of bindings raise bindingexception if the expressions cannot be unified natural language toolkit first order resolution based theorem prover dan garrette dhgarrette gmail com c 2001 2023 nltk project url https www nltk org for license information see license txt module for a resolution based first order theorem prover param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions list sem expression map indices to lists of indices to store attempted unifications since we try clauses in order we should start after the last index tried nothing tried yet for i so start with the next don t 1 unify a clause with itself 2 use tautologies if there s an empty clause since we added a new clause restart from the top param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions list sem expression perform the actual proof store the result to prevent unnecessary re proving decorate the proof output attempt to unify this clause with the other returning a list of resulting unified clauses param other clause with which to unify param bindings bindingdict containing bindings that should be used during the unification param used tuple of two lists of atoms the first lists the atoms from self that were successfully unified with atoms from other the second lists the atoms from other that were successfully unified with atoms from self param skipped tuple of two clause objects the first is a list of all the atoms from the self clause that have not been unified with anything on the path the second is same thing for the other clause param debug bool indicating whether debug statements should print return list containing all the resulting clause objects that could be obtained by unification remove subsumed clauses make a list of all indices of subsumed clauses and then remove them from the list return true iff every term in self is a term in other param other clause return bool return true iff self subsumes other this is if there is a substitution such that every term in self can be unified with a term in other param other clause return bool self is a tautology if it contains ground terms p and p the ground term p must be an exact match ie not using unification replace every instance of 
variable with expression across every atom in the clause param variable variable param expression expression replace every binding param bindings a list of tuples mapping variable expressions to the expressions to which they are bound return clause this method facilitates movement through the terms of self if no more recursions can be performed explore this self atom skip this possible self atom unification found so progress with this line of unification put skipped and unused terms back into play for later unification the atoms could not be unified this method facilitates movement through the terms of other if no more recursions can be performed skip this possible pairing and move to the next unification found so progress with this line of unification put skipped and unused terms back into play for later unification the atoms could not be unified this method attempts to unify two terms two expressions are unifiable if there exists a substitution function s such that s a s b param a expression param b expression param bindings bindingdict a starting set of bindings with which the unification must be consistent return bindingdict a dictionary of the bindings required to unify raise bindingexception if the terms cannot be unified use resolution use demodulation if bindings were made along the path no bindings made means no unification occurred so no result if there are no skipped terms and no terms left in first then all of the terms in the original self were unified with terms in other therefore there exists a binding this one such that every term in self can be unified with a term in other which is the definition of subsumption skolemize clausify and standardize the variables apart param expression a skolemized expression in cnf param binding_list list of abstractvariableexpression atomicexpression to initialize the dictionary a binding is consistent with the dict if its variable is not already bound or if its variable is already bound to its argument param variable variable the variable to bind param binding expression the atomic to which variable should be bound raise bindingexception if the variable cannot be bound in this dictionary since variable is already bound try to bind binding to variable return the expression to which variable is bound param other bindingdict the dict with which to combine self return bindingdict a new dict containing all the elements of both parameters raise bindingexception if the parameter dictionaries are not consistent with each other find the most general unification of the two given expressions param a expression param b expression param bindings bindingdict a starting set of bindings with which the unification must be consistent return a list of bindings raise bindingexception if the expressions cannot be unified
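The unification and binding machinery described above can be exercised directly. The following is a minimal sketch, not part of the original module: it assumes NLTK is installed and that these helpers live in nltk.inference.resolution as in the standard NLTK distribution; the formulas are illustrative only.

from nltk.sem.logic import Expression
from nltk.inference.resolution import BindingDict, BindingException, most_general_unification

# Unify P(x) with P(john): the most general unifier binds x to john.
a = Expression.fromstring('P(x)')
b = Expression.fromstring('P(john)')
print(most_general_unification(a, b))          # {x: john}

# Combining two BindingDicts fails if they bind the same variable inconsistently.
x = Expression.fromstring('x').variable
d1 = BindingDict([(x, Expression.fromstring('john'))])
d2 = BindingDict([(x, Expression.fromstring('mary'))])
try:
    print(d1 + d2)
except BindingException as e:
    print(e)                                   # contradicting bindings are rejected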
import operator from collections import defaultdict from functools import reduce from nltk.inference.api import BaseProverCommand, Prover from nltk.sem import skolemize from nltk.sem.logic import ( AndExpression, ApplicationExpression, EqualityExpression, Expression, IndividualVariableExpression, NegatedExpression, OrExpression, Variable, VariableExpression, is_indvar, unique_variable, ) class ProverParseError(Exception): pass class ResolutionProver(Prover): ANSWER_KEY = "ANSWER" _assume_false = True def _prove(self, goal=None, assumptions=None, verbose=False): if not assumptions: assumptions = [] result = None try: clauses = [] if goal: clauses.extend(clausify(-goal)) for a in assumptions: clauses.extend(clausify(a)) result, clauses = self._attempt_proof(clauses) if verbose: print(ResolutionProverCommand._decorate_clauses(clauses)) except RuntimeError as e: if self._assume_false and str(e).startswith( "maximum recursion depth exceeded" ): result = False clauses = [] else: if verbose: print(e) else: raise e return (result, clauses) def _attempt_proof(self, clauses): tried = defaultdict(list) i = 0 while i < len(clauses): if not clauses[i].is_tautology(): if tried[i]: j = tried[i][-1] + 1 else: j = i + 1 while j < len(clauses): if i != j and j and not clauses[j].is_tautology(): tried[i].append(j) newclauses = clauses[i].unify(clauses[j]) if newclauses: for newclause in newclauses: newclause._parents = (i + 1, j + 1) clauses.append(newclause) if not len(newclause): return (True, clauses) i = -1 break j += 1 i += 1 return (False, clauses) class ResolutionProverCommand(BaseProverCommand): def __init__(self, goal=None, assumptions=None, prover=None): if prover is not None: assert isinstance(prover, ResolutionProver) else: prover = ResolutionProver() BaseProverCommand.__init__(self, prover, goal, assumptions) self._clauses = None def prove(self, verbose=False): if self._result is None: self._result, clauses = self._prover._prove( self.goal(), self.assumptions(), verbose ) self._clauses = clauses self._proof = ResolutionProverCommand._decorate_clauses(clauses) return self._result def find_answers(self, verbose=False): self.prove(verbose) answers = set() answer_ex = VariableExpression(Variable(ResolutionProver.ANSWER_KEY)) for clause in self._clauses: for term in clause: if ( isinstance(term, ApplicationExpression) and term.function == answer_ex and not isinstance(term.argument, IndividualVariableExpression) ): answers.add(term.argument) return answers @staticmethod def _decorate_clauses(clauses): out = "" max_clause_len = max(len(str(clause)) for clause in clauses) max_seq_len = len(str(len(clauses))) for i in range(len(clauses)): parents = "A" taut = "" if clauses[i].is_tautology(): taut = "Tautology" if clauses[i]._parents: parents = str(clauses[i]._parents) parents = " " * (max_clause_len - len(str(clauses[i])) + 1) + parents seq = " " * (max_seq_len - len(str(i + 1))) + str(i + 1) out += f"[{seq}] {clauses[i]} {parents} {taut}\n" return out class Clause(list): def __init__(self, data): list.__init__(self, data) self._is_tautology = None self._parents = None def unify(self, other, bindings=None, used=None, skipped=None, debug=False): if bindings is None: bindings = BindingDict() if used is None: used = ([], []) if skipped is None: skipped = ([], []) if isinstance(debug, bool): debug = DebugObject(debug) newclauses = _iterate_first( self, other, bindings, used, skipped, _complete_unify_path, debug ) subsumed = [] for i, c1 in enumerate(newclauses): if i not in subsumed: for j, c2 in 
enumerate(newclauses): if i != j and j not in subsumed and c1.subsumes(c2): subsumed.append(j) result = [] for i in range(len(newclauses)): if i not in subsumed: result.append(newclauses[i]) return result def isSubsetOf(self, other): for a in self: if a not in other: return False return True def subsumes(self, other): negatedother = [] for atom in other: if isinstance(atom, NegatedExpression): negatedother.append(atom.term) else: negatedother.append(-atom) negatedotherClause = Clause(negatedother) bindings = BindingDict() used = ([], []) skipped = ([], []) debug = DebugObject(False) return ( len( _iterate_first( self, negatedotherClause, bindings, used, skipped, _subsumes_finalize, debug, ) ) > 0 ) def __getslice__(self, start, end): return Clause(list.__getslice__(self, start, end)) def __sub__(self, other): return Clause([a for a in self if a not in other]) def __add__(self, other): return Clause(list.__add__(self, other)) def is_tautology(self): if self._is_tautology is not None: return self._is_tautology for i, a in enumerate(self): if not isinstance(a, EqualityExpression): j = len(self) - 1 while j > i: b = self[j] if isinstance(a, NegatedExpression): if a.term == b: self._is_tautology = True return True elif isinstance(b, NegatedExpression): if a == b.term: self._is_tautology = True return True j -= 1 self._is_tautology = False return False def free(self): return reduce(operator.or_, ((atom.free() | atom.constants()) for atom in self)) def replace(self, variable, expression): return Clause([atom.replace(variable, expression) for atom in self]) def substitute_bindings(self, bindings): return Clause([atom.substitute_bindings(bindings) for atom in self]) def __str__(self): return "{" + ", ".join("%s" % item for item in self) + "}" def __repr__(self): return "%s" % self def _iterate_first(first, second, bindings, used, skipped, finalize_method, debug): debug.line(f"unify({first},{second}) {bindings}") if not len(first) or not len(second): return finalize_method(first, second, bindings, used, skipped, debug) else: result = _iterate_second( first, second, bindings, used, skipped, finalize_method, debug + 1 ) newskipped = (skipped[0] + [first[0]], skipped[1]) result += _iterate_first( first[1:], second, bindings, used, newskipped, finalize_method, debug + 1 ) try: newbindings, newused, unused = _unify_terms( first[0], second[0], bindings, used ) newfirst = first[1:] + skipped[0] + unused[0] newsecond = second[1:] + skipped[1] + unused[1] result += _iterate_first( newfirst, newsecond, newbindings, newused, ([], []), finalize_method, debug + 1, ) except BindingException: pass return result def _iterate_second(first, second, bindings, used, skipped, finalize_method, debug): debug.line(f"unify({first},{second}) {bindings}") if not len(first) or not len(second): return finalize_method(first, second, bindings, used, skipped, debug) else: newskipped = (skipped[0], skipped[1] + [second[0]]) result = _iterate_second( first, second[1:], bindings, used, newskipped, finalize_method, debug + 1 ) try: newbindings, newused, unused = _unify_terms( first[0], second[0], bindings, used ) newfirst = first[1:] + skipped[0] + unused[0] newsecond = second[1:] + skipped[1] + unused[1] result += _iterate_second( newfirst, newsecond, newbindings, newused, ([], []), finalize_method, debug + 1, ) except BindingException: pass return result def _unify_terms(a, b, bindings=None, used=None): assert isinstance(a, Expression) assert isinstance(b, Expression) if bindings is None: bindings = BindingDict() if used is None: 
used = ([], []) if isinstance(a, NegatedExpression) and isinstance(b, ApplicationExpression): newbindings = most_general_unification(a.term, b, bindings) newused = (used[0] + [a], used[1] + [b]) unused = ([], []) elif isinstance(a, ApplicationExpression) and isinstance(b, NegatedExpression): newbindings = most_general_unification(a, b.term, bindings) newused = (used[0] + [a], used[1] + [b]) unused = ([], []) elif isinstance(a, EqualityExpression): newbindings = BindingDict([(a.first.variable, a.second)]) newused = (used[0] + [a], used[1]) unused = ([], [b]) elif isinstance(b, EqualityExpression): newbindings = BindingDict([(b.first.variable, b.second)]) newused = (used[0], used[1] + [b]) unused = ([a], []) else: raise BindingException((a, b)) return newbindings, newused, unused def _complete_unify_path(first, second, bindings, used, skipped, debug): if used[0] or used[1]: newclause = Clause(skipped[0] + skipped[1] + first + second) debug.line(" -> New Clause: %s" % newclause) return [newclause.substitute_bindings(bindings)] else: debug.line(" -> End") return [] def _subsumes_finalize(first, second, bindings, used, skipped, debug): if not len(skipped[0]) and not len(first): return [True] else: return [] def clausify(expression): clause_list = [] for clause in _clausify(skolemize(expression)): for free in clause.free(): if is_indvar(free.name): newvar = VariableExpression(unique_variable()) clause = clause.replace(free, newvar) clause_list.append(clause) return clause_list def _clausify(expression): if isinstance(expression, AndExpression): return _clausify(expression.first) + _clausify(expression.second) elif isinstance(expression, OrExpression): first = _clausify(expression.first) second = _clausify(expression.second) assert len(first) == 1 assert len(second) == 1 return [first[0] + second[0]] elif isinstance(expression, EqualityExpression): return [Clause([expression])] elif isinstance(expression, ApplicationExpression): return [Clause([expression])] elif isinstance(expression, NegatedExpression): if isinstance(expression.term, ApplicationExpression): return [Clause([expression])] elif isinstance(expression.term, EqualityExpression): return [Clause([expression])] raise ProverParseError() class BindingDict: def __init__(self, binding_list=None): self.d = {} if binding_list: for (v, b) in binding_list: self[v] = b def __setitem__(self, variable, binding): assert isinstance(variable, Variable) assert isinstance(binding, Expression) try: existing = self[variable] except KeyError: existing = None if not existing or binding == existing: self.d[variable] = binding elif isinstance(binding, IndividualVariableExpression): try: existing = self[binding.variable] except KeyError: existing = None binding2 = VariableExpression(variable) if not existing or binding2 == existing: self.d[binding.variable] = binding2 else: raise BindingException( "Variable %s already bound to another " "value" % (variable) ) else: raise BindingException( "Variable %s already bound to another " "value" % (variable) ) def __getitem__(self, variable): assert isinstance(variable, Variable) intermediate = self.d[variable] while intermediate: try: intermediate = self.d[intermediate] except KeyError: return intermediate def __contains__(self, item): return item in self.d def __add__(self, other): try: combined = BindingDict() for v in self.d: combined[v] = self.d[v] for v in other.d: combined[v] = other.d[v] return combined except BindingException as e: raise BindingException( "Attempting to add two contradicting " "BindingDicts: 
'%s' and '%s'" % (self, other) ) from e def __len__(self): return len(self.d) def __str__(self): data_str = ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) return "{" + data_str + "}" def __repr__(self): return "%s" % self def most_general_unification(a, b, bindings=None): if bindings is None: bindings = BindingDict() if a == b: return bindings elif isinstance(a, IndividualVariableExpression): return _mgu_var(a, b, bindings) elif isinstance(b, IndividualVariableExpression): return _mgu_var(b, a, bindings) elif isinstance(a, ApplicationExpression) and isinstance(b, ApplicationExpression): return most_general_unification( a.function, b.function, bindings ) + most_general_unification(a.argument, b.argument, bindings) raise BindingException((a, b)) def _mgu_var(var, expression, bindings): if var.variable in expression.free() | expression.constants(): raise BindingException((var, expression)) else: return BindingDict([(var.variable, expression)]) + bindings class BindingException(Exception): def __init__(self, arg): if isinstance(arg, tuple): Exception.__init__(self, "'%s' cannot be bound to '%s'" % arg) else: Exception.__init__(self, arg) class UnificationException(Exception): def __init__(self, a, b): Exception.__init__(self, f"'{a}' cannot unify with '{b}'") class DebugObject: def __init__(self, enabled=True, indent=0): self.enabled = enabled self.indent = indent def __add__(self, i): return DebugObject(self.enabled, self.indent + i) def line(self, line): if self.enabled: print(" " * self.indent + line) def testResolutionProver(): resolution_test(r"man(x)") resolution_test(r"(man(x) -> man(x))") resolution_test(r"(man(x) -> --man(x))") resolution_test(r"-(man(x) and -man(x))") resolution_test(r"(man(x) or -man(x))") resolution_test(r"(man(x) -> man(x))") resolution_test(r"-(man(x) and -man(x))") resolution_test(r"(man(x) or -man(x))") resolution_test(r"(man(x) -> man(x))") resolution_test(r"(man(x) iff man(x))") resolution_test(r"-(man(x) iff -man(x))") resolution_test("all x.man(x)") resolution_test("-all x.some y.F(x,y) & some x.all y.(-F(x,y))") resolution_test("some x.all y.sees(x,y)") p1 = Expression.fromstring(r"all x.(man(x) -> mortal(x))") p2 = Expression.fromstring(r"man(Socrates)") c = Expression.fromstring(r"mortal(Socrates)") print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") p1 = Expression.fromstring(r"all x.(man(x) -> walks(x))") p2 = Expression.fromstring(r"man(John)") c = Expression.fromstring(r"some y.walks(y)") print(f"{p1}, {p2} |- {c}: {ResolutionProver().prove(c, [p1, p2])}") p = Expression.fromstring(r"some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))") c = Expression.fromstring(r"some e0.walk(e0,mary)") print(f"{p} |- {c}: {ResolutionProver().prove(c, [p])}") def resolution_test(e): f = Expression.fromstring(e) t = ResolutionProver().prove(f) print(f"|- {f}: {t}") def test_clausify(): lexpr = Expression.fromstring print(clausify(lexpr("P(x) | Q(x)"))) print(clausify(lexpr("(P(x) & Q(x)) | R(x)"))) print(clausify(lexpr("P(x) | (Q(x) & R(x))"))) print(clausify(lexpr("(P(x) & Q(x)) | (R(x) & S(x))"))) print(clausify(lexpr("P(x) | Q(x) | R(x)"))) print(clausify(lexpr("P(x) | (Q(x) & R(x)) | S(x)"))) print(clausify(lexpr("exists x.P(x) | Q(x)"))) print(clausify(lexpr("-(-P(x) & Q(x))"))) print(clausify(lexpr("P(x) <-> Q(x)"))) print(clausify(lexpr("-(P(x) <-> Q(x))"))) print(clausify(lexpr("-(all x.P(x))"))) print(clausify(lexpr("-(some x.P(x))"))) print(clausify(lexpr("some x.P(x)"))) print(clausify(lexpr("some x.all y.P(x,y)"))) 
print(clausify(lexpr("all y.some x.P(x,y)"))) print(clausify(lexpr("all z.all y.some x.P(x,y,z)"))) print(clausify(lexpr("all x.(all y.P(x,y) -> -all y.(Q(x,y) -> R(x,y)))"))) def demo(): test_clausify() print() testResolutionProver() print() p = Expression.fromstring("man(x)") print(ResolutionProverCommand(p, [p]).prove()) if __name__ == "__main__": demo()
natural language toolkit firstorder tableau theorem prover c 20012023 nltk project dan garrette dhgarrettegmail com url https www nltk org for license information see license txt module for a tableaubased first order theorem prover if there s nothing left in the agenda and we haven t closed the path check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars combine new context with existing since current is of type ab the path is closed if a b since current is of the form a b replace all free instances of a with b if there are accessiblevars on the path get the set of bound variables that have not be used by this allexpression no more available variables to substitute param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions listsem expression pop the first expression that appears in the agenda for i s in enumerateself sets if s if i in categories neq categories all for ex in s try if not ex0 exhausted s removeex return ex i except attributeerror s removeex return ex i else return s pop i return none none none def replaceallself old new for s in self sets for ex ctx in s ex replaceold variable new if ctx is not none ctx replaceold variable new def markallsfreshself for u in self setscategories all u exhausted false def markneqsfreshself for neq in self setscategories neq neq exhausted false def categorizeexpressionself current if isinstancecurrent negatedexpression return self categorizenegatedexpressioncurrent elif isinstancecurrent functionvariableexpression return categories prop elif tableauprover isatomcurrent return categories atom elif isinstancecurrent allexpression return categories all elif isinstancecurrent andexpression return categories and elif isinstancecurrent orexpression return categories or elif isinstancecurrent impexpression return categories imp elif isinstancecurrent iffexpression return categories iff elif isinstancecurrent equalityexpression return categories eq elif isinstancecurrent existsexpression return categories exists elif isinstancecurrent applicationexpression return categories app else raise proverparseerrorcannot categorize s current class name def categorizenegatedexpressionself current negated current term if isinstancenegated negatedexpression return categories dneg elif isinstancenegated functionvariableexpression return categories nprop elif tableauprover isatomnegated return categories natom elif isinstancenegated allexpression return categories nall elif isinstancenegated andexpression return categories nand elif isinstancenegated orexpression return categories nor elif isinstancenegated impexpression return categories nimp elif isinstancenegated iffexpression return categories niff elif isinstancenegated equalityexpression return categories neq elif isinstancenegated existsexpression return categories nexists elif isinstancenegated applicationexpression return categories napp else 
raise proverparseerrorcannot categorize s negated class name class debug def initself verbose indent0 linesnone self verbose verbose self indent indent if not lines lines self lines lines def addself increment return debugself verbose self indent 1 self lines def lineself data indent0 if isinstancedata tuple ex ctx data if ctx data fex ctx else data s ex if isinstanceex allexpression try usedvars s joins ve variable name for ve in ex usedvars data s usedvars except attributeerror data newline format self indent indent data self lines appendnewline if self verbose printnewline class categories atom 0 prop 1 natom 2 nprop 3 app 4 napp 5 neq 6 dneg 7 nall 8 nexists 9 and 10 nor 11 nimp 12 or 13 imp 14 nand 15 iff 16 niff 17 eq 18 exists 19 all 20 def testtableauprover tableautestp p tableautestp p tableautestq p p q tableautestmanx tableautestmanx manx tableautestmanx manx tableautestmanx and manx tableautestmanx or manx tableautestmanx manx tableautestmanx and manx tableautestmanx or manx tableautestmanx manx tableautestmanx iff manx tableautestmanx iff manx tableautestall x manx tableautestall x all y x y y x tableautestall x all y all z x y y z x z tableautest all x some y fx y some x all y fx y tableautest some x all y seesx y p1 all x manx mortalx p2 mansocrates c mortalsocrates tableautestc p1 p2 p1 all x manx walksx p2 manjohn c some y walksy tableautestc p1 p2 p x y walksy c walksx tableautestc p p x y y z z w c x w tableautestc p p some e1 some e2 believee1 john e2 walke2 mary c some e0 walke0 mary tableautestc p c exists x exists z3 x mary z3 john seesz3 x exists x exists z4 x john z4 mary seesx z4 tableautestc p some e1 some e2 believe e1 john e2 and walk e2 mary c some x some e3 some e4 believe e3 x e4 and walk e4 mary tableautestc p def testhigherordertableauprover tableautestbelievej lieb believej lieb cheatb tableautestbelievej lieb cheatb believej lieb tableautest believej lieb lieb how do we capture that john believes all things that are true tableautest believej knowb cheatb believej knowb lieb knowb stealsb cheatb tableautestpqy ry rz pqx qy ry rz tableautestbelievej cheatb lieb believej lieb cheatb tableautestbelievej cheatb lieb believej lieb cheatb def tableautestc psnone verbosefalse pc expression fromstringc pps expression fromstringp for p in ps if ps else if not ps ps print s s s joinps pc tableauprover provepc pps verboseverbose def demo testtableauprover testhigherordertableauprover if name main demo natural language toolkit first order tableau theorem prover c 2001 2023 nltk project dan garrette dhgarrette gmail com url https www nltk org for license information see license txt module for a tableau based first order theorem prover if there s nothing left in the agenda and we haven t closed the path check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars check if the branch is closed return true if it is mark all allexpressions as not exhausted into the agenda since we are potentially adding new accessible vars combine new context with existing since current is of type a b the path is closed if a b since current is of the form a b replace all free 
instances of a with b if there are accessible_vars on the path get the set of bound variables that have not be used by this allexpression no more available variables to substitute param goal input expression to prove type goal sem expression param assumptions input expressions to use as assumptions in the proof type assumptions list sem expression pop the first expression that appears in the agenda tableau_test all x some y f x y some x all y f x y tableau_test some x all y sees x y p some e1 some e2 believe e1 john e2 and walk e2 mary c some x some e3 some e4 believe e3 x e4 and walk e4 mary tableau_test c p how do we capture that john believes all things that are true
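Before the implementation that follows, here is a minimal sketch of the procedure just described. It is illustrative only and assumes NLTK is installed with the prover available as nltk.inference.tableau.TableauProver; verbose=True prints the expansion trace so branch closures and variable instantiations can be followed.

from nltk.sem.logic import Expression
from nltk.inference.tableau import TableauProver

read = Expression.fromstring
goal = read('mortal(Socrates)')
assumptions = [read('all x.(man(x) -> mortal(x))'), read('man(Socrates)')]

# The prover negates the goal, puts it and the assumptions on the agenda,
# and tries to close every branch; True means every branch closed.
print(TableauProver().prove(goal, assumptions, verbose=True))   # True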
from nltk.inference.api import BaseProverCommand, Prover from nltk.internals import Counter from nltk.sem.logic import ( AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, EqualityExpression, ExistsExpression, Expression, FunctionVariableExpression, IffExpression, ImpExpression, LambdaExpression, NegatedExpression, OrExpression, Variable, VariableExpression, unique_variable, ) _counter = Counter() class ProverParseError(Exception): pass class TableauProver(Prover): _assume_false = False def _prove(self, goal=None, assumptions=None, verbose=False): if not assumptions: assumptions = [] result = None try: agenda = Agenda() if goal: agenda.put(-goal) agenda.put_all(assumptions) debugger = Debug(verbose) result = self._attempt_proof(agenda, set(), set(), debugger) except RuntimeError as e: if self._assume_false and str(e).startswith( "maximum recursion depth exceeded" ): result = False else: if verbose: print(e) else: raise e return (result, "\n".join(debugger.lines)) def _attempt_proof(self, agenda, accessible_vars, atoms, debug): (current, context), category = agenda.pop_first() if not current: debug.line("AGENDA EMPTY") return False proof_method = { Categories.ATOM: self._attempt_proof_atom, Categories.PROP: self._attempt_proof_prop, Categories.N_ATOM: self._attempt_proof_n_atom, Categories.N_PROP: self._attempt_proof_n_prop, Categories.APP: self._attempt_proof_app, Categories.N_APP: self._attempt_proof_n_app, Categories.N_EQ: self._attempt_proof_n_eq, Categories.D_NEG: self._attempt_proof_d_neg, Categories.N_ALL: self._attempt_proof_n_all, Categories.N_EXISTS: self._attempt_proof_n_some, Categories.AND: self._attempt_proof_and, Categories.N_OR: self._attempt_proof_n_or, Categories.N_IMP: self._attempt_proof_n_imp, Categories.OR: self._attempt_proof_or, Categories.IMP: self._attempt_proof_imp, Categories.N_AND: self._attempt_proof_n_and, Categories.IFF: self._attempt_proof_iff, Categories.N_IFF: self._attempt_proof_n_iff, Categories.EQ: self._attempt_proof_eq, Categories.EXISTS: self._attempt_proof_some, Categories.ALL: self._attempt_proof_all, }[category] debug.line((current, context)) return proof_method(current, context, agenda, accessible_vars, atoms, debug) def _attempt_proof_atom( self, current, context, agenda, accessible_vars, atoms, debug ): if (current, True) in atoms: debug.line("CLOSED", 1) return True if context: if isinstance(context.term, NegatedExpression): current = current.negate() agenda.put(context(current).simplify()) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) else: agenda.mark_alls_fresh() return self._attempt_proof( agenda, accessible_vars | set(current.args), atoms | {(current, False)}, debug + 1, ) def _attempt_proof_n_atom( self, current, context, agenda, accessible_vars, atoms, debug ): if (current.term, False) in atoms: debug.line("CLOSED", 1) return True if context: if isinstance(context.term, NegatedExpression): current = current.negate() agenda.put(context(current).simplify()) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) else: agenda.mark_alls_fresh() return self._attempt_proof( agenda, accessible_vars | set(current.term.args), atoms | {(current.term, True)}, debug + 1, ) def _attempt_proof_prop( self, current, context, agenda, accessible_vars, atoms, debug ): if (current, True) in atoms: debug.line("CLOSED", 1) return True agenda.mark_alls_fresh() return self._attempt_proof( agenda, accessible_vars, atoms | {(current, False)}, debug + 1 ) def _attempt_proof_n_prop( self, current, 
context, agenda, accessible_vars, atoms, debug ): if (current.term, False) in atoms: debug.line("CLOSED", 1) return True agenda.mark_alls_fresh() return self._attempt_proof( agenda, accessible_vars, atoms | {(current.term, True)}, debug + 1 ) def _attempt_proof_app( self, current, context, agenda, accessible_vars, atoms, debug ): f, args = current.uncurry() for i, arg in enumerate(args): if not TableauProver.is_atom(arg): ctx = f nv = Variable("X%s" % _counter.get()) for j, a in enumerate(args): ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) if context: ctx = context(ctx).simplify() ctx = LambdaExpression(nv, ctx) agenda.put(arg, ctx) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) raise Exception("If this method is called, there must be a non-atomic argument") def _attempt_proof_n_app( self, current, context, agenda, accessible_vars, atoms, debug ): f, args = current.term.uncurry() for i, arg in enumerate(args): if not TableauProver.is_atom(arg): ctx = f nv = Variable("X%s" % _counter.get()) for j, a in enumerate(args): ctx = ctx(VariableExpression(nv)) if i == j else ctx(a) if context: ctx = context(ctx).simplify() ctx = LambdaExpression(nv, -ctx) agenda.put(-arg, ctx) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) raise Exception("If this method is called, there must be a non-atomic argument") def _attempt_proof_n_eq( self, current, context, agenda, accessible_vars, atoms, debug ): if current.term.first == current.term.second: debug.line("CLOSED", 1) return True agenda[Categories.N_EQ].add((current, context)) current._exhausted = True return self._attempt_proof( agenda, accessible_vars | {current.term.first, current.term.second}, atoms, debug + 1, ) def _attempt_proof_d_neg( self, current, context, agenda, accessible_vars, atoms, debug ): agenda.put(current.term.term, context) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_n_all( self, current, context, agenda, accessible_vars, atoms, debug ): agenda[Categories.EXISTS].add( (ExistsExpression(current.term.variable, -current.term.term), context) ) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_n_some( self, current, context, agenda, accessible_vars, atoms, debug ): agenda[Categories.ALL].add( (AllExpression(current.term.variable, -current.term.term), context) ) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_and( self, current, context, agenda, accessible_vars, atoms, debug ): agenda.put(current.first, context) agenda.put(current.second, context) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_n_or( self, current, context, agenda, accessible_vars, atoms, debug ): agenda.put(-current.term.first, context) agenda.put(-current.term.second, context) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_n_imp( self, current, context, agenda, accessible_vars, atoms, debug ): agenda.put(current.term.first, context) agenda.put(-current.term.second, context) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_or( self, current, context, agenda, accessible_vars, atoms, debug ): new_agenda = agenda.clone() agenda.put(current.first, context) new_agenda.put(current.second, context) return self._attempt_proof( agenda, accessible_vars, atoms, debug + 1 ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_imp( self, current, 
context, agenda, accessible_vars, atoms, debug ): new_agenda = agenda.clone() agenda.put(-current.first, context) new_agenda.put(current.second, context) return self._attempt_proof( agenda, accessible_vars, atoms, debug + 1 ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_n_and( self, current, context, agenda, accessible_vars, atoms, debug ): new_agenda = agenda.clone() agenda.put(-current.term.first, context) new_agenda.put(-current.term.second, context) return self._attempt_proof( agenda, accessible_vars, atoms, debug + 1 ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_iff( self, current, context, agenda, accessible_vars, atoms, debug ): new_agenda = agenda.clone() agenda.put(current.first, context) agenda.put(current.second, context) new_agenda.put(-current.first, context) new_agenda.put(-current.second, context) return self._attempt_proof( agenda, accessible_vars, atoms, debug + 1 ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_n_iff( self, current, context, agenda, accessible_vars, atoms, debug ): new_agenda = agenda.clone() agenda.put(current.term.first, context) agenda.put(-current.term.second, context) new_agenda.put(-current.term.first, context) new_agenda.put(current.term.second, context) return self._attempt_proof( agenda, accessible_vars, atoms, debug + 1 ) and self._attempt_proof(new_agenda, accessible_vars, atoms, debug + 1) def _attempt_proof_eq( self, current, context, agenda, accessible_vars, atoms, debug ): agenda.put_atoms(atoms) agenda.replace_all(current.first, current.second) accessible_vars.discard(current.first) agenda.mark_neqs_fresh() return self._attempt_proof(agenda, accessible_vars, set(), debug + 1) def _attempt_proof_some( self, current, context, agenda, accessible_vars, atoms, debug ): new_unique_variable = VariableExpression(unique_variable()) agenda.put(current.term.replace(current.variable, new_unique_variable), context) agenda.mark_alls_fresh() return self._attempt_proof( agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 ) def _attempt_proof_all( self, current, context, agenda, accessible_vars, atoms, debug ): try: current._used_vars except AttributeError: current._used_vars = set() if accessible_vars: bv_available = accessible_vars - current._used_vars if bv_available: variable_to_use = list(bv_available)[0] debug.line("--> Using '%s'" % variable_to_use, 2) current._used_vars |= {variable_to_use} agenda.put( current.term.replace(current.variable, variable_to_use), context ) agenda[Categories.ALL].add((current, context)) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) else: debug.line("--> Variables Exhausted", 2) current._exhausted = True agenda[Categories.ALL].add((current, context)) return self._attempt_proof(agenda, accessible_vars, atoms, debug + 1) else: new_unique_variable = VariableExpression(unique_variable()) debug.line("--> Using '%s'" % new_unique_variable, 2) current._used_vars |= {new_unique_variable} agenda.put( current.term.replace(current.variable, new_unique_variable), context ) agenda[Categories.ALL].add((current, context)) agenda.mark_alls_fresh() return self._attempt_proof( agenda, accessible_vars | {new_unique_variable}, atoms, debug + 1 ) @staticmethod def is_atom(e): if isinstance(e, NegatedExpression): e = e.term if isinstance(e, ApplicationExpression): for arg in e.args: if not TableauProver.is_atom(arg): return False return True elif isinstance(e, 
AbstractVariableExpression) or isinstance( e, LambdaExpression ): return True else: return False class TableauProverCommand(BaseProverCommand): def __init__(self, goal=None, assumptions=None, prover=None): if prover is not None: assert isinstance(prover, TableauProver) else: prover = TableauProver() BaseProverCommand.__init__(self, prover, goal, assumptions) class Agenda: def __init__(self): self.sets = tuple(set() for i in range(21)) def clone(self): new_agenda = Agenda() set_list = [s.copy() for s in self.sets] new_allExs = set() for allEx, _ in set_list[Categories.ALL]: new_allEx = AllExpression(allEx.variable, allEx.term) try: new_allEx._used_vars = {used for used in allEx._used_vars} except AttributeError: new_allEx._used_vars = set() new_allExs.add((new_allEx, None)) set_list[Categories.ALL] = new_allExs set_list[Categories.N_EQ] = { (NegatedExpression(n_eq.term), ctx) for (n_eq, ctx) in set_list[Categories.N_EQ] } new_agenda.sets = tuple(set_list) return new_agenda def __getitem__(self, index): return self.sets[index] def put(self, expression, context=None): if isinstance(expression, AllExpression): ex_to_add = AllExpression(expression.variable, expression.term) try: ex_to_add._used_vars = {used for used in expression._used_vars} except AttributeError: ex_to_add._used_vars = set() else: ex_to_add = expression self.sets[self._categorize_expression(ex_to_add)].add((ex_to_add, context)) def put_all(self, expressions): for expression in expressions: self.put(expression) def put_atoms(self, atoms): for atom, neg in atoms: if neg: self[Categories.N_ATOM].add((-atom, None)) else: self[Categories.ATOM].add((atom, None)) def pop_first(self): for i, s in enumerate(self.sets): if s: if i in [Categories.N_EQ, Categories.ALL]: for ex in s: try: if not ex[0]._exhausted: s.remove(ex) return (ex, i) except AttributeError: s.remove(ex) return (ex, i) else: return (s.pop(), i) return ((None, None), None) def replace_all(self, old, new): for s in self.sets: for ex, ctx in s: ex.replace(old.variable, new) if ctx is not None: ctx.replace(old.variable, new) def mark_alls_fresh(self): for u, _ in self.sets[Categories.ALL]: u._exhausted = False def mark_neqs_fresh(self): for neq, _ in self.sets[Categories.N_EQ]: neq._exhausted = False def _categorize_expression(self, current): if isinstance(current, NegatedExpression): return self._categorize_NegatedExpression(current) elif isinstance(current, FunctionVariableExpression): return Categories.PROP elif TableauProver.is_atom(current): return Categories.ATOM elif isinstance(current, AllExpression): return Categories.ALL elif isinstance(current, AndExpression): return Categories.AND elif isinstance(current, OrExpression): return Categories.OR elif isinstance(current, ImpExpression): return Categories.IMP elif isinstance(current, IffExpression): return Categories.IFF elif isinstance(current, EqualityExpression): return Categories.EQ elif isinstance(current, ExistsExpression): return Categories.EXISTS elif isinstance(current, ApplicationExpression): return Categories.APP else: raise ProverParseError("cannot categorize %s" % current.__class__.__name__) def _categorize_NegatedExpression(self, current): negated = current.term if isinstance(negated, NegatedExpression): return Categories.D_NEG elif isinstance(negated, FunctionVariableExpression): return Categories.N_PROP elif TableauProver.is_atom(negated): return Categories.N_ATOM elif isinstance(negated, AllExpression): return Categories.N_ALL elif isinstance(negated, AndExpression): return Categories.N_AND elif 
isinstance(negated, OrExpression): return Categories.N_OR elif isinstance(negated, ImpExpression): return Categories.N_IMP elif isinstance(negated, IffExpression): return Categories.N_IFF elif isinstance(negated, EqualityExpression): return Categories.N_EQ elif isinstance(negated, ExistsExpression): return Categories.N_EXISTS elif isinstance(negated, ApplicationExpression): return Categories.N_APP else: raise ProverParseError("cannot categorize %s" % negated.__class__.__name__) class Debug: def __init__(self, verbose, indent=0, lines=None): self.verbose = verbose self.indent = indent if not lines: lines = [] self.lines = lines def __add__(self, increment): return Debug(self.verbose, self.indent + 1, self.lines) def line(self, data, indent=0): if isinstance(data, tuple): ex, ctx = data if ctx: data = f"{ex}, {ctx}" else: data = "%s" % ex if isinstance(ex, AllExpression): try: used_vars = "[%s]" % ( ",".join("%s" % ve.variable.name for ve in ex._used_vars) ) data += ": %s" % used_vars except AttributeError: data += ": []" newline = "{}{}".format(" " * (self.indent + indent), data) self.lines.append(newline) if self.verbose: print(newline) class Categories: ATOM = 0 PROP = 1 N_ATOM = 2 N_PROP = 3 APP = 4 N_APP = 5 N_EQ = 6 D_NEG = 7 N_ALL = 8 N_EXISTS = 9 AND = 10 N_OR = 11 N_IMP = 12 OR = 13 IMP = 14 N_AND = 15 IFF = 16 N_IFF = 17 EQ = 18 EXISTS = 19 ALL = 20 def testTableauProver(): tableau_test("P | -P") tableau_test("P & -P") tableau_test("Q", ["P", "(P -> Q)"]) tableau_test("man(x)") tableau_test("(man(x) -> man(x))") tableau_test("(man(x) -> --man(x))") tableau_test("-(man(x) and -man(x))") tableau_test("(man(x) or -man(x))") tableau_test("(man(x) -> man(x))") tableau_test("-(man(x) and -man(x))") tableau_test("(man(x) or -man(x))") tableau_test("(man(x) -> man(x))") tableau_test("(man(x) iff man(x))") tableau_test("-(man(x) iff -man(x))") tableau_test("all x.man(x)") tableau_test("all x.all y.((x = y) -> (y = x))") tableau_test("all x.all y.all z.(((x = y) & (y = z)) -> (x = z))") p1 = "all x.(man(x) -> mortal(x))" p2 = "man(Socrates)" c = "mortal(Socrates)" tableau_test(c, [p1, p2]) p1 = "all x.(man(x) -> walks(x))" p2 = "man(John)" c = "some y.walks(y)" tableau_test(c, [p1, p2]) p = "((x = y) & walks(y))" c = "walks(x)" tableau_test(c, [p]) p = "((x = y) & ((y = z) & (z = w)))" c = "(x = w)" tableau_test(c, [p]) p = "some e1.some e2.(believe(e1,john,e2) & walk(e2,mary))" c = "some e0.walk(e0,mary)" tableau_test(c, [p]) c = "(exists x.exists z3.((x = Mary) & ((z3 = John) & sees(z3,x))) <-> exists x.exists z4.((x = John) & ((z4 = Mary) & sees(x,z4))))" tableau_test(c) def testHigherOrderTableauProver(): tableau_test("believe(j, -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]) tableau_test("believe(j, lie(b) & cheat(b))", ["believe(j, lie(b))"]) tableau_test( "believe(j, lie(b))", ["lie(b)"] ) tableau_test( "believe(j, know(b, cheat(b)))", ["believe(j, know(b, lie(b)) & know(b, steals(b) & cheat(b)))"], ) tableau_test("P(Q(y), R(y) & R(z))", ["P(Q(x) & Q(y), R(y) & R(z))"]) tableau_test("believe(j, cheat(b) & lie(b))", ["believe(j, lie(b) & cheat(b))"]) tableau_test("believe(j, -cheat(b) & -lie(b))", ["believe(j, -lie(b) & -cheat(b))"]) def tableau_test(c, ps=None, verbose=False): pc = Expression.fromstring(c) pps = [Expression.fromstring(p) for p in ps] if ps else [] if not ps: ps = [] print( "%s |- %s: %s" % (", ".join(ps), pc, TableauProver().prove(pc, pps, verbose=verbose)) ) def demo(): testTableauProver() testHigherOrderTableauProver() if __name__ == "__main__": demo()
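Usage sketch for the command wrapper defined above, assuming NLTK is installed; the tautology and contradiction examples are taken from the module's own test formulas, and proof() returns the tableau trace recorded during prove().

from nltk.sem.logic import Expression
from nltk.inference.tableau import TableauProverCommand

read = Expression.fromstring
cmd = TableauProverCommand(read('(man(x) -> man(x))'))
print(cmd.prove())    # True: the negated goal closes every branch
print(cmd.proof())    # the recorded expansion steps

# An unprovable goal is reported as False rather than raising.
print(TableauProverCommand(read('(man(x) & -man(x))')).prove())   # False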
natural language toolkit internal utility functions c 20012023 nltk project steven bird stevenbird1gmail com edward loper edlopergmail com nitin madnani nmadnaniets org url https www nltk org for license information see license txt java via commandline xx add classpath option to configjava configure nltk s java interface by letting nltk know where it can find the java binary and what extra options if any should be passed to java when it is run param bin the full path to the java binary if not specified then nltk will search the system for a java binary and if one is not found it will raise a lookuperror exception type bin str param options a list of options that should be passed to the java binary when it is called a common value is xmx512m which tells java binary to increase the maximum heap size to 512 megabytes if no options are specified then do not modify the options list type options liststr execute the given java command by opening a subprocess that calls java if java has not yet been configured it will be configured by calling configjava with no arguments param cmd the java command that should be called formatted as a list of strings typically the first string will be the name of the java class and the remaining strings will be arguments for that java class type cmd liststr param classpath a separated list of directories jar archives and zip archives to search for class files type classpath str param stdin specify the executed program s standard input file handles respectively valid values are subprocess pipe an existing file descriptor a positive integer an existing file object pipe stdout devnull and none subprocess pipe indicates that a new pipe to the child should be created with none no redirection will occur the child s file handles will be inherited from the parent additionally stderr can be subprocess stdout which indicates that the stderr data from the applications should be captured into the same file handle as for stdout param stdout specify the executed program s standard output file handle see stdin for valid values param stderr specify the executed program s standard error file handle see stdin for valid values param blocking if false then return immediately after spawning the subprocess in this case the return value is the popen object and not a stdout stderr tuple return if blockingtrue then return a tuple stdout stderr containing the stdout and stderr outputs generated by the java command if the stdout and stderr parameters were set to subprocess pipe or none otherwise if blockingfalse then return a subprocess popen object raise oserror if the java command returns a nonzero return code make sure we know where a java binary is set up the classpath construct the full command string call java via a subprocess check the return code parsing exception raised by read functions when they fail param position the index in the input string where an error occurred param expected what was expected when an error occurred if a python string literal begins at the specified position in the given string then return a tuple val endposition containing the value of the string literal and the position where it ends otherwise raise a readerror param s a string that will be checked to see if within which a python string literal exists type s str param startposition the specified beginning position of the string s to begin regex matching type startposition int return a tuple containing the matched string literal evaluated as a string and the end position of the string literal rtype 
tuplestr int raise readerror if the stringstartre regex doesn t return a match in s at startposition i e open quote if the stringendre regex doesn t return a match in s at the end of the first match i e close quote raise valueerror if an invalid string i e contains an invalid escape sequence is passed into the eval example from nltk internals import readstr readstr hello world 0 hello 7 read the open quote and any modifiers find the close quote process it using eval strings with invalid escape sequences might raise valueerror if an integer begins at the specified position in the given string then return a tuple val endposition containing the value of the integer and the position where it ends otherwise raise a readerror param s a string that will be checked to see if within which a python integer exists type s str param startposition the specified beginning position of the string s to begin regex matching type startposition int return a tuple containing the matched integer casted to an int and the end position of the int in s rtype tupleint int raise readerror if the readintre regex doesn t return a match in s at startposition example from nltk internals import readint readint 42 is the answer 0 42 2 if an integer or float begins at the specified position in the given string then return a tuple val endposition containing the value of the number and the position where it ends otherwise raise a readerror param s a string that will be checked to see if within which a python number exists type s str param startposition the specified beginning position of the string s to begin regex matching type startposition int return a tuple containing the matched number casted to a float and the end position of the number in s rtype tuplefloat int raise readerror if the readnumbervalue regex doesn t return a match in s at startposition example from nltk internals import readnumber readnumber pi is 3 14159 6 3 14159 13 check if a method has been overridden return true if method overrides some method with the same name in a base class this is typically used when defining abstract base classes or interfaces to allow subclasses to define either of two related methods class eateri subclass must define eat or batcheat def eatself food if overriddenself batcheat return self batcheatfood0 else raise notimplementederror def batcheatself foods return self eatfood for food in foods type method instance method return the method resolution order for cls i e a list containing cls and all its base classes in the order in which they would be checked by getattr for newstyle classes this is just cls mro for classic classes this can be obtained by a depthfirst lefttoright traversal of bases deprecation decorator base class xx dedent msg first if it comes from a docstring add an epytext field to a given object s docstring indent if we already have a docstring then add a blank line to separate it from the new field and check its indentation if obj doc obj doc obj doc rstrip nn indents re findallr n s obj doc expandtabs if indents indent minindents if we don t have a docstring add an empty one else obj doc obj doc textwrap fill ffield message initialindentindent subsequentindentindent def deprecatedmessage def decoratorfunc msg ffunction func name has been deprecated message msg n textwrap fillmsg initialindent subsequentindent def newfuncargs kwargs warnings warnmsg categorydeprecationwarning stacklevel2 return funcargs kwargs copy the old function s name docstring dict newfunc dict updatefunc dict newfunc name func name 
newfunc doc func doc newfunc deprecated true add a deprecated field to the docstring addepytextfieldnewfunc deprecated message return newfunc return decorator class deprecated def newcls args kwargs figure out which class is the deprecated one depcls none for base in mrocls if deprecated in base bases depcls base break assert depcls unable to determine which base is deprecated construct an appropriate warning doc depcls doc or strip if there s a deprecated field strip off the field marker doc re subrasdeprecated r doc strip off any indentation doc re subr ms doc construct a name string name class s depcls name if cls depcls name base class for s cls name put it all together msg fname has been deprecated doc wrap it msg n textwrap fillmsg initialindent subsequentindent warnings warnmsg categorydeprecationwarning stacklevel2 do the actual work of new return object newcls counter for unique naming class counter def initself initialvalue0 self value initialvalue def getself self value 1 return self value search for filesbinaries def findfileiter filename envvars searchpath filenamesnone urlnone verbosefalse findingdirfalse filenames filename filenames or assert isinstancefilename str assert not isinstancefilenames str assert not isinstancesearchpath str if isinstanceenvvars str envvars envvars split yielded false file exists no magic for alternative in filenames pathtofile os path joinfilename alternative if os path isfilepathtofile if verbose printffound filename pathtofile yielded true yield pathtofile check the bare alternatives if os path isfilealternative if verbose printffound filename alternative yielded true yield alternative check if the alternative is inside a file directory pathtofile os path joinfilename file alternative if os path isfilepathtofile if verbose printffound filename pathtofile yielded true yield pathtofile check environment variables for envvar in envvars if envvar in os environ if findingdir this is to file a directory instead of file yielded true yield os environenvvar for envdir in os environenvvar splitos pathsep check if the environment variable contains a direct path to the bin if os path isfileenvdir if verbose printffound filename envdir yielded true yield envdir check if the possible bin names exist inside the environment variable directories for alternative in filenames pathtofile os path joinenvdir alternative if os path isfilepathtofile if verbose printffound filename pathtofile yielded true yield pathtofile check if the alternative is inside a file directory pathtofile os path joinenvdir file alternative check if the alternative is inside a bin directory pathtofile os path joinenvdir bin alternative if os path isfilepathtofile if verbose printffound filename pathtofile yielded true yield pathtofile check the path list for directory in searchpath for alternative in filenames pathtofile os path joindirectory alternative if os path isfilepathtofile yielded true yield pathtofile if we re on a posix system then try using the which command to find the file if os name posix for alternative in filenames try p subprocess popen which alternative stdoutsubprocess pipe stderrsubprocess pipe stdout stderr p communicate path decodestdoutdatastdout strip if path endswithalternative and os path existspath if verbose printffound filename path yielded true yield path except keyboardinterrupt systemexit oserror raise finally pass if not yielded msg nltk was unable to find the s file nuse software specific configuration parameters filename if envvars msg or set the s 
environment variable envvars0 msg if searchpath msg nn searched in msg joinn s d for d in searchpath if url msg fnn for more information on filename see n url div 75 raise lookuperrorfnndivnmsgndiv def findfile filename envvars searchpath filenamesnone urlnone verbosefalse return next findfileiterfilename envvars searchpath filenames url verbose def finddir filename envvars searchpath filenamesnone urlnone verbosefalse return next findfileiter filename envvars searchpath filenames url verbose findingdirtrue def findbinaryiter name pathtobinnone envvars searchpath binarynamesnone urlnone verbosefalse yield from findfileiter pathtobin or name envvars searchpath binarynames url verbose def findbinary name pathtobinnone envvars searchpath binarynamesnone urlnone verbosefalse return next findbinaryiter name pathtobin envvars searchpath binarynames url verbose def findjariter namepattern pathtojarnone envvars searchpath urlnone verbosefalse isregexfalse assert isinstancenamepattern str assert not isinstancesearchpath str if isinstanceenvvars str envvars envvars split yielded false make sure we check the classpath first envvars classpath listenvvars if an explicit location was given then check it and yield it if it s present otherwise complain if pathtojar is not none if os path isfilepathtojar yielded true yield pathtojar else raise lookuperror fcould not find namepattern jar file at pathtojar check environment variables for envvar in envvars if envvar in os environ if envvar classpath classpath os environclasspath for cp in classpath splitos path pathsep cp os path expandusercp if os path isfilecp filename os path basenamecp if isregex and re matchnamepattern filename or not isregex and filename namepattern if verbose printffound namepattern cp yielded true yield cp the case where user put directory containing the jar file in the classpath if os path isdircp if not isregex if os path isfileos path joincp namepattern if verbose printffound namepattern cp yielded true yield os path joincp namepattern else look for file using regular expression for filename in os listdircp if re matchnamepattern filename if verbose print found s s namepattern os path joincp filename yielded true yield os path joincp filename else jarenv os path expanduseros environenvvar jariter os path joinjarenv pathtojar for pathtojar in os listdirjarenv if os path isdirjarenv else jarenv for pathtojar in jariter if os path isfilepathtojar filename os path basenamepathtojar if isregex and re matchnamepattern filename or not isregex and filename namepattern if verbose printffound namepattern pathtojar yielded true yield pathtojar check the path list for directory in searchpath if isregex for filename in os listdirdirectory pathtojar os path joindirectory filename if os path isfilepathtojar if re matchnamepattern filename if verbose printffound filename pathtojar yielded true yield pathtojar else pathtojar os path joindirectory namepattern if os path isfilepathtojar if verbose printffound namepattern pathtojar yielded true yield pathtojar if not yielded if nothing was found raise an error msg nltk was unable to find s namepattern if envvars msg set the s environment variable envvars0 msg textwrap fillmsg initialindent subsequentindent if searchpath msg nn searched in msg joinn s d for d in searchpath if url msg nn for more information on see n format namepattern url div 75 raise lookuperrorfnndivnmsgndiv def findjar namepattern pathtojarnone envvars searchpath urlnone verbosefalse isregexfalse return next findjariter namepattern 
pathtojar envvars searchpath url verbose isregex def findjarswithinpathpathtojars return os path joinroot filename for root dirnames filenames in os walkpathtojars for filename in fnmatch filterfilenames jar def decodestdoutdatastdoutdata import stdlib module when python is run from within the nltk directory tree the current directory is included at the beginning of the search path unfortunately that means that modules within nltk can sometimes shadow standard library modules as an example the stdlib inspect module will attempt to import the stdlib tokenize module but will instead end up importing nltk s tokenize module instead causing the import to fail wrapper for elementtree elements a wrapper around elementtree element objects whose main purpose is to provide nicer repr and str methods in addition any of the wrapped element s methods that return other element objects are overridden to wrap those values before returning them this makes elements more convenient to work with in interactive sessions and doctests at the expense of some efficiency prevent doublewrapping create and return a wrapper around a given element object if etree is an elementwrapper then etree is returned asis if isinstanceetree str etree elementtree fromstringetree self dictetree etree def unwrapself return self etree string representation def reprself s elementtree tostringself etree encodingutf8 decodeutf8 if lens 60 e s rfind if lens e 30 e 20 s fs 30 se return element r s def strself return elementtree tostringself etree encodingutf8 decodeutf8 rstrip element interface delegation passthrough def getattrself attrib return getattrself etree attrib def setattrself attr value return setattrself etree attr value def delattrself attr return delattrself etree attr def setitemself index element self etreeindex element def delitemself index del self etreeindex def setsliceself start stop elements self etreestart stop elements def delsliceself start stop del self etreestart stop def lenself return lenself etree element interface delegation wrap result def getitemself index return elementwrapperself etreeindex def getsliceself start stop return elementwrapperelt for elt in self etreestart stop def getchildrenself return elementwrapperelt for elt in self etree def getiteratorself tagnone return elementwrapperelt for elt in self etree getiteratortag def makeelementself tag attrib return elementwrapperself etree makeelementtag attrib def findself path elt self etree findpath if elt is none return elt else return elementwrapperelt def findallself path return elementwrapperelt for elt in self etree findallpath helper for handling slicing def sliceboundssequence sliceobj allowstepfalse start stop sliceobj start sliceobj stop if allowstep is true then include the step in our return value tuple if allowstep step sliceobj step if step is none step 1 use a recursive call without allowstep to find the slice bounds if step is negative then the roles of start and stop in terms of default values etc are swapped if step 0 start stop sliceboundssequence slicestop start else start stop sliceboundssequence slicestart stop return start stop step otherwise make sure that no nondefault step value is used elif sliceobj step not in none 1 raise valueerror slices with steps are not supported by s sequence class name supply default offsets if start is none start 0 if stop is none stop lensequence handle negative indices if start 0 start max0 lensequence start if stop 0 stop max0 lensequence stop make sure stop doesn t go past the end of the list 
note that we avoid calculating lensequence if possible because for lazy sequences calculating the length of a sequence can be expensive if stop 0 try sequencestop 1 except indexerror stop lensequence make sure start isn t past stop start minstart stop that s all folks return start stop permission checking def iswritablepath ensure that it exists if not os path existspath return false if we re on a posix system check its permissions if hasattros getuid statdata os statpath perm stat simodestatdata stmode is it worldwritable if perm 0o002 return true do we own it elif statdata stuid os getuid and perm 0o200 return true are we in a group that can write to it elif statdata stgid in os getgid os getgroups and perm 0o020 return true otherwise we can t write to it else return false otherwise we ll assume it s writable xx should we do other checks on other platforms return true nltk error reporting def raiseunorderabletypesordering a b raise typeerror unorderable types s s s typea name ordering typeb name natural language toolkit internal utility functions c 2001 2023 nltk project steven bird stevenbird1 gmail com edward loper edloper gmail com nitin madnani nmadnani ets org url https www nltk org for license information see license txt java via command line xx add classpath option to config_java configure nltk s java interface by letting nltk know where it can find the java binary and what extra options if any should be passed to java when it is run param bin the full path to the java binary if not specified then nltk will search the system for a java binary and if one is not found it will raise a lookuperror exception type bin str param options a list of options that should be passed to the java binary when it is called a common value is xmx512m which tells java binary to increase the maximum heap size to 512 megabytes if no options are specified then do not modify the options list type options list str execute the given java command by opening a subprocess that calls java if java has not yet been configured it will be configured by calling config_java with no arguments param cmd the java command that should be called formatted as a list of strings typically the first string will be the name of the java class and the remaining strings will be arguments for that java class type cmd list str param classpath a separated list of directories jar archives and zip archives to search for class files type classpath str param stdin specify the executed program s standard input file handles respectively valid values are subprocess pipe an existing file descriptor a positive integer an existing file object pipe stdout devnull and none subprocess pipe indicates that a new pipe to the child should be created with none no redirection will occur the child s file handles will be inherited from the parent additionally stderr can be subprocess stdout which indicates that the stderr data from the applications should be captured into the same file handle as for stdout param stdout specify the executed program s standard output file handle see stdin for valid values param stderr specify the executed program s standard error file handle see stdin for valid values param blocking if false then return immediately after spawning the subprocess in this case the return value is the popen object and not a stdout stderr tuple return if blocking true then return a tuple stdout stderr containing the stdout and stderr outputs generated by the java command if the stdout and stderr parameters were set to subprocess pipe or none 
otherwise if blocking false then return a subprocess popen object raise oserror if the java command returns a nonzero return code make sure we know where a java binary is set up the classpath construct the full command string call java via a subprocess check the return code parsing exception raised by read_ functions when they fail param position the index in the input string where an error occurred param expected what was expected when an error occurred if a python string literal begins at the specified position in the given string then return a tuple val end_position containing the value of the string literal and the position where it ends otherwise raise a readerror param s a string that will be checked to see if within which a python string literal exists type s str param start_position the specified beginning position of the string s to begin regex matching type start_position int return a tuple containing the matched string literal evaluated as a string and the end position of the string literal rtype tuple str int raise readerror if the _string_start_re regex doesn t return a match in s at start_position i e open quote if the _string_end_re regex doesn t return a match in s at the end of the first match i e close quote raise valueerror if an invalid string i e contains an invalid escape sequence is passed into the eval example from nltk internals import read_str read_str hello world 0 hello 7 read the open quote and any modifiers find the close quote process it using eval strings with invalid escape sequences might raise valueerror if an integer begins at the specified position in the given string then return a tuple val end_position containing the value of the integer and the position where it ends otherwise raise a readerror param s a string that will be checked to see if within which a python integer exists type s str param start_position the specified beginning position of the string s to begin regex matching type start_position int return a tuple containing the matched integer casted to an int and the end position of the int in s rtype tuple int int raise readerror if the _read_int_re regex doesn t return a match in s at start_position example from nltk internals import read_int read_int 42 is the answer 0 42 2 if an integer or float begins at the specified position in the given string then return a tuple val end_position containing the value of the number and the position where it ends otherwise raise a readerror param s a string that will be checked to see if within which a python number exists type s str param start_position the specified beginning position of the string s to begin regex matching type start_position int return a tuple containing the matched number casted to a float and the end position of the number in s rtype tuple float int raise readerror if the _read_number_value regex doesn t return a match in s at start_position example from nltk internals import read_number read_number pi is 3 14159 6 3 14159 13 check if a method has been overridden return true if method overrides some method with the same name in a base class this is typically used when defining abstract base classes or interfaces to allow subclasses to define either of two related methods class eateri subclass must define eat or batch_eat def eat self food if overridden self batch_eat return self batch_eat food 0 else raise notimplementederror def batch_eat self foods return self eat food for food in foods type method instance method return the method resolution order for cls i e a list containing 
cls and all its base classes in the order in which they would be checked by getattr for new style classes this is just cls __mro__ for classic classes this can be obtained by a depth first left to right traversal of __bases__ deprecation decorator base class xx dedent msg first if it comes from a docstring add an epytext field to a given object s docstring if we already have a docstring then add a blank line to separate it from the new field and check its indentation if we don t have a docstring add an empty one a decorator used to mark functions as deprecated this will cause a warning to be printed the when the function is used usage from nltk internals import deprecated deprecated use foo instead def bar x print x 10 copy the old function s name docstring dict add a deprecated field to the docstring a base class used to mark deprecated classes a typical usage is to alert users that the name of a class has changed from nltk internals import deprecated class newclassname pass all logic goes here class oldclassname deprecated newclassname use newclassname instead the docstring of the deprecated class will be used in the deprecation warning message figure out which class is the deprecated one construct an appropriate warning if there s a deprecated field strip off the field marker strip off any indentation construct a name string put it all together wrap it do the actual work of __new__ counter for unique naming a counter that auto increments each time its value is read search for files binaries search for a file to be used by nltk param filename the name or path of the file param env_vars a list of environment variable names to check param file_names a list of alternative file names to check param searchpath list of directories to search param url url presented to user for download help param verbose whether or not to print path when a file is found file exists no magic check the bare alternatives check if the alternative is inside a file directory check environment variables this is to file a directory instead of file check if the environment variable contains a direct path to the bin check if the possible bin names exist inside the environment variable directories check if the alternative is inside a file directory path_to_file os path join env_dir file alternative check if the alternative is inside a bin directory check the path list if we re on a posix system then try using the which command to find the file search for a file to be used by nltk param name the name or path of the file param path_to_bin the user supplied binary location deprecated param env_vars a list of environment variable names to check param file_names a list of alternative file names to check param searchpath list of directories to search param url url presented to user for download help param verbose whether or not to print path when a file is found search for a jar that is used by nltk param name_pattern the name of the jar file param path_to_jar the user supplied jar location or none param env_vars a list of environment variable names to check in addition to the classpath variable which is checked by default param searchpath list of directories to search param is_regex whether name is a regular expression make sure we check the classpath first if an explicit location was given then check it and yield it if it s present otherwise complain check environment variables the case where user put directory containing the jar file in the classpath look for file using regular expression check the path list if nothing was 
found raise an error convert data read from stdout stderr to unicode import stdlib module when python is run from within the nltk directory tree the current directory is included at the beginning of the search path unfortunately that means that modules within nltk can sometimes shadow standard library modules as an example the stdlib inspect module will attempt to import the stdlib tokenize module but will instead end up importing nltk s tokenize module instead causing the import to fail wrapper for elementtree elements a wrapper around elementtree element objects whose main purpose is to provide nicer __repr__ and __str__ methods in addition any of the wrapped element s methods that return other element objects are overridden to wrap those values before returning them this makes elements more convenient to work with in interactive sessions and doctests at the expense of some efficiency prevent double wrapping create and return a wrapper around a given element object if etree is an elementwrapper then etree is returned as is initialize a new element wrapper for etree if etree is a string then it will be converted to an element object using elementtree fromstring first elementwrapper test test element xml version 1 0 encoding utf8 n test return the element object wrapped by this wrapper string representation return the result of applying elementtree tostring to the wrapped element object element interface delegation pass through element interface delegation wrap result helper for handling slicing given a slice return the corresponding start stop bounds taking into account none indices and negative indices the following guarantees are made for the returned start and stop values 0 start len sequence 0 stop len sequence start stop raise valueerror if slice_obj step is not none param allow_step if true then the slice object may have a non none step if it does then return a tuple start stop step if allow_step is true then include the step in our return value tuple use a recursive call without allow_step to find the slice bounds if step is negative then the roles of start and stop in terms of default values etc are swapped otherwise make sure that no non default step value is used supply default offsets handle negative indices make sure stop doesn t go past the end of the list note that we avoid calculating len sequence if possible because for lazy sequences calculating the length of a sequence can be expensive make sure start isn t past stop that s all folks permission checking ensure that it exists if we re on a posix system check its permissions is it world writable do we own it are we in a group that can write to it otherwise we can t write to it otherwise we ll assume it s writable xx should we do other checks on other platforms nltk error reporting
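A minimal sketch of the deprecation helpers described above, assuming NLTK is importable. The function bar and the classes NewClassName/OldClassName are illustrative names mirroring the usage examples in the docstrings, not part of NLTK itself.

import warnings

from nltk.internals import Deprecated, deprecated


@deprecated("Use foo() instead.")
def bar(x):
    # old function kept only for backwards compatibility
    return x * 10


class NewClassName:
    pass  # all logic goes here


class OldClassName(Deprecated, NewClassName):
    """Use NewClassName instead."""


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    bar(4)            # emits a DeprecationWarning via the decorator
    OldClassName()    # emits a DeprecationWarning via the Deprecated base class

print(len(caught), "deprecation warnings issued")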
import fnmatch import locale import os import re import stat import subprocess import sys import textwrap import types import warnings from xml.etree import ElementTree _java_bin = None _java_options = [] def config_java(bin=None, options=None, verbose=False): global _java_bin, _java_options _java_bin = find_binary( "java", bin, env_vars=["JAVAHOME", "JAVA_HOME"], verbose=verbose, binary_names=["java.exe"], ) if options is not None: if isinstance(options, str): options = options.split() _java_options = list(options) def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True): subprocess_output_dict = { "pipe": subprocess.PIPE, "stdout": subprocess.STDOUT, "devnull": subprocess.DEVNULL, } stdin = subprocess_output_dict.get(stdin, stdin) stdout = subprocess_output_dict.get(stdout, stdout) stderr = subprocess_output_dict.get(stderr, stderr) if isinstance(cmd, str): raise TypeError("cmd should be a list of strings") if _java_bin is None: config_java() if isinstance(classpath, str): classpaths = [classpath] else: classpaths = list(classpath) classpath = os.path.pathsep.join(classpaths) cmd = list(cmd) cmd = ["-cp", classpath] + cmd cmd = [_java_bin] + _java_options + cmd p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr) if not blocking: return p (stdout, stderr) = p.communicate() if p.returncode != 0: print(_decode_stdoutdata(stderr)) raise OSError("Java command failed : " + str(cmd)) return (stdout, stderr) class ReadError(ValueError): def __init__(self, expected, position): ValueError.__init__(self, expected, position) self.expected = expected self.position = position def __str__(self): return f"Expected {self.expected} at {self.position}" _STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')") def read_str(s, start_position): m = _STRING_START_RE.match(s, start_position) if not m: raise ReadError("open quote", start_position) quotemark = m.group(1) _STRING_END_RE = re.compile(r"\\|%s" % quotemark) position = m.end() while True: match = _STRING_END_RE.search(s, position) if not match: raise ReadError("close quote", position) if match.group(0) == "\\": position = match.end() + 1 else: break try: return eval(s[start_position : match.end()]), match.end() except ValueError as e: raise ReadError("valid escape sequence", start_position) from e _READ_INT_RE = re.compile(r"-?\d+") def read_int(s, start_position): m = _READ_INT_RE.match(s, start_position) if not m: raise ReadError("integer", start_position) return int(m.group()), m.end() _READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?") def read_number(s, start_position): m = _READ_NUMBER_VALUE.match(s, start_position) if not m or not (m.group(1) or m.group(2)): raise ReadError("number", start_position) if m.group(2): return float(m.group()), m.end() else: return int(m.group()), m.end() def overridden(method): if isinstance(method, types.MethodType) and method.__self__.__class__ is not None: name = method.__name__ funcs = [ cls.__dict__[name] for cls in _mro(method.__self__.__class__) if name in cls.__dict__ ] return len(funcs) > 1 else: raise TypeError("Expected an instance method.") def _mro(cls): if isinstance(cls, type): return cls.__mro__ else: mro = [cls] for base in cls.__bases__: mro.extend(_mro(base)) return mro def _add_epytext_field(obj, field, message): indent = "" if obj.__doc__: obj.__doc__ = obj.__doc__.rstrip() + "\n\n" indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs()) if indents: indent = min(indents) else: obj.__doc__ = "" obj.__doc__ += textwrap.fill( 
f"@{field}: {message}", initial_indent=indent, subsequent_indent=indent + " ", ) def deprecated(message): def decorator(func): msg = f"Function {func.__name__}() has been deprecated. {message}" msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ") def newFunc(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning, stacklevel=2) return func(*args, **kwargs) newFunc.__dict__.update(func.__dict__) newFunc.__name__ = func.__name__ newFunc.__doc__ = func.__doc__ newFunc.__deprecated__ = True _add_epytext_field(newFunc, "deprecated", message) return newFunc return decorator class Deprecated: def __new__(cls, *args, **kwargs): dep_cls = None for base in _mro(cls): if Deprecated in base.__bases__: dep_cls = base break assert dep_cls, "Unable to determine which base is deprecated." doc = dep_cls.__doc__ or "".strip() doc = re.sub(r"\A\s*@deprecated:", r"", doc) doc = re.sub(r"(?m)^\s*", "", doc) name = "Class %s" % dep_cls.__name__ if cls != dep_cls: name += " (base class for %s)" % cls.__name__ msg = f"{name} has been deprecated. {doc}" msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ") warnings.warn(msg, category=DeprecationWarning, stacklevel=2) return object.__new__(cls) class Counter: def __init__(self, initial_value=0): self._value = initial_value def get(self): self._value += 1 return self._value def find_file_iter( filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False, finding_dir=False, ): file_names = [filename] + (file_names or []) assert isinstance(filename, str) assert not isinstance(file_names, str) assert not isinstance(searchpath, str) if isinstance(env_vars, str): env_vars = env_vars.split() yielded = False for alternative in file_names: path_to_file = os.path.join(filename, alternative) if os.path.isfile(path_to_file): if verbose: print(f"[Found {filename}: {path_to_file}]") yielded = True yield path_to_file if os.path.isfile(alternative): if verbose: print(f"[Found {filename}: {alternative}]") yielded = True yield alternative path_to_file = os.path.join(filename, "file", alternative) if os.path.isfile(path_to_file): if verbose: print(f"[Found {filename}: {path_to_file}]") yielded = True yield path_to_file for env_var in env_vars: if env_var in os.environ: if finding_dir: yielded = True yield os.environ[env_var] for env_dir in os.environ[env_var].split(os.pathsep): if os.path.isfile(env_dir): if verbose: print(f"[Found {filename}: {env_dir}]") yielded = True yield env_dir for alternative in file_names: path_to_file = os.path.join(env_dir, alternative) if os.path.isfile(path_to_file): if verbose: print(f"[Found {filename}: {path_to_file}]") yielded = True yield path_to_file path_to_file = os.path.join(env_dir, "bin", alternative) if os.path.isfile(path_to_file): if verbose: print(f"[Found {filename}: {path_to_file}]") yielded = True yield path_to_file for directory in searchpath: for alternative in file_names: path_to_file = os.path.join(directory, alternative) if os.path.isfile(path_to_file): yielded = True yield path_to_file if os.name == "posix": for alternative in file_names: try: p = subprocess.Popen( ["which", alternative], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = p.communicate() path = _decode_stdoutdata(stdout).strip() if path.endswith(alternative) and os.path.exists(path): if verbose: print(f"[Found {filename}: {path}]") yielded = True yield path except (KeyboardInterrupt, SystemExit, OSError): raise finally: pass if not yielded: msg = ( "NLTK was unable to find the %s 
file!" "\nUse software specific " "configuration parameters" % filename ) if env_vars: msg += " or set the %s environment variable" % env_vars[0] msg += "." if searchpath: msg += "\n\n Searched in:" msg += "".join("\n - %s" % d for d in searchpath) if url: msg += f"\n\n For more information on {filename}, see:\n <{url}>" div = "=" * 75 raise LookupError(f"\n\n{div}\n{msg}\n{div}") def find_file( filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False ): return next( find_file_iter(filename, env_vars, searchpath, file_names, url, verbose) ) def find_dir( filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False ): return next( find_file_iter( filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True ) ) def find_binary_iter( name, path_to_bin=None, env_vars=(), searchpath=(), binary_names=None, url=None, verbose=False, ): yield from find_file_iter( path_to_bin or name, env_vars, searchpath, binary_names, url, verbose ) def find_binary( name, path_to_bin=None, env_vars=(), searchpath=(), binary_names=None, url=None, verbose=False, ): return next( find_binary_iter( name, path_to_bin, env_vars, searchpath, binary_names, url, verbose ) ) def find_jar_iter( name_pattern, path_to_jar=None, env_vars=(), searchpath=(), url=None, verbose=False, is_regex=False, ): assert isinstance(name_pattern, str) assert not isinstance(searchpath, str) if isinstance(env_vars, str): env_vars = env_vars.split() yielded = False env_vars = ["CLASSPATH"] + list(env_vars) if path_to_jar is not None: if os.path.isfile(path_to_jar): yielded = True yield path_to_jar else: raise LookupError( f"Could not find {name_pattern} jar file at {path_to_jar}" ) for env_var in env_vars: if env_var in os.environ: if env_var == "CLASSPATH": classpath = os.environ["CLASSPATH"] for cp in classpath.split(os.path.pathsep): cp = os.path.expanduser(cp) if os.path.isfile(cp): filename = os.path.basename(cp) if ( is_regex and re.match(name_pattern, filename) or (not is_regex and filename == name_pattern) ): if verbose: print(f"[Found {name_pattern}: {cp}]") yielded = True yield cp if os.path.isdir(cp): if not is_regex: if os.path.isfile(os.path.join(cp, name_pattern)): if verbose: print(f"[Found {name_pattern}: {cp}]") yielded = True yield os.path.join(cp, name_pattern) else: for file_name in os.listdir(cp): if re.match(name_pattern, file_name): if verbose: print( "[Found %s: %s]" % ( name_pattern, os.path.join(cp, file_name), ) ) yielded = True yield os.path.join(cp, file_name) else: jar_env = os.path.expanduser(os.environ[env_var]) jar_iter = ( ( os.path.join(jar_env, path_to_jar) for path_to_jar in os.listdir(jar_env) ) if os.path.isdir(jar_env) else (jar_env,) ) for path_to_jar in jar_iter: if os.path.isfile(path_to_jar): filename = os.path.basename(path_to_jar) if ( is_regex and re.match(name_pattern, filename) or (not is_regex and filename == name_pattern) ): if verbose: print(f"[Found {name_pattern}: {path_to_jar}]") yielded = True yield path_to_jar for directory in searchpath: if is_regex: for filename in os.listdir(directory): path_to_jar = os.path.join(directory, filename) if os.path.isfile(path_to_jar): if re.match(name_pattern, filename): if verbose: print(f"[Found {filename}: {path_to_jar}]") yielded = True yield path_to_jar else: path_to_jar = os.path.join(directory, name_pattern) if os.path.isfile(path_to_jar): if verbose: print(f"[Found {name_pattern}: {path_to_jar}]") yielded = True yield path_to_jar if not yielded: msg = "NLTK was unable to find %s!" 
% name_pattern if env_vars: msg += " Set the %s environment variable" % env_vars[0] msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ") if searchpath: msg += "\n\n Searched in:" msg += "".join("\n - %s" % d for d in searchpath) if url: msg += "\n\n For more information, on {}, see:\n <{}>".format( name_pattern, url, ) div = "=" * 75 raise LookupError(f"\n\n{div}\n{msg}\n{div}") def find_jar( name_pattern, path_to_jar=None, env_vars=(), searchpath=(), url=None, verbose=False, is_regex=False, ): return next( find_jar_iter( name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex ) ) def find_jars_within_path(path_to_jars): return [ os.path.join(root, filename) for root, dirnames, filenames in os.walk(path_to_jars) for filename in fnmatch.filter(filenames, "*.jar") ] def _decode_stdoutdata(stdoutdata): if not isinstance(stdoutdata, bytes): return stdoutdata encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding()) if encoding is None: return stdoutdata.decode() return stdoutdata.decode(encoding) def import_from_stdlib(module): old_path = sys.path sys.path = [d for d in sys.path if d not in ("", ".")] m = __import__(module) sys.path = old_path return m class ElementWrapper: def __new__(cls, etree): if isinstance(etree, ElementWrapper): return etree else: return object.__new__(ElementWrapper) def __init__(self, etree): r if isinstance(etree, str): etree = ElementTree.fromstring(etree) self.__dict__["_etree"] = etree def unwrap(self): return self._etree def __repr__(self): s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8") if len(s) > 60: e = s.rfind("<") if (len(s) - e) > 30: e = -20 s = f"{s[:30]}...{s[e:]}" return "<Element %r>" % s def __str__(self): return ( ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip() ) def __getattr__(self, attrib): return getattr(self._etree, attrib) def __setattr__(self, attr, value): return setattr(self._etree, attr, value) def __delattr__(self, attr): return delattr(self._etree, attr) def __setitem__(self, index, element): self._etree[index] = element def __delitem__(self, index): del self._etree[index] def __setslice__(self, start, stop, elements): self._etree[start:stop] = elements def __delslice__(self, start, stop): del self._etree[start:stop] def __len__(self): return len(self._etree) def __getitem__(self, index): return ElementWrapper(self._etree[index]) def __getslice__(self, start, stop): return [ElementWrapper(elt) for elt in self._etree[start:stop]] def getchildren(self): return [ElementWrapper(elt) for elt in self._etree] def getiterator(self, tag=None): return (ElementWrapper(elt) for elt in self._etree.getiterator(tag)) def makeelement(self, tag, attrib): return ElementWrapper(self._etree.makeelement(tag, attrib)) def find(self, path): elt = self._etree.find(path) if elt is None: return elt else: return ElementWrapper(elt) def findall(self, path): return [ElementWrapper(elt) for elt in self._etree.findall(path)] def slice_bounds(sequence, slice_obj, allow_step=False): start, stop = (slice_obj.start, slice_obj.stop) if allow_step: step = slice_obj.step if step is None: step = 1 if step < 0: start, stop = slice_bounds(sequence, slice(stop, start)) else: start, stop = slice_bounds(sequence, slice(start, stop)) return start, stop, step elif slice_obj.step not in (None, 1): raise ValueError( "slices with steps are not supported by %s" % sequence.__class__.__name__ ) if start is None: start = 0 if stop is None: stop = len(sequence) if start < 0: start = 
max(0, len(sequence) + start) if stop < 0: stop = max(0, len(sequence) + stop) if stop > 0: try: sequence[stop - 1] except IndexError: stop = len(sequence) start = min(start, stop) return start, stop def is_writable(path): if not os.path.exists(path): return False if hasattr(os, "getuid"): statdata = os.stat(path) perm = stat.S_IMODE(statdata.st_mode) if perm & 0o002: return True elif statdata.st_uid == os.getuid() and (perm & 0o200): return True elif (statdata.st_gid in [os.getgid()] + os.getgroups()) and (perm & 0o020): return True else: return False return True def raise_unorderable_types(ordering, a, b): raise TypeError( "unorderable types: %s() %s %s()" % (type(a).__name__, ordering, type(b).__name__) )
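A quick sanity check of the string/number readers and the slice-bounds helper defined above, assuming NLTK is importable; the sample string and list are arbitrary. The expected values in the comments follow the docstring examples.

from nltk.internals import read_int, read_number, read_str, slice_bounds

s = "'hello' 42 3.14"
print(read_str(s, 0))      # ('hello', 7): value of the literal and its end position
print(read_int(s, 8))      # (42, 10)
print(read_number(s, 11))  # (3.14, 15)

# slice_bounds normalises None and negative indices against a concrete sequence
seq = list(range(10))
print(slice_bounds(seq, slice(None, -2)))                     # (0, 8)
print(slice_bounds(seq, slice(1, None, 2), allow_step=True))  # (1, 10, 2)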
natural language toolkit json encoderdecoder helpers c 20012023 nltk project steven xu xxustudent unimelb edu au url https www nltk org for license information see license txt register json tags so the nltk data loader knows what module and class to look for nltk uses simple tags to mark the types of objects but the fullyqualified tag nltk org 2011 prefix is also accepted in case anyone ends up using it decorates a class to register it s json tag decode nested objects first check if we have a tagged object natural language toolkit json encoder decoder helpers c 2001 2023 nltk project steven xu xxu student unimelb edu au url https www nltk org for license information see license txt register json tags so the nltk data loader knows what module and class to look for nltk uses simple tags to mark the types of objects but the fully qualified tag nltk org 2011 prefix is also accepted in case anyone ends up using it decorates a class to register it s json tag decode nested objects first check if we have a tagged object
import json

json_tags = {}

TAG_PREFIX = "!"


def register_tag(cls):
    json_tags[TAG_PREFIX + getattr(cls, "json_tag")] = cls
    return cls


class JSONTaggedEncoder(json.JSONEncoder):
    def default(self, obj):
        obj_tag = getattr(obj, "json_tag", None)
        if obj_tag is None:
            return super().default(obj)
        obj_tag = TAG_PREFIX + obj_tag
        obj = obj.encode_json_obj()
        return {obj_tag: obj}


class JSONTaggedDecoder(json.JSONDecoder):
    def decode(self, s):
        return self.decode_obj(super().decode(s))

    @classmethod
    def decode_obj(cls, obj):
        # decode nested objects first
        if isinstance(obj, dict):
            obj = {key: cls.decode_obj(val) for (key, val) in obj.items()}
        elif isinstance(obj, list):
            obj = list(cls.decode_obj(val) for val in obj)
        # check if we have a tagged object
        if not isinstance(obj, dict) or len(obj) != 1:
            return obj
        obj_tag = next(iter(obj.keys()))
        if not obj_tag.startswith("!"):
            return obj
        if obj_tag not in json_tags:
            raise ValueError("Unknown tag", obj_tag)
        obj_cls = json_tags[obj_tag]
        return obj_cls.decode_json_obj(obj[obj_tag])


__all__ = ["register_tag", "json_tags", "JSONTaggedEncoder", "JSONTaggedDecoder"]
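A hedged usage sketch for the JSON tag machinery above. Point is a made-up class and "demo.Point" a made-up tag; they only illustrate the register_tag / encode_json_obj / decode_json_obj round trip.

import json

from nltk.jsontags import JSONTaggedDecoder, JSONTaggedEncoder, register_tag


@register_tag
class Point:
    json_tag = "demo.Point"  # hypothetical tag, not used anywhere in NLTK

    def __init__(self, x, y):
        self.x, self.y = x, y

    def encode_json_obj(self):
        return {"x": self.x, "y": self.y}

    @classmethod
    def decode_json_obj(cls, obj):
        return cls(obj["x"], obj["y"])


payload = json.dumps(Point(1, 2), cls=JSONTaggedEncoder)
print(payload)                         # {"!demo.Point": {"x": 1, "y": 2}}
restored = json.loads(payload, cls=JSONTaggedDecoder)
print(restored.x, restored.y)          # 1 2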
natural language toolkit language codes c 20222023 nltk project eric kafe kafe ericgmail com url https www nltk org for license information see license txt iso6393 language codes c https iso6393 sil org translate between language names and language codes the iso6393 language codes were downloaded from the registration ity at https iso6393 sil org the iso6393 codeset is evolving so retired language codes are kept in the iso639retired dictionary which is used as fallback by the wrapper functions langname and langcode in order to support the lookup of retired codes the langcode function returns the current iso6393 code if there is one and falls back to the retired code otherwise as specified by bcp47 it returns the shortest 2letter code by default but 3letter codes are also available import nltk langnames as lgn lgn langname fri fri is a retired code western frisian the current code is different from the retired one lgn langcode western frisian fy lgn langcode western frisian typ 3 fry convert a composite bcp47 tag to a language name from nltk langnames import langname langname calatnesvalencia catalan latin spain valencian langname calatnesvalencia typshort catalan convert language name to iso6393 language code returns the short 2letter code by default if one is available and the 3letter code otherwise from nltk langnames import langcode langcode modern greek 1453 el specify typ3 to get the 3letter code langcode modern greek 1453 typ3 ell translate betwwen wikidata qcodes and bcp47 codes or names convert bcp47 tag to wikidata qcode tag2q ndsusddemv q4289225 convert wikidata qcode to bcp47 tag q2tag q4289225 ndsusddemv convert wikidata qcode to bcp47 full or short language name q2name q4289225 low german mecklenburgvorpommern q2name q4289225 short low german convert simple language name to wikidata qcode lang2q low german q25433 data dictionaries return inverse mapping but only if it is bijective if lendic keys lensetdic values return val key for key val in dic items else warnthis dictionary has no bijective inverse mapping bcp47 loadwikiq wikidata conversion table needs to be loaded explicitly wikibcp47 inversedictbcp47 wikiq iso639short aar aa abk ab afr af aka ak amh am ara ar arg an asm as ava av ave ae aym ay aze az bak ba bam bm bel be ben bn bis bi bod bo bos bs bre br bul bg cat ca ces cs cha ch che ce chu cu chv cv cor kw cos co cre cr cym cy dan da deu de div dv dzo dz ell el eng en epo eo est et eus eu ewe ee fao fo fas fa fij fj fin fi fra fr fry fy ful ff gla gd gle ga glg gl glv gv grn gn guj gu hat ht hau ha hbs sh heb he her hz hin hi hmo ho hrv hr hun hu hye hy ibo ig ido io iii ii iku iu ile ie ina ia ind id ipk ik isl is ita it jav jv jpn ja kal kl kan kn kas ks kat ka kau kr kaz kk khm km kik ki kin rw kir ky kom kv kon kg kor ko kua kj kur ku lao lo lat la lav lv lim li lin ln lit lt ltz lb lub lu lug lg mah mh mal ml mar mr mkd mk mlg mg mlt mt mon mn mri mi msa ms mya my nau na nav nv nbl nr nde nd ndo ng nep ne nld nl nno nn nob nb nor no nya ny oci oc oji oj ori or orm om oss os pan pa pli pi pol pl por pt pus ps que qu roh rm ron ro run rn rus ru sag sg san sa sin si slk sk slv sl sme se smo sm sna sn snd sd som so sot st spa es sqi sq srd sc srp sr ssw ss sun su swa sw swe sv tah ty tam ta tat tt tel te tgk tg tgl tl tha th tir ti ton to tsn tn tso ts tuk tk tur tr twi tw uig ug ukr uk urd ur uzb uz ven ve vie vi vol vo wln wa wol wo xho xh yid yi yor yo zha za zho zh zul zu iso639retired fri western frisian auv auvergnat gsc gascon lms limousin lnc languedocien prv 
provenal amd amap creole bgh bogan bnh banaw bvs belgian sign language ccy southern zhuang cit chittagonian flm falam chin jap jarura kob kohoroxitari mob moinba mzf aiku nhj tlalitzlipa nahuatl nhs southeastern puebla nahuatl occ occidental tmx tomyang tot patlachicontla totonac xmi miarr yib yinglish ztc lachirioag zapotec atf atuence bqe navarrolabourdin basque bsz souletin basque aex amerax ahe ahe aiz aari akn amikoana arf arafundi azr adzera bcx pamona bii bisu bke bengkulu blu hmong njua boc bakung kenyah bsd sarawak bisaya bwv bahau river kenyah bxt buxinhua byu buyang ccx northern zhuang cru cartana dat darang deng dyk land dayak eni enim fiz izere gen geman deng ggh garrehajuran itu itutang kds lahu shi knh kayan river kenyah krg north korowai krq krui kxg katingan lmt lematang lnt lintang lod berawan mbg northern nambikura mdo southwest gbaya mhv arakanese miv mimi mqd madang nky khiamniungan naga nxj nyadu ogn ogan ork orokaiva paj ipekatapuia pec southern pesisir pen penesak plm palembang poj lower pokomo pun pubian rae ranau rjb rajbanshi rws rawas sdd semendo sdi sindang kelingi skl selako slb kahumamahon saluan srj serawai suf tarpia suh suba suu sungkai szk sizaki tle southern marakwet tnj tanjong ttx tutong 1 ubm upper baram kenyah vky kayu agung vmo mukomuko wre ware xah kahayan xkm mahakam kenyah xuf kunfal yio dayao yi ymj muji yi ypl pula yi ypw puwa yi ywm wumeng yi yym yuanjiangmojiang yi mly malay individual language muw mundari xst silt e ope old persian scc serbian scr croatian xsk sakan mol moldavian aay aariya acc cubulco ach cbm yepocapa southwestern cakchiquel chs chumash ckc northern cakchiquel ckd south central cakchiquel cke eastern cakchiquel ckf southern cakchiquel cki santa mara de jess cakchiquel ckj santo domingo xenacoj cakchiquel ckk acatenango southwestern cakchiquel ckw western cakchiquel cnm ixtatn chuj cti tila chol cun cunn quich eml emilianoromagnolo eur europanto gmo gamogofadawro hsf southeastern huastec hva san lus potos huastec ixi nebaj ixil ixj chajul ixil jai western jacalteco mms southern mam mpf tajumulco mam mtz tacanec mvc central mam mvj todos santos cuchumatn mam poa eastern pokomam pob western pokomch pou southern pokomam ppv papav quj joyabaj quich qut west central quich quu eastern quich qxi san andrs quich sic malinguat stc santa cruz tlz toala tzb bachajn tzeltal tzc chamula tzotzil tze chenalh tzotzil tzs san andrs larrainzar tzotzil tzt western tzutujil tzu huixtn tzotzil tzz zinacantn tzotzil vlr vatrata yus chan santa cruz maya nfg nyeng nfk shakara agp paranan bhk albay bicolano bkb finallig btb beti cameroon cjr chorotega cmk chimakum drh darkhat drw darwazi gav gabutamon mof moheganmontauknarragansett mst cataelano mandaya myt sangab mandaya rmr cal sgl sanglechiishkashimi sul surigaonon sum sumomayangna tnf tangshewi wgw wagawaga ayx ayi china bjq southern betsimisaraka malagasy dha dhanwar india dkl kolum so dogon mja mahei nbf naxi noo nootka tie tingal tkk takpa baz tunen bjd bandjigali ccq chaungtha cka khumi awa chin dap nisi india dwl walo kumbe dogon elp elpaputih gbc garawa gio gelao hrr horuru ibi ibilo jar jarawa nigeria kdv kado kgh upper tanudan kalinga kpp paku karen kzh kenuzidongola lcq luhu mgx omati nln durango nahuatl pbz palu pgy pongyong sca sansu tlw south wemale unp worora wiw wirangu ybd yangbye yen yendang yma yamphe daf dan djl djiwarli ggr aghu tharnggalu ilw talur izi iziezaaikwomgbo meg mea mld malakhel mnt maykulan mwd mudbura myq forest maninka nbx ngura nlr ngarla pcr panang ppr piru tgg 
tangga wit wintu xia xiandao yiy yir yoront yos yos emo emok ggm gugu mini leg lengua lmm lamam mhh maskoy pidgin puz purum naga sap sanapan yuu yugh aam aramanik adp adap aue kxauein bmy bemba democratic republic of congo bxx borna democratic republic of congo byy buya dzd daza gfx mangetti dune xung gti gbatiri ime imeraguen kbf kakauhua koj sara dunjo kwq kwak kxe kakihum lii lingkhim mwj maligo nnx ngong oun oung pmu mirpur panjabi sgo songa thx the tsf southwestern tamang uok uokha xsj subi yds yiddish sign language ymt matortaygikaragas ynh yangho bgm baga mboteni btl bhatola cbe chipiajes cbh cagua coy coyaima cqu chilean quechua cum cumeral duj dhuwal ggn eastern gurung ggo southern gondi guv gey iap iapama ill iranun kgc kasseng kox coxima ktr kota marudu tinagas kvs kunggara kzj coastal kadazan kzt tambunan dusun nad nijadali nts natagaimas ome omejes pmc palumata pod ponares ppa pao pry pray 3 rna runa svr savara tdu tempasuk dusun thc tai hang tong tid tidong tmp tai mne tne tinoc kallahan toe tomedes xba kamba brazil xbx kabix xip xipinwa xkh karahawyana yri yar jeg jeng kgd kataang krm krim prb lua puk pu ko rie rien rsi rennellese sign language skk sok snh shinabo lsg lyons sign language mwx mediak mwy mosiro ncp ndaktup ais nataoran amis asd asas dit dirari dud hunsaare lba lui llo khlor myd maramba myi mina india nns ningye aoh arma ayy tayabas ayta bbz babalia creole arabic bpb barbacoas cca cauca cdg chamari dgu degaru drr dororo ekc eastern karnic gli guliguli kjf khalaj kxl nepali kurux kxu kui india lmz lumbee nxu narau plp palpa sdm semandang tbb tapeba xrq karranga xtz tasmanian zir ziriya thw thudam bic bikaru bij vaghatyabijimlegeri blg balau gji geji mvm muya ngo ngoni pat papitalai vki ijazuba wra warapu ajt judeotunisian arabic cug chungmboko lak laka nigeria lno lango south sudan pii pini smd sama snb sebuyau uun kulonpazeh wrd warduji wya wyandot iso639long inversedictiso639short iso639coderetired inversedictiso639retired natural language toolkit language codes c 2022 2023 nltk project eric kafe kafe eric gmail com url https www nltk org for license information see license txt iso639 3 language codes c https iso639 3 sil org translate between language names and language codes the iso639 3 language codes were downloaded from the registration ity at https iso639 3 sil org the iso639 3 codeset is evolving so retired language codes are kept in the iso639retired dictionary which is used as fallback by the wrapper functions langname and langcode in order to support the lookup of retired codes the langcode function returns the current iso639 3 code if there is one and falls back to the retired code otherwise as specified by bcp 47 it returns the shortest 2 letter code by default but 3 letter codes are also available import nltk langnames as lgn lgn langname fri fri is a retired code western frisian the current code is different from the retired one lgn langcode western frisian fy lgn langcode western frisian typ 3 fry convert a composite bcp 47 tag to a language name from nltk langnames import langname langname ca latn es valencia catalan latin spain valencian langname ca latn es valencia typ short catalan retired codes 3 letter codes convert to 2 letter code parse according to bcp 47 include all subtags only the language subtag convert language name to iso639 3 language code returns the short 2 letter code by default if one is available and the 3 letter code otherwise from nltk langnames import langcode langcode modern greek 1453 el specify typ 3 to get the 3 
letter code langcode modern greek 1453 typ 3 ell convert to 3 letter code translate betwwen wikidata q codes and bcp 47 codes or names convert bcp 47 tag to wikidata q code tag2q nds u sd demv q4289225 convert wikidata q code to bcp 47 tag q2tag q4289225 nds u sd demv convert wikidata q code to bcp 47 full or short language name q2name q4289225 low german mecklenburg vorpommern q2name q4289225 short low german convert simple language name to wikidata q code lang2q low german q25433 data dictionaries return inverse mapping but only if it is bijective wikidata conversion table needs to be loaded explicitly
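A usage sketch for the wrapper functions documented above (langcode, langname), assuming NLTK is installed and the BCP-47 language tables are available locally; the data package name "bcp47" used in the download call is an assumption. The expected values in the comments follow the module doctests and the retired-code table.

import nltk

nltk.download("bcp47", quiet=True)  # assumed package name for the BCP-47 tables

from nltk import langnames as lgn

print(lgn.langcode("Western Frisian"))                   # 'fy' (shortest code by default)
print(lgn.langcode("Western Frisian", typ=3))            # 'fry'
print(lgn.langname("fri"))                               # 'Western Frisian' (retired-code fallback)
print(lgn.langname("ca-Latn-ES-valencia", typ="short"))  # 'Catalan'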
import re from warnings import warn from nltk.corpus import bcp47 codepattern = re.compile("[a-z][a-z][a-z]?") def langname(tag, typ="full"): tags = tag.split("-") code = tags[0].lower() if codepattern.fullmatch(code): if code in iso639retired: return iso639retired[code] elif code in iso639short: code2 = iso639short[code] warn(f"Shortening {code!r} to {code2!r}", stacklevel=2) tag = "-".join([code2] + tags[1:]) name = bcp47.name(tag) if typ == "full": return name elif name: return name.split(":")[0] else: warn(f"Could not find code in {code!r}", stacklevel=2) def langcode(name, typ=2): if name in bcp47.langcode: code = bcp47.langcode[name] if typ == 3 and code in iso639long: code = iso639long[code] return code elif name in iso639code_retired: return iso639code_retired[name] else: warn(f"Could not find language in {name!r}", stacklevel=2) def tag2q(tag): return bcp47.wiki_q[tag] def q2tag(qcode): return wiki_bcp47[qcode] def q2name(qcode, typ="full"): return langname(q2tag(qcode), typ) def lang2q(name): return tag2q(langcode(name)) def inverse_dict(dic): if len(dic.keys()) == len(set(dic.values())): return {val: key for (key, val) in dic.items()} else: warn("This dictionary has no bijective inverse mapping.") bcp47.load_wiki_q() wiki_bcp47 = inverse_dict(bcp47.wiki_q) iso639short = { "aar": "aa", "abk": "ab", "afr": "af", "aka": "ak", "amh": "am", "ara": "ar", "arg": "an", "asm": "as", "ava": "av", "ave": "ae", "aym": "ay", "aze": "az", "bak": "ba", "bam": "bm", "bel": "be", "ben": "bn", "bis": "bi", "bod": "bo", "bos": "bs", "bre": "br", "bul": "bg", "cat": "ca", "ces": "cs", "cha": "ch", "che": "ce", "chu": "cu", "chv": "cv", "cor": "kw", "cos": "co", "cre": "cr", "cym": "cy", "dan": "da", "deu": "de", "div": "dv", "dzo": "dz", "ell": "el", "eng": "en", "epo": "eo", "est": "et", "eus": "eu", "ewe": "ee", "fao": "fo", "fas": "fa", "fij": "fj", "fin": "fi", "fra": "fr", "fry": "fy", "ful": "ff", "gla": "gd", "gle": "ga", "glg": "gl", "glv": "gv", "grn": "gn", "guj": "gu", "hat": "ht", "hau": "ha", "hbs": "sh", "heb": "he", "her": "hz", "hin": "hi", "hmo": "ho", "hrv": "hr", "hun": "hu", "hye": "hy", "ibo": "ig", "ido": "io", "iii": "ii", "iku": "iu", "ile": "ie", "ina": "ia", "ind": "id", "ipk": "ik", "isl": "is", "ita": "it", "jav": "jv", "jpn": "ja", "kal": "kl", "kan": "kn", "kas": "ks", "kat": "ka", "kau": "kr", "kaz": "kk", "khm": "km", "kik": "ki", "kin": "rw", "kir": "ky", "kom": "kv", "kon": "kg", "kor": "ko", "kua": "kj", "kur": "ku", "lao": "lo", "lat": "la", "lav": "lv", "lim": "li", "lin": "ln", "lit": "lt", "ltz": "lb", "lub": "lu", "lug": "lg", "mah": "mh", "mal": "ml", "mar": "mr", "mkd": "mk", "mlg": "mg", "mlt": "mt", "mon": "mn", "mri": "mi", "msa": "ms", "mya": "my", "nau": "na", "nav": "nv", "nbl": "nr", "nde": "nd", "ndo": "ng", "nep": "ne", "nld": "nl", "nno": "nn", "nob": "nb", "nor": "no", "nya": "ny", "oci": "oc", "oji": "oj", "ori": "or", "orm": "om", "oss": "os", "pan": "pa", "pli": "pi", "pol": "pl", "por": "pt", "pus": "ps", "que": "qu", "roh": "rm", "ron": "ro", "run": "rn", "rus": "ru", "sag": "sg", "san": "sa", "sin": "si", "slk": "sk", "slv": "sl", "sme": "se", "smo": "sm", "sna": "sn", "snd": "sd", "som": "so", "sot": "st", "spa": "es", "sqi": "sq", "srd": "sc", "srp": "sr", "ssw": "ss", "sun": "su", "swa": "sw", "swe": "sv", "tah": "ty", "tam": "ta", "tat": "tt", "tel": "te", "tgk": "tg", "tgl": "tl", "tha": "th", "tir": "ti", "ton": "to", "tsn": "tn", "tso": "ts", "tuk": "tk", "tur": "tr", "twi": "tw", "uig": "ug", "ukr": "uk", "urd": "ur", "uzb": "uz", 
"ven": "ve", "vie": "vi", "vol": "vo", "wln": "wa", "wol": "wo", "xho": "xh", "yid": "yi", "yor": "yo", "zha": "za", "zho": "zh", "zul": "zu", } iso639retired = { "fri": "Western Frisian", "auv": "Auvergnat", "gsc": "Gascon", "lms": "Limousin", "lnc": "Languedocien", "prv": "Provençal", "amd": "Amapá Creole", "bgh": "Bogan", "bnh": "Banawá", "bvs": "Belgian Sign Language", "ccy": "Southern Zhuang", "cit": "Chittagonian", "flm": "Falam Chin", "jap": "Jaruára", "kob": "Kohoroxitari", "mob": "Moinba", "mzf": "Aiku", "nhj": "Tlalitzlipa Nahuatl", "nhs": "Southeastern Puebla Nahuatl", "occ": "Occidental", "tmx": "Tomyang", "tot": "Patla-Chicontla Totonac", "xmi": "Miarrã", "yib": "Yinglish", "ztc": "Lachirioag Zapotec", "atf": "Atuence", "bqe": "Navarro-Labourdin Basque", "bsz": "Souletin Basque", "aex": "Amerax", "ahe": "Ahe", "aiz": "Aari", "akn": "Amikoana", "arf": "Arafundi", "azr": "Adzera", "bcx": "Pamona", "bii": "Bisu", "bke": "Bengkulu", "blu": "Hmong Njua", "boc": "Bakung Kenyah", "bsd": "Sarawak Bisaya", "bwv": "Bahau River Kenyah", "bxt": "Buxinhua", "byu": "Buyang", "ccx": "Northern Zhuang", "cru": "Carútana", "dat": "Darang Deng", "dyk": "Land Dayak", "eni": "Enim", "fiz": "Izere", "gen": "Geman Deng", "ggh": "Garreh-Ajuran", "itu": "Itutang", "kds": "Lahu Shi", "knh": "Kayan River Kenyah", "krg": "North Korowai", "krq": "Krui", "kxg": "Katingan", "lmt": "Lematang", "lnt": "Lintang", "lod": "Berawan", "mbg": "Northern Nambikuára", "mdo": "Southwest Gbaya", "mhv": "Arakanese", "miv": "Mimi", "mqd": "Madang", "nky": "Khiamniungan Naga", "nxj": "Nyadu", "ogn": "Ogan", "ork": "Orokaiva", "paj": "Ipeka-Tapuia", "pec": "Southern Pesisir", "pen": "Penesak", "plm": "Palembang", "poj": "Lower Pokomo", "pun": "Pubian", "rae": "Ranau", "rjb": "Rajbanshi", "rws": "Rawas", "sdd": "Semendo", "sdi": "Sindang Kelingi", "skl": "Selako", "slb": "Kahumamahon Saluan", "srj": "Serawai", "suf": "Tarpia", "suh": "Suba", "suu": "Sungkai", "szk": "Sizaki", "tle": "Southern Marakwet", "tnj": "Tanjong", "ttx": "Tutong 1", "ubm": "Upper Baram Kenyah", "vky": "Kayu Agung", "vmo": "Muko-Muko", "wre": "Ware", "xah": "Kahayan", "xkm": "Mahakam Kenyah", "xuf": "Kunfal", "yio": "Dayao Yi", "ymj": "Muji Yi", "ypl": "Pula Yi", "ypw": "Puwa Yi", "ywm": "Wumeng Yi", "yym": "Yuanjiang-Mojiang Yi", "mly": "Malay (individual language)", "muw": "Mundari", "xst": "Silt'e", "ope": "Old Persian", "scc": "Serbian", "scr": "Croatian", "xsk": "Sakan", "mol": "Moldavian", "aay": "Aariya", "acc": "Cubulco Achí", "cbm": "Yepocapa Southwestern Cakchiquel", "chs": "Chumash", "ckc": "Northern Cakchiquel", "ckd": "South Central Cakchiquel", "cke": "Eastern Cakchiquel", "ckf": "Southern Cakchiquel", "cki": "Santa María De Jesús Cakchiquel", "ckj": "Santo Domingo Xenacoj Cakchiquel", "ckk": "Acatenango Southwestern Cakchiquel", "ckw": "Western Cakchiquel", "cnm": "Ixtatán Chuj", "cti": "Tila Chol", "cun": "Cunén Quiché", "eml": "Emiliano-Romagnolo", "eur": "Europanto", "gmo": "Gamo-Gofa-Dawro", "hsf": "Southeastern Huastec", "hva": "San Luís Potosí Huastec", "ixi": "Nebaj Ixil", "ixj": "Chajul Ixil", "jai": "Western Jacalteco", "mms": "Southern Mam", "mpf": "Tajumulco Mam", "mtz": "Tacanec", "mvc": "Central Mam", "mvj": "Todos Santos Cuchumatán Mam", "poa": "Eastern Pokomam", "pob": "Western Pokomchí", "pou": "Southern Pokomam", "ppv": "Papavô", "quj": "Joyabaj Quiché", "qut": "West Central Quiché", "quu": "Eastern Quiché", "qxi": "San Andrés Quiché", "sic": "Malinguat", "stc": "Santa Cruz", "tlz": "Toala'", "tzb": "Bachajón Tzeltal", 
"tzc": "Chamula Tzotzil", "tze": "Chenalhó Tzotzil", "tzs": "San Andrés Larrainzar Tzotzil", "tzt": "Western Tzutujil", "tzu": "Huixtán Tzotzil", "tzz": "Zinacantán Tzotzil", "vlr": "Vatrata", "yus": "Chan Santa Cruz Maya", "nfg": "Nyeng", "nfk": "Shakara", "agp": "Paranan", "bhk": "Albay Bicolano", "bkb": "Finallig", "btb": "Beti (Cameroon)", "cjr": "Chorotega", "cmk": "Chimakum", "drh": "Darkhat", "drw": "Darwazi", "gav": "Gabutamon", "mof": "Mohegan-Montauk-Narragansett", "mst": "Cataelano Mandaya", "myt": "Sangab Mandaya", "rmr": "Caló", "sgl": "Sanglechi-Ishkashimi", "sul": "Surigaonon", "sum": "Sumo-Mayangna", "tnf": "Tangshewi", "wgw": "Wagawaga", "ayx": "Ayi (China)", "bjq": "Southern Betsimisaraka Malagasy", "dha": "Dhanwar (India)", "dkl": "Kolum So Dogon", "mja": "Mahei", "nbf": "Naxi", "noo": "Nootka", "tie": "Tingal", "tkk": "Takpa", "baz": "Tunen", "bjd": "Bandjigali", "ccq": "Chaungtha", "cka": "Khumi Awa Chin", "dap": "Nisi (India)", "dwl": "Walo Kumbe Dogon", "elp": "Elpaputih", "gbc": "Garawa", "gio": "Gelao", "hrr": "Horuru", "ibi": "Ibilo", "jar": "Jarawa (Nigeria)", "kdv": "Kado", "kgh": "Upper Tanudan Kalinga", "kpp": "Paku Karen", "kzh": "Kenuzi-Dongola", "lcq": "Luhu", "mgx": "Omati", "nln": "Durango Nahuatl", "pbz": "Palu", "pgy": "Pongyong", "sca": "Sansu", "tlw": "South Wemale", "unp": "Worora", "wiw": "Wirangu", "ybd": "Yangbye", "yen": "Yendang", "yma": "Yamphe", "daf": "Dan", "djl": "Djiwarli", "ggr": "Aghu Tharnggalu", "ilw": "Talur", "izi": "Izi-Ezaa-Ikwo-Mgbo", "meg": "Mea", "mld": "Malakhel", "mnt": "Maykulan", "mwd": "Mudbura", "myq": "Forest Maninka", "nbx": "Ngura", "nlr": "Ngarla", "pcr": "Panang", "ppr": "Piru", "tgg": "Tangga", "wit": "Wintu", "xia": "Xiandao", "yiy": "Yir Yoront", "yos": "Yos", "emo": "Emok", "ggm": "Gugu Mini", "leg": "Lengua", "lmm": "Lamam", "mhh": "Maskoy Pidgin", "puz": "Purum Naga", "sap": "Sanapaná", "yuu": "Yugh", "aam": "Aramanik", "adp": "Adap", "aue": "ǂKxʼauǁʼein", "bmy": "Bemba (Democratic Republic of Congo)", "bxx": "Borna (Democratic Republic of Congo)", "byy": "Buya", "dzd": "Daza", "gfx": "Mangetti Dune ǃXung", "gti": "Gbati-ri", "ime": "Imeraguen", "kbf": "Kakauhua", "koj": "Sara Dunjo", "kwq": "Kwak", "kxe": "Kakihum", "lii": "Lingkhim", "mwj": "Maligo", "nnx": "Ngong", "oun": "ǃOǃung", "pmu": "Mirpur Panjabi", "sgo": "Songa", "thx": "The", "tsf": "Southwestern Tamang", "uok": "Uokha", "xsj": "Subi", "yds": "Yiddish Sign Language", "ymt": "Mator-Taygi-Karagas", "ynh": "Yangho", "bgm": "Baga Mboteni", "btl": "Bhatola", "cbe": "Chipiajes", "cbh": "Cagua", "coy": "Coyaima", "cqu": "Chilean Quechua", "cum": "Cumeral", "duj": "Dhuwal", "ggn": "Eastern Gurung", "ggo": "Southern Gondi", "guv": "Gey", "iap": "Iapama", "ill": "Iranun", "kgc": "Kasseng", "kox": "Coxima", "ktr": "Kota Marudu Tinagas", "kvs": "Kunggara", "kzj": "Coastal Kadazan", "kzt": "Tambunan Dusun", "nad": "Nijadali", "nts": "Natagaimas", "ome": "Omejes", "pmc": "Palumata", "pod": "Ponares", "ppa": "Pao", "pry": "Pray 3", "rna": "Runa", "svr": "Savara", "tdu": "Tempasuk Dusun", "thc": "Tai Hang Tong", "tid": "Tidong", "tmp": "Tai Mène", "tne": "Tinoc Kallahan", "toe": "Tomedes", "xba": "Kamba (Brazil)", "xbx": "Kabixí", "xip": "Xipináwa", "xkh": "Karahawyana", "yri": "Yarí", "jeg": "Jeng", "kgd": "Kataang", "krm": "Krim", "prb": "Lua'", "puk": "Pu Ko", "rie": "Rien", "rsi": "Rennellese Sign Language", "skk": "Sok", "snh": "Shinabo", "lsg": "Lyons Sign Language", "mwx": "Mediak", "mwy": "Mosiro", "ncp": "Ndaktup", "ais": "Nataoran Amis", "asd": "Asas", 
"dit": "Dirari", "dud": "Hun-Saare", "lba": "Lui", "llo": "Khlor", "myd": "Maramba", "myi": "Mina (India)", "nns": "Ningye", "aoh": "Arma", "ayy": "Tayabas Ayta", "bbz": "Babalia Creole Arabic", "bpb": "Barbacoas", "cca": "Cauca", "cdg": "Chamari", "dgu": "Degaru", "drr": "Dororo", "ekc": "Eastern Karnic", "gli": "Guliguli", "kjf": "Khalaj", "kxl": "Nepali Kurux", "kxu": "Kui (India)", "lmz": "Lumbee", "nxu": "Narau", "plp": "Palpa", "sdm": "Semandang", "tbb": "Tapeba", "xrq": "Karranga", "xtz": "Tasmanian", "zir": "Ziriya", "thw": "Thudam", "bic": "Bikaru", "bij": "Vaghat-Ya-Bijim-Legeri", "blg": "Balau", "gji": "Geji", "mvm": "Muya", "ngo": "Ngoni", "pat": "Papitalai", "vki": "Ija-Zuba", "wra": "Warapu", "ajt": "Judeo-Tunisian Arabic", "cug": "Chungmboko", "lak": "Laka (Nigeria)", "lno": "Lango (South Sudan)", "pii": "Pini", "smd": "Sama", "snb": "Sebuyau", "uun": "Kulon-Pazeh", "wrd": "Warduji", "wya": "Wyandot", } iso639long = inverse_dict(iso639short) iso639code_retired = inverse_dict(iso639retired)
this module is from mxdatetimelazymodule py and is distributed under the terms of the egenix com public license agreement https www egenix comproductsegenix compubliclicense1 1 0 pdf helper to enable simple lazy module import lazy means the actual import is deferred until an attribute is requested from the module s namespace this has the advantage of allowing all imports to be done at the top of a script in a prominent and visible place without having a great impact on startup time c 19992005 marcandre lemburg mailto mallemburg com see the documentation for further information on s or contact the constants lazy module class lazy modules are imported into the given namespaces whenever a nonspecial attribute there are some attributes like doc that class instances handle without calling getattr is requested the module is then registered under the given name in locals usually replacing the import wrapper instance the import itself is done using globals as global namespace example of creating a lazy load module iso lazymodule iso locals globals later requesting an attribute from iso will load the module automatically into the locals namespace overriding the lazymodule instance t iso week1998 1 1 flag which indicates whether the lazymodule is initialized or not name of the module to load flag which indicates whether the module was loaded or not locals dictionary where to register the module globals dictionary to use for the module import create a lazymodule instance wrapping module name the module will later on be registered in locals under the given module name globals is optional and defaults to locals import the module now load and register module localname self lazymodulename e g toolbox fullname self name e g nltk toolbox if self lazymoduleloaded return self lazymodulelocalslocalname if debug printlazymodule loading module r fullname self lazymodulelocalslocalname module import fullname self lazymodulelocals self lazymoduleglobals fill namespace with all symbols from original module to provide faster access self dict updatemodule dict set import flag self dictlazymoduleloaded 1 if debug printlazymodule module r loaded fullname return module def getattrself name import the module on demand and set the attribute if not self lazymoduleinit self dictname value return if self lazymoduleloaded self lazymodulelocalsself lazymodulename value self dictname value return if debug print lazymodule module load triggered by attribute r write access name module self lazymoduleimport setattrmodule name value def reprself return lazymodule s self name this module is from mx datetime lazymodule py and is distributed under the terms of the egenix com public license agreement https www egenix com products egenix com public license 1 1 0 pdf helper to enable simple lazy module import lazy means the actual import is deferred until an attribute is requested from the module s namespace this has the advantage of allowing all imports to be done at the top of a script in a prominent and visible place without having a great impact on startup time c 1999 2005 marc andre lemburg mailto mal lemburg com see the documentation for further information on s or contact the constants lazy module class lazy modules are imported into the given namespaces whenever a non special attribute there are some attributes like __doc__ that class instances handle without calling __getattr__ is requested the module is then registered under the given name in locals usually replacing the import wrapper instance the import itself is done using 
globals as global namespace example of creating a lazy load module iso lazymodule iso locals globals later requesting an attribute from iso will load the module automatically into the locals namespace overriding the lazymodule instance t iso week 1998 1 1 flag which indicates whether the lazymodule is initialized or not name of the module to load flag which indicates whether the module was loaded or not locals dictionary where to register the module globals dictionary to use for the module import create a lazymodule instance wrapping module name the module will later on be registered in locals under the given module name globals is optional and defaults to locals import the module now load and register module e g toolbox e g nltk toolbox fill namespace with all symbols from original module to provide faster access set import flag import the module on demand and get the attribute import the module on demand and set the attribute
_debug = 0 class LazyModule: __lazymodule_init = 0 __lazymodule_name = "" __lazymodule_loaded = 0 __lazymodule_locals = None __lazymodule_globals = None def __init__(self, name, locals, globals=None): self.__lazymodule_locals = locals if globals is None: globals = locals self.__lazymodule_globals = globals mainname = globals.get("__name__", "") if mainname: self.__name__ = mainname + "." + name self.__lazymodule_name = name else: self.__name__ = self.__lazymodule_name = name self.__lazymodule_init = 1 def __lazymodule_import(self): local_name = self.__lazymodule_name full_name = self.__name__ if self.__lazymodule_loaded: return self.__lazymodule_locals[local_name] if _debug: print("LazyModule: Loading module %r" % full_name) self.__lazymodule_locals[local_name] = module = __import__( full_name, self.__lazymodule_locals, self.__lazymodule_globals, "*" ) self.__dict__.update(module.__dict__) self.__dict__["__lazymodule_loaded"] = 1 if _debug: print("LazyModule: Module %r loaded" % full_name) return module def __getattr__(self, name): if self.__lazymodule_loaded: raise AttributeError(name) if _debug: print( "LazyModule: " "Module load triggered by attribute %r read access" % name ) module = self.__lazymodule_import() return getattr(module, name) def __setattr__(self, name, value): if not self.__lazymodule_init: self.__dict__[name] = value return if self.__lazymodule_loaded: self.__lazymodule_locals[self.__lazymodule_name] = value self.__dict__[name] = value return if _debug: print( "LazyModule: " "Module load triggered by attribute %r write access" % name ) module = self.__lazymodule_import() setattr(module, name, value) def __repr__(self): return "<LazyModule '%s'>" % self.__name__
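A short, illustrative use of the LazyModule class above. The import path nltk.lazyimport is an assumption based on where this file lives in NLTK; the standard-library json module is just a convenient target for the deferred import.

from nltk.lazyimport import LazyModule

# Pass an empty globals dict so the wrapper's full name stays plain "json"
# even when this snippet runs as a script (with globals() it would become
# "__main__.json" and the deferred import would fail).
json = LazyModule("json", locals(), {})
print(repr(json))  # <LazyModule 'json'>; nothing has been imported yet

# The first attribute access triggers the real import, registers the loaded
# module under "json" in locals(), and fills the wrapper's own namespace.
print(json.dumps({"lazy": True}))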
natural language toolkit language models c 20012023 nltk project s ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt nltk language modeling module currently this module covers only ngram language models but it should be easy to extend to neural models preparing data before we train our ngram models it is necessary to make sure the data we put in them is in the right format let s say we have a text that is a list of sentences where each sentence is a list of strings for simplicity we just consider a text consisting of characters instead of words text a b c a c d c e f if we want to train a bigram model we need to turn this text into bigrams here s what the first sentence of our text would look like if we use a function from nltk for this from nltk util import bigrams listbigramstext0 a b b c notice how b occurs both as the first and second member of different bigrams but a and c don t wouldn t it be nice to somehow indicate how often sentences start with a and end with c a standard way to deal with this is to add special padding symbols to the sentence before splitting it into ngrams fortunately nltk also has a function for that let s see what it does to the first sentence from nltk util import padsequence listpadsequencetext0 padlefttrue leftpadsymbols padrighttrue rightpadsymbols n2 s a b c s note the n argument that tells the function we need padding for bigrams now passing all these parameters every time is tedious and in most cases they can be safely assumed as defaults anyway thus our module provides a convenience function that has all these arguments already set while the other arguments remain the same as for padsequence from nltk lm preprocessing import padbothends listpadbothendstext0 n2 s a b c s combining the two parts discussed so far we get the following preparation steps for one sentence listbigramspadbothendstext0 n2 s a a b b c c s to make our model more robust we could also train it on unigrams single words as well as bigrams its main source of information nltk once again helpfully provides a function called everygrams while not the most efficient it is conceptually simple from nltk util import everygrams paddedbigrams listpadbothendstext0 n2 listeverygramspaddedbigrams maxlen2 s s a a a b b b c c c s s we are almost ready to start counting ngrams just one more step left during training and evaluation our model will rely on a vocabulary that defines which words are known to the model to create this vocabulary we need to pad our sentences just like for counting ngrams and then combine the sentences into one flat stream of words from nltk lm preprocessing import flatten listflattenpadbothendssent n2 for sent in text s a b c s s a c d c e f s in most cases we want to use the same text as the source for both vocabulary and ngram counts now that we understand what this means for our preprocessing we can simply import a function that does everything for us from nltk lm preprocessing import paddedeverygrampipeline train vocab paddedeverygrampipeline2 text so as to avoid recreating the text in memory both train and vocab are lazy iterators they are evaluated on demand at training time training having prepared our data we are ready to start training a model as a simple example let us train a maximum likelihood estimator mle we only need to specify the highest ngram order to instantiate it from nltk lm import mle lm mle2 this automatically creates an empty vocabulary lenlm vocab 0 which gets filled as we fit the model lm fittrain 
vocab printlm vocab vocabulary with cutoff1 unklabel unk and 9 items lenlm vocab 9 the vocabulary helps us handle words that have not occurred during training lm vocab lookuptext0 a b c lm vocab lookupaliens from mars unk unk unk moreover in some cases we want to ignore words that we did see during training but that didn t occur frequently enough to provide us useful information you can tell the vocabulary to ignore such words to find out how that works check out the docs for the vocabulary class using a trained model when it comes to ngram models the training boils down to counting up the ngrams from the training corpus printlm counts ngramcounter with 2 ngram orders and 24 ngrams this provides a convenient interface to access counts for unigrams lm counts a 2 and bigrams in this case a b lm counts a b 1 and so on however the real purpose of training a language model is to have it score how probable words are in certain contexts this being mle the model returns the item s relative frequency as its score lm scorea 0 15384615384615385 items that are not seen during training are mapped to the vocabulary s unknown label token this is unk by default lm scoreunk lm scorealiens true here s how you get the score for a word given some preceding context for example we want to know what is the chance that b is preceded by a lm scoreb a 0 5 to avoid underflow when working with many small score values it makes sense to take their logarithm for convenience this can be done with the logscore method lm logscorea 2 700439718141092 building on this method we can also evaluate our model s crossentropy and perplexity with respect to sequences of ngrams test a b c d lm entropytest 1 292481250360578 lm perplexitytest 2 449489742783178 it is advisable to preprocess your test text exactly the same way as you did the training text one cool feature of ngram models is that they can be used to generate text lm generate1 randomseed3 s lm generate5 randomseed3 s a b c d provide randomseed if you want to consistently reproduce the same text all other things being equal here we are using it to test the examples you can also condition your generation on some preceding text with the context argument lm generate5 textseed c randomseed3 s c d c d note that an ngram model is restricted in how much preceding context it can take into account for example a trigram model can only condition its output on 2 preceding words if you pass in a 4word context the first two words will be ignored natural language toolkit language models c 2001 2023 nltk project s ilia kurenkov ilia kurenkov gmail com url https www nltk org for license information see license txt nltk language modeling module currently this module covers only ngram language models but it should be easy to extend to neural models preparing data before we train our ngram models it is necessary to make sure the data we put in them is in the right format let s say we have a text that is a list of sentences where each sentence is a list of strings for simplicity we just consider a text consisting of characters instead of words text a b c a c d c e f if we want to train a bigram model we need to turn this text into bigrams here s what the first sentence of our text would look like if we use a function from nltk for this from nltk util import bigrams list bigrams text 0 a b b c notice how b occurs both as the first and second member of different bigrams but a and c don t wouldn t it be nice to somehow indicate how often sentences start with a and end with c a standard way to deal 
with this is to add special padding symbols to the sentence before splitting it into ngrams fortunately nltk also has a function for that let s see what it does to the first sentence from nltk util import pad_sequence list pad_sequence text 0 pad_left true left_pad_symbol s pad_right true right_pad_symbol s n 2 s a b c s note the n argument that tells the function we need padding for bigrams now passing all these parameters every time is tedious and in most cases they can be safely assumed as defaults anyway thus our module provides a convenience function that has all these arguments already set while the other arguments remain the same as for pad_sequence from nltk lm preprocessing import pad_both_ends list pad_both_ends text 0 n 2 s a b c s combining the two parts discussed so far we get the following preparation steps for one sentence list bigrams pad_both_ends text 0 n 2 s a a b b c c s to make our model more robust we could also train it on unigrams single words as well as bigrams its main source of information nltk once again helpfully provides a function called everygrams while not the most efficient it is conceptually simple from nltk util import everygrams padded_bigrams list pad_both_ends text 0 n 2 list everygrams padded_bigrams max_len 2 s s a a a b b b c c c s s we are almost ready to start counting ngrams just one more step left during training and evaluation our model will rely on a vocabulary that defines which words are known to the model to create this vocabulary we need to pad our sentences just like for counting ngrams and then combine the sentences into one flat stream of words from nltk lm preprocessing import flatten list flatten pad_both_ends sent n 2 for sent in text s a b c s s a c d c e f s in most cases we want to use the same text as the source for both vocabulary and ngram counts now that we understand what this means for our preprocessing we can simply import a function that does everything for us from nltk lm preprocessing import padded_everygram_pipeline train vocab padded_everygram_pipeline 2 text so as to avoid re creating the text in memory both train and vocab are lazy iterators they are evaluated on demand at training time training having prepared our data we are ready to start training a model as a simple example let us train a maximum likelihood estimator mle we only need to specify the highest ngram order to instantiate it from nltk lm import mle lm mle 2 this automatically creates an empty vocabulary len lm vocab 0 which gets filled as we fit the model lm fit train vocab print lm vocab vocabulary with cutoff 1 unk_label unk and 9 items len lm vocab 9 the vocabulary helps us handle words that have not occurred during training lm vocab lookup text 0 a b c lm vocab lookup aliens from mars unk unk unk moreover in some cases we want to ignore words that we did see during training but that didn t occur frequently enough to provide us useful information you can tell the vocabulary to ignore such words to find out how that works check out the docs for the vocabulary class using a trained model when it comes to ngram models the training boils down to counting up the ngrams from the training corpus print lm counts ngramcounter with 2 ngram orders and 24 ngrams this provides a convenient interface to access counts for unigrams lm counts a 2 and bigrams in this case a b lm counts a b 1 and so on however the real purpose of training a language model is to have it score how probable words are in certain contexts this being mle the model returns the item s 
relative frequency as its score lm score a 0 15384615384615385 items that are not seen during training are mapped to the vocabulary s unknown label token this is unk by default lm score unk lm score aliens true here s how you get the score for a word given some preceding context for example we want to know what is the chance that b is preceded by a lm score b a 0 5 to avoid underflow when working with many small score values it makes sense to take their logarithm for convenience this can be done with the logscore method lm logscore a 2 700439718141092 building on this method we can also evaluate our model s cross entropy and perplexity with respect to sequences of ngrams test a b c d lm entropy test 1 292481250360578 lm perplexity test 2 449489742783178 it is advisable to preprocess your test text exactly the same way as you did the training text one cool feature of ngram models is that they can be used to generate text lm generate 1 random_seed 3 s lm generate 5 random_seed 3 s a b c d provide random_seed if you want to consistently reproduce the same text all other things being equal here we are using it to test the examples you can also condition your generation on some preceding text with the context argument lm generate 5 text_seed c random_seed 3 s c d c d note that an ngram model is restricted in how much preceding context it can take into account for example a trigram model can only condition its output on 2 preceding words if you pass in a 4 word context the first two words will be ignored
from nltk.lm.counter import NgramCounter from nltk.lm.models import ( MLE, AbsoluteDiscountingInterpolated, KneserNeyInterpolated, Laplace, Lidstone, StupidBackoff, WittenBellInterpolated, ) from nltk.lm.vocabulary import Vocabulary __all__ = [ "Vocabulary", "NgramCounter", "MLE", "Lidstone", "Laplace", "WittenBellInterpolated", "KneserNeyInterpolated", "AbsoluteDiscountingInterpolated", "StupidBackoff", ]
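A compact end-to-end sketch of the workflow documented above, combining padded_everygram_pipeline from nltk.lm.preprocessing with the MLE class re-exported by this package. The two-sentence character corpus is purely illustrative.

from nltk.lm import MLE
from nltk.lm.preprocessing import padded_everygram_pipeline

text = [["a", "b", "c"], ["a", "c", "d", "c", "e", "f"]]

# Lazy streams of padded everygrams (for counting) and words (for the vocabulary).
train, vocab = padded_everygram_pipeline(2, text)

lm = MLE(2)
lm.fit(train, vocab)

print(lm.score("a"))         # unigram relative frequency of "a"
print(lm.score("b", ["a"]))  # P(b | a) = 0.5 on this toy corpus
print(lm.generate(3, random_seed=3))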
natural language toolkit language models c 20012023 nltk project s ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt language model interface import random import warnings from abc import abcmeta abstractmethod from bisect import bisect from itertools import accumulate from nltk lm counter import ngramcounter from nltk lm util import logbase2 from nltk lm vocabulary import vocabulary class smoothingmetaclassabcmeta def initself vocabulary counter self vocab vocabulary self counts counter abstractmethod def unigramscoreself word raise notimplementederror abstractmethod def alphagammaself word context raise notimplementederror def meanitems like random choice but with weights heavily inspired by python 3 6 random choices abc for language models cannot be directly instantiated itself creates new languagemodel param vocabulary if provided this vocabulary will be used instead of creating a new one when training type vocabulary nltk lm vocabulary or none param counter if provided use this object to count ngrams type counter nltk lm ngramcounter or none param ngramsfn if given defines how sentences in training text are turned to ngram sequences type ngramsfn function or none param padfn if given defines how sentences in training text are padded type padfn function or none trains the model on a text param text training text as a sequence of sentences masks out of vocab oov words and computes their model score for modelspecific logic of calculating scores see the unmaskedscore method score a word given some optional context concrete models are expected to provide an implementation note that this method does not mask its arguments with the oov label use the score method for that param str word word for which we want the score param tuplestr context context the word is in if none compute unigram score param context tuplestr or none rtype float evaluate the log score of this word in this context the arguments are the same as for score and unmaskedscore helper method for retrieving counts for a given context assumes context has been checked and oov words in it masked type context tuplestr or none calculate crossentropy of model for given evaluation text param iterabletuplestr textngrams a sequence of ngram tuples rtype float calculates the perplexity of the given text this is simply 2 crossentropy for the text so the arguments are the same generate words from the model param int numwords how many words to generate by default 1 param textseed generation can be conditioned on preceding context param randomseed a random seed or an instance of random random if provided makes the random sampling part of generation reproducible return one str word or a list of words generated from model examples from nltk lm import mle lm mle2 lm fita b b c vocabularytext a b c lm fita b c lm generaterandomseed3 a lm generatetextseed a b this is the base recursion case sorting samples achieves two things reproducible randomness when sampling turns mapping into sequence which weightedchoice expects we build up text one word at a time using the preceding context natural language toolkit language models c 2001 2023 nltk project s ilia kurenkov ilia kurenkov gmail com url https www nltk org for license information see license txt language model interface ngram smoothing interface implements chen goodman 1995 s idea that all smoothing algorithms have certain features in common this should ideally allow smoothing algorithms to work both with backoff and interpolation param 
vocabulary the ngram vocabulary object type vocabulary nltk lm vocab vocabulary param counter the counts of the vocabulary items type counter nltk lm counter ngramcounter return average aka mean for sequence of items like random choice but with weights heavily inspired by python 3 6 random choices abc for language models cannot be directly instantiated itself creates new languagemodel param vocabulary if provided this vocabulary will be used instead of creating a new one when training type vocabulary nltk lm vocabulary or none param counter if provided use this object to count ngrams type counter nltk lm ngramcounter or none param ngrams_fn if given defines how sentences in training text are turned to ngram sequences type ngrams_fn function or none param pad_fn if given defines how sentences in training text are padded type pad_fn function or none trains the model on a text param text training text as a sequence of sentences masks out of vocab oov words and computes their model score for model specific logic of calculating scores see the unmasked_score method score a word given some optional context concrete models are expected to provide an implementation note that this method does not mask its arguments with the oov label use the score method for that param str word word for which we want the score param tuple str context context the word is in if none compute unigram score param context tuple str or none rtype float evaluate the log score of this word in this context the arguments are the same as for score and unmasked_score helper method for retrieving counts for a given context assumes context has been checked and oov words in it masked type context tuple str or none calculate cross entropy of model for given evaluation text param iterable tuple str text_ngrams a sequence of ngram tuples rtype float calculates the perplexity of the given text this is simply 2 cross entropy for the text so the arguments are the same generate words from the model param int num_words how many words to generate by default 1 param text_seed generation can be conditioned on preceding context param random_seed a random seed or an instance of random random if provided makes the random sampling part of generation reproducible return one str word or a list of words generated from model examples from nltk lm import mle lm mle 2 lm fit a b b c vocabulary_text a b c lm fit a b c lm generate random_seed 3 a lm generate text_seed a b this is the base recursion case sorting samples achieves two things reproducible randomness when sampling turns mapping into sequence which _weighted_choice expects we build up text one word at a time using the preceding context
import random import warnings from abc import ABCMeta, abstractmethod from bisect import bisect from itertools import accumulate from nltk.lm.counter import NgramCounter from nltk.lm.util import log_base2 from nltk.lm.vocabulary import Vocabulary class Smoothing(metaclass=ABCMeta): def __init__(self, vocabulary, counter): self.vocab = vocabulary self.counts = counter @abstractmethod def unigram_score(self, word): raise NotImplementedError() @abstractmethod def alpha_gamma(self, word, context): raise NotImplementedError() def _mean(items): return sum(items) / len(items) def _random_generator(seed_or_generator): if isinstance(seed_or_generator, random.Random): return seed_or_generator return random.Random(seed_or_generator) def _weighted_choice(population, weights, random_generator=None): if not population: raise ValueError("Can't choose from empty population") if len(population) != len(weights): raise ValueError("The number of weights does not match the population") cum_weights = list(accumulate(weights)) total = cum_weights[-1] threshold = random_generator.random() return population[bisect(cum_weights, total * threshold)] class LanguageModel(metaclass=ABCMeta): def __init__(self, order, vocabulary=None, counter=None): self.order = order if vocabulary and not isinstance(vocabulary, Vocabulary): warnings.warn( f"The `vocabulary` argument passed to {self.__class__.__name__!r} " "must be an instance of `nltk.lm.Vocabulary`.", stacklevel=3, ) self.vocab = Vocabulary() if vocabulary is None else vocabulary self.counts = NgramCounter() if counter is None else counter def fit(self, text, vocabulary_text=None): if not self.vocab: if vocabulary_text is None: raise ValueError( "Cannot fit without a vocabulary or text to create it from." ) self.vocab.update(vocabulary_text) self.counts.update(self.vocab.lookup(sent) for sent in text) def score(self, word, context=None): return self.unmasked_score( self.vocab.lookup(word), self.vocab.lookup(context) if context else None ) @abstractmethod def unmasked_score(self, word, context=None): raise NotImplementedError() def logscore(self, word, context=None): return log_base2(self.score(word, context)) def context_counts(self, context): return ( self.counts[len(context) + 1][context] if context else self.counts.unigrams ) def entropy(self, text_ngrams): return -1 * _mean( [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams] ) def perplexity(self, text_ngrams): return pow(2.0, self.entropy(text_ngrams)) def generate(self, num_words=1, text_seed=None, random_seed=None): text_seed = [] if text_seed is None else list(text_seed) random_generator = _random_generator(random_seed) if num_words == 1: context = ( text_seed[-self.order + 1 :] if len(text_seed) >= self.order else text_seed ) samples = self.context_counts(self.vocab.lookup(context)) while context and not samples: context = context[1:] if len(context) > 1 else [] samples = self.context_counts(self.vocab.lookup(context)) samples = sorted(samples) return _weighted_choice( samples, tuple(self.score(w, context) for w in samples), random_generator, ) generated = [] for _ in range(num_words): generated.append( self.generate( num_words=1, text_seed=text_seed + generated, random_seed=random_generator, ) ) return generated
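To illustrate the contract that LanguageModel imposes on subclasses, here is a minimal hypothetical model that spreads probability mass uniformly over the vocabulary. The class name and its behaviour are my own illustration, not part of NLTK; only unmasked_score has to be supplied.

from nltk.lm.api import LanguageModel


class UniformModel(LanguageModel):
    def unmasked_score(self, word, context=None):
        # Ignore the context and the word entirely: every vocabulary item,
        # including the <UNK> label, gets the same probability.
        return 1.0 / len(self.vocab) if len(self.vocab) else 0.0


lm = UniformModel(2)
lm.fit([[("a",), ("b",), ("a", "b")]], vocabulary_text=["a", "b"])
print(lm.score("a"))  # 1/3: the items "a", "b" and the <UNK> label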
natural language toolkit c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt language model counter class for counting ngrams will count any ngram sequence you give it first we need to make sure we are feeding the counter sentences of ngrams text a b c d a c d c from nltk util import ngrams textbigrams ngramssent 2 for sent in text textunigrams ngramssent 1 for sent in text the counting itself is very simple from nltk lm import ngramcounter ngramcounts ngramcountertextbigrams textunigrams you can conveniently access ngram counts using standard python dictionary notation string keys will give you unigram counts ngramcounts a 2 ngramcounts aliens 0 if you want to access counts for higher order ngrams use a list or a tuple these are treated as context keys so what you get is a frequency distribution over all continuations after the given context sortedngramcounts a items b 1 c 1 sortedngramcounts a items b 1 c 1 this is equivalent to specifying explicitly the order of the ngram in this case 2 for bigram and indexing on the context ngramcounts2 a is ngramcounts a true note that the keys in conditionalfreqdist cannot be lists only tuples it is generally advisable to use the less verbose and more flexible square bracket notation to get the count of the full ngram a b do this ngramcounts a b 1 specifying the ngram order as a number can be useful for accessing all ngrams in that order ngramcounts2 conditionalfreqdist with 4 conditions the keys of this conditionalfreqdist are the contexts we discussed earlier unigrams can also be accessed with a humanfriendly alias ngramcounts unigrams is ngramcounts1 true similarly to collections counter you can update counts after initialization ngramcounts e 0 ngramcounts updatengramsd e f 1 ngramcounts e 1 creates a new ngramcounter if ngramtext is specified counts ngrams from it otherwise waits for update method to be called explicitly param ngramtext optional text containing sentences of ngrams as for update method type ngramtext iterableiterabletuplestr or none updates ngram counts from ngramtext expects ngramtext to be a sequence of sentences sequences each sentence consists of ngrams as tuples of strings param iterableiterabletuplestr ngramtext text containing sentences of ngrams raises typeerror if the ngrams are not tuples returns grand total number of ngrams stored this includes ngrams from all orders so some duplication is expected rtype int from nltk lm import ngramcounter counts ngramcountera b c d e counts n 3 userfriendly access to ngram counts if isinstanceitem int return self countsitem elif isinstanceitem str return self counts getitem1item elif isinstanceitem sequence return self counts getitemlenitem 1tupleitem def strself return with ngram orders and ngrams format self class name lenself counts self n def lenself return self counts len def containsself item return item in self counts natural language toolkit c 2001 2023 nltk project ilia kurenkov ilia kurenkov gmail com url https www nltk org for license information see license txt language model counter class for counting ngrams will count any ngram sequence you give it first we need to make sure we are feeding the counter sentences of ngrams text a b c d a c d c from nltk util import ngrams text_bigrams ngrams sent 2 for sent in text text_unigrams ngrams sent 1 for sent in text the counting itself is very simple from nltk lm import ngramcounter ngram_counts ngramcounter text_bigrams text_unigrams you can conveniently 
access ngram counts using standard python dictionary notation string keys will give you unigram counts ngram_counts a 2 ngram_counts aliens 0 if you want to access counts for higher order ngrams use a list or a tuple these are treated as context keys so what you get is a frequency distribution over all continuations after the given context sorted ngram_counts a items b 1 c 1 sorted ngram_counts a items b 1 c 1 this is equivalent to specifying explicitly the order of the ngram in this case 2 for bigram and indexing on the context ngram_counts 2 a is ngram_counts a true note that the keys in conditionalfreqdist cannot be lists only tuples it is generally advisable to use the less verbose and more flexible square bracket notation to get the count of the full ngram a b do this ngram_counts a b 1 specifying the ngram order as a number can be useful for accessing all ngrams in that order ngram_counts 2 conditionalfreqdist with 4 conditions the keys of this conditionalfreqdist are the contexts we discussed earlier unigrams can also be accessed with a human friendly alias ngram_counts unigrams is ngram_counts 1 true similarly to collections counter you can update counts after initialization ngram_counts e 0 ngram_counts update ngrams d e f 1 ngram_counts e 1 creates a new ngramcounter if ngram_text is specified counts ngrams from it otherwise waits for update method to be called explicitly param ngram_text optional text containing sentences of ngrams as for update method type ngram_text iterable iterable tuple str or none updates ngram counts from ngram_text expects ngram_text to be a sequence of sentences sequences each sentence consists of ngrams as tuples of strings param iterable iterable tuple str ngram_text text containing sentences of ngrams raises typeerror if the ngrams are not tuples returns grand total number of ngrams stored this includes ngrams from all orders so some duplication is expected rtype int from nltk lm import ngramcounter counts ngramcounter a b c d e counts n 3 user friendly access to ngram counts
from collections import defaultdict from collections.abc import Sequence from nltk.probability import ConditionalFreqDist, FreqDist class NgramCounter: def __init__(self, ngram_text=None): self._counts = defaultdict(ConditionalFreqDist) self._counts[1] = self.unigrams = FreqDist() if ngram_text: self.update(ngram_text) def update(self, ngram_text): for sent in ngram_text: for ngram in sent: if not isinstance(ngram, tuple): raise TypeError( "Ngram <{}> isn't a tuple, " "but {}".format(ngram, type(ngram)) ) ngram_order = len(ngram) if ngram_order == 1: self.unigrams[ngram[0]] += 1 continue context, word = ngram[:-1], ngram[-1] self[ngram_order][context][word] += 1 def N(self): return sum(val.N() for val in self._counts.values()) def __getitem__(self, item): if isinstance(item, int): return self._counts[item] elif isinstance(item, str): return self._counts.__getitem__(1)[item] elif isinstance(item, Sequence): return self._counts.__getitem__(len(item) + 1)[tuple(item)] def __str__(self): return "<{} with {} ngram orders and {} ngrams>".format( self.__class__.__name__, len(self._counts), self.N() ) def __len__(self): return self._counts.__len__() def __contains__(self, item): return item in self._counts
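A small usage sketch of NgramCounter mirroring the access patterns in the description above; nltk.util.ngrams supplies the ngram tuples and the toy corpus is illustrative.

from nltk.lm import NgramCounter
from nltk.util import ngrams

text = [["a", "b", "c", "d"], ["a", "c", "d", "c"]]
counts = NgramCounter(ngrams(sent, n) for sent in text for n in (1, 2))

print(counts["a"])         # 2: unigram count of "a"
print(counts[["a"]]["b"])  # 1: count of the bigram ("a", "b")
print(counts[2])           # ConditionalFreqDist over all bigram contexts
print(counts.N())          # 14: 8 unigrams + 6 bigrams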
natural language toolkit language models c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com manu joseph manujosephvgmail com url https www nltk org for license information see license txt language models from nltk lm api import languagemodel smoothing from nltk lm smoothing import absolutediscounting kneserney wittenbell class mlelanguagemodel def unmaskedscoreself word contextnone return self contextcountscontext freqword class lidstonelanguagemodel def initself gamma args kwargs super initargs kwargs self gamma gamma def unmaskedscoreself word contextnone counts self contextcountscontext wordcount countsword normcount counts n return wordcount self gamma normcount lenself vocab self gamma class laplacelidstone def initself args kwargs super init1 args kwargs class stupidbackofflanguagemodel def initself alpha0 4 args kwargs super initargs kwargs self alpha alpha def unmaskedscoreself word contextnone if not context base recursion return self counts unigrams freqword counts self contextcountscontext wordcount countsword normcount counts n if wordcount 0 return wordcount normcount else return self alpha self unmaskedscoreword context1 class interpolatedlanguagemodellanguagemodel def initself smoothingcls order kwargs params kwargs popparams super initorder kwargs self estimator smoothingclsself vocab self counts params def unmaskedscoreself word contextnone if not context the base recursion case no context we only have a unigram return self estimator unigramscoreword if not self countscontext it can also happen that we have no data for this context in that case we defer to the lowerorder ngram this is the same as setting alpha to 0 and gamma to 1 alpha gamma 0 1 else alpha gamma self estimator alphagammaword context return alpha gamma self unmaskedscoreword context1 class wittenbellinterpolatedinterpolatedlanguagemodel interpolated version of smoothing with absolute discount def initself order discount0 75 kwargs super init absolutediscounting order paramsdiscount discount kwargs class kneserneyinterpolatedinterpolatedlanguagemodel natural language toolkit language models c 2001 2023 nltk project ilia kurenkov ilia kurenkov gmail com manu joseph manujosephv gmail com url https www nltk org for license information see license txt language models class for providing mle ngram model scores inherits initialization from basengrammodel returns the mle score for a word given a context args word is expected to be a string context is expected to be something reasonably convertible to a tuple provides lidstone smoothed scores in addition to initialization arguments from basengrammodel also requires a number by which to increase the counts gamma add one smoothing lidstone or laplace to see what kind look at gamma attribute on the class implements laplace add one smoothing initialization identical to basengrammodel because gamma is always 1 provides stupidbackoff scores in addition to initialization arguments from basengrammodel also requires a parameter alpha with which we scale the lower order probabilities note that this is not a true probability distribution as scores for ngrams of the same order do not sum up to unity base recursion logic common to all interpolated language models the idea to abstract this comes from chen goodman 1995 do not instantiate this class directly the base recursion case no context we only have a unigram it can also happen that we have no data for this context in that case we defer to the lower order ngram this is the same as setting alpha to 0 and gamma to 1 
interpolated version of witten bell smoothing interpolated version of smoothing with absolute discount interpolated version of kneser ney smoothing
from nltk.lm.api import LanguageModel, Smoothing from nltk.lm.smoothing import AbsoluteDiscounting, KneserNey, WittenBell class MLE(LanguageModel): def unmasked_score(self, word, context=None): return self.context_counts(context).freq(word) class Lidstone(LanguageModel): def __init__(self, gamma, *args, **kwargs): super().__init__(*args, **kwargs) self.gamma = gamma def unmasked_score(self, word, context=None): counts = self.context_counts(context) word_count = counts[word] norm_count = counts.N() return (word_count + self.gamma) / (norm_count + len(self.vocab) * self.gamma) class Laplace(Lidstone): def __init__(self, *args, **kwargs): super().__init__(1, *args, **kwargs) class StupidBackoff(LanguageModel): def __init__(self, alpha=0.4, *args, **kwargs): super().__init__(*args, **kwargs) self.alpha = alpha def unmasked_score(self, word, context=None): if not context: return self.counts.unigrams.freq(word) counts = self.context_counts(context) word_count = counts[word] norm_count = counts.N() if word_count > 0: return word_count / norm_count else: return self.alpha * self.unmasked_score(word, context[1:]) class InterpolatedLanguageModel(LanguageModel): def __init__(self, smoothing_cls, order, **kwargs): params = kwargs.pop("params", {}) super().__init__(order, **kwargs) self.estimator = smoothing_cls(self.vocab, self.counts, **params) def unmasked_score(self, word, context=None): if not context: return self.estimator.unigram_score(word) if not self.counts[context]: alpha, gamma = 0, 1 else: alpha, gamma = self.estimator.alpha_gamma(word, context) return alpha + gamma * self.unmasked_score(word, context[1:]) class WittenBellInterpolated(InterpolatedLanguageModel): def __init__(self, order, **kwargs): super().__init__(WittenBell, order, **kwargs) class AbsoluteDiscountingInterpolated(InterpolatedLanguageModel): def __init__(self, order, discount=0.75, **kwargs): super().__init__( AbsoluteDiscounting, order, params={"discount": discount}, **kwargs ) class KneserNeyInterpolated(InterpolatedLanguageModel): def __init__(self, order, discount=0.1, **kwargs): if not (0 <= discount <= 1): raise ValueError( "Discount must be between 0 and 1 for probabilities to sum to unity." ) super().__init__( KneserNey, order, params={"discount": discount, "order": order}, **kwargs )
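A quick comparison of the estimators defined above on the same toy corpus, probing a bigram that never occurs in training: MLE assigns it zero probability, while the additive and backoff models do not. The corpus and the probed pair are illustrative.

from nltk.lm import MLE, Laplace, Lidstone, StupidBackoff
from nltk.lm.preprocessing import padded_everygram_pipeline

text = [["a", "b", "c"], ["a", "c", "d", "c", "e", "f"]]


def fitted(model):
    # Each model needs its own (lazy) training and vocabulary streams.
    train, vocab = padded_everygram_pipeline(2, text)
    model.fit(train, vocab)
    return model


for lm in (fitted(MLE(2)), fitted(Lidstone(0.1, 2)),
           fitted(Laplace(2)), fitted(StupidBackoff(alpha=0.4, order=2))):
    # ("a", "d") is unseen: 0.0 under MLE, positive under the other models.
    print(type(lm).__name__, lm.score("d", ["a"]))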
natural language toolkit language model unit tests c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com manu joseph manujosephvgmail com url https www nltk org for license information see license txt smoothing algorithms for language modeling according to chen goodman 1995 these should work with both backoff and interpolation count values that are greater than zero in a distribution assumes distribution is either a mapping with counts as values or an instance of nltk conditionalfreqdist we explicitly check that values are 0 to guard against negative counts wittenbell smoothing def initself vocabulary counter kwargs super initvocabulary counter kwargs def alphagammaself word context alpha self countscontext freqword gamma self gammacontext return 1 0 gamma alpha gamma def gammaself context nplus countvaluesgtzeroself countscontext return nplus nplus self countscontext n def unigramscoreself word return self counts unigrams freqword class absolutediscountingsmoothing kneserney smoothing this is an extension of smoothing with a discount resources https pages ucsd edurlevylign256winter2008kneserneyminiexample pdf https www youtube comwatch vody1ysutd7o https medium comdennycasimplenumericalexampleforkneserneysmoothingnlp4600addf38b8 https www cl uniheidelberg decoursesss15smtscribe6 pdf https wwwi6 informatik rwthaachen depublicationsdownload951knesericassp1995 pdf count continuations that end with context and word continuations track unique ngram types regardless of how many instances were observed for each type this is different than raw ngram counts which track number of instances natural language toolkit language model unit tests c 2001 2023 nltk project ilia kurenkov ilia kurenkov gmail com manu joseph manujosephv gmail com url https www nltk org for license information see license txt smoothing algorithms for language modeling according to chen goodman 1995 these should work with both backoff and interpolation count values that are greater than zero in a distribution assumes distribution is either a mapping with counts as values or an instance of nltk conditionalfreqdist we explicitly check that values are 0 to guard against negative counts witten bell smoothing smoothing with absolute discount kneser ney smoothing this is an extension of smoothing with a discount resources https pages ucsd edu rlevy lign256 winter2008 kneser_ney_mini_example pdf https www youtube com watch v ody1ysutd7o https medium com dennyc a simple numerical example for kneser ney smoothing nlp 4600addf38b8 https www cl uni heidelberg de courses ss15 smt scribe6 pdf https www i6 informatik rwth aachen de publications download 951 kneser icassp 1995 pdf count continuations that end with context and word continuations track unique ngram types regardless of how many instances were observed for each type this is different than raw ngram counts which track number of instances
from operator import methodcaller from nltk.lm.api import Smoothing from nltk.probability import ConditionalFreqDist def _count_values_gt_zero(distribution): as_count = ( methodcaller("N") if isinstance(distribution, ConditionalFreqDist) else lambda count: count ) return sum( 1 for dist_or_count in distribution.values() if as_count(dist_or_count) > 0 ) class WittenBell(Smoothing): def __init__(self, vocabulary, counter, **kwargs): super().__init__(vocabulary, counter, **kwargs) def alpha_gamma(self, word, context): alpha = self.counts[context].freq(word) gamma = self._gamma(context) return (1.0 - gamma) * alpha, gamma def _gamma(self, context): n_plus = _count_values_gt_zero(self.counts[context]) return n_plus / (n_plus + self.counts[context].N()) def unigram_score(self, word): return self.counts.unigrams.freq(word) class AbsoluteDiscounting(Smoothing): def __init__(self, vocabulary, counter, discount=0.75, **kwargs): super().__init__(vocabulary, counter, **kwargs) self.discount = discount def alpha_gamma(self, word, context): alpha = ( max(self.counts[context][word] - self.discount, 0) / self.counts[context].N() ) gamma = self._gamma(context) return alpha, gamma def _gamma(self, context): n_plus = _count_values_gt_zero(self.counts[context]) return (self.discount * n_plus) / self.counts[context].N() def unigram_score(self, word): return self.counts.unigrams.freq(word) class KneserNey(Smoothing): def __init__(self, vocabulary, counter, order, discount=0.1, **kwargs): super().__init__(vocabulary, counter, **kwargs) self.discount = discount self._order = order def unigram_score(self, word): word_continuation_count, total_count = self._continuation_counts(word) return word_continuation_count / total_count def alpha_gamma(self, word, context): prefix_counts = self.counts[context] word_continuation_count, total_count = ( (prefix_counts[word], prefix_counts.N()) if len(context) + 1 == self._order else self._continuation_counts(word, context) ) alpha = max(word_continuation_count - self.discount, 0.0) / total_count gamma = self.discount * _count_values_gt_zero(prefix_counts) / total_count return alpha, gamma def _continuation_counts(self, word, context=tuple()): higher_order_ngrams_with_context = ( counts for prefix_ngram, counts in self.counts[len(context) + 2].items() if prefix_ngram[1:] == context ) higher_order_ngrams_with_word_count, total = 0, 0 for counts in higher_order_ngrams_with_context: higher_order_ngrams_with_word_count += int(counts[word] > 0) total += _count_values_gt_zero(counts) return higher_order_ngrams_with_word_count, total
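A small demonstration of the alpha/gamma decomposition above, using KneserNeyInterpolated, which plugs the KneserNey class into the interpolation recursion. Peeking at the model's estimator attribute is for inspection only; the toy corpus is illustrative.

from nltk.lm import KneserNeyInterpolated
from nltk.lm.preprocessing import padded_everygram_pipeline

text = [["a", "b", "c"], ["a", "c", "d", "c", "e", "f"]]
train, vocab = padded_everygram_pipeline(2, text)

lm = KneserNeyInterpolated(2, discount=0.1)
lm.fit(train, vocab)

# alpha is the discounted count ratio, gamma the weight handed to the
# lower-order (continuation) estimate; score() interpolates the two.
alpha, gamma = lm.estimator.alpha_gamma("b", ("a",))
print(alpha, gamma)          # 0.45 and 0.1 on this toy corpus
print(lm.score("b", ["a"]))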
natural language toolkit c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt language model utilities from math import log neginf floatinf posinf floatinf def logbase2score natural language toolkit c 2001 2023 nltk project ilia kurenkov ilia kurenkov gmail com url https www nltk org for license information see license txt language model utilities convenience function for computing logarithms with base 2
from math import log

NEG_INF = float("-inf")
POS_INF = float("inf")


def log_base2(score):
    # Guard against log(0): map a zero probability to -inf rather than
    # raising a math domain error.
    if score == 0.0:
        return NEG_INF
    return log(score, 2)
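A tiny illustration of the convention above, using the names defined directly above: because a zero probability maps to NEG_INF, one unseen ngram drives a model's entropy and perplexity to infinity instead of crashing the evaluation.

print(log_base2(0.25))            # -2.0
print(log_base2(0.0) == NEG_INF)  # True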
natural language toolkit c 20012023 nltk project ilia kurenkov ilia kurenkovgmail com url https www nltk org for license information see license txt language model vocabulary import sys from collections import counter from collections abc import iterable from functools import singledispatch from itertools import chain singledispatch def dispatchedlookupwords vocab raise typeerrorfunsupported type for looking up in vocabulary typewords dispatchedlookup registeriterable def words vocab return tupledispatchedlookupw vocab for w in words dispatchedlookup registerstr def stringlookupword vocab stores language model vocabulary satisfies two common language modeling requirements for a vocabulary when checking membership and calculating its size filters items by comparing their counts to a cutoff value adds a special unknown token which unseen words are mapped to words a c d c a b r a c d from nltk lm import vocabulary vocab vocabularywords unkcutoff2 tokens with counts greater than or equal to the cutoff value will be considered part of the vocabulary vocab c 3 c in vocab true vocab d 2 d in vocab true tokens with frequency counts less than the cutoff value will be considered not part of the vocabulary even though their entries in the count dictionary are preserved vocab b 1 b in vocab false vocab aliens 0 aliens in vocab false keeping the count entries for seen words allows us to change the cutoff value without having to recalculate the counts vocab2 vocabularyvocab counts unkcutoff1 b in vocab2 true the cutoff value influences not only membership checking but also the result of getting the size of the vocabulary using the builtin len note that while the number of keys in the vocabulary s counter stays the same the items in the vocabulary differ depending on the cutoff we use sorted to demonstrate because it keeps the order consistent sortedvocab2 counts a b c d r sortedvocab2 unk a b c d r sortedvocab counts a b c d r sortedvocab unk a c d in addition to items it gets populated with the vocabulary stores a special token that stands in for socalled unknown items by default it s unk unk in vocab true we can look up words in a vocabulary using its lookup method unseen words with counts less than cutoff are looked up as the unknown label if given one word a string as an input this method will return a string vocab lookupa a vocab lookupaliens unk if given a sequence it will return an tuple of the looked up words vocab lookupp a r d b c unk a unk d unk c it s possible to update the counts after the vocabulary has been created in general the interface is the same as that of collections counter vocab b 1 vocab updateb b c vocab b 3 create a new vocabulary param counts optional iterable or collections counter instance to preseed the vocabulary in case it is iterable counts are calculated param int unkcutoff words that occur less frequently than this value are not considered part of the vocabulary param unklabel label for marking words not part of vocabulary cutoff value items with count below this value are not considered part of vocabulary update vocabulary counts wraps collections counter update method look up one or more words in the vocabulary if passed one word as a string will return that word or self unklabel otherwise will assume it was passed a sequence of words will try to look each of them up and return an iterator over the looked up words param words words to look up type words iterablestr or str rtype generatorstr or str raises typeerror for types other than strings or iterables from nltk 
lm import vocabulary vocab vocabularya b c a b unkcutoff2 vocab lookupa a vocab lookupaliens unk vocab lookupa b c x b a b unk unk b only consider items with counts ge to cutoff as being in the vocabulary return selfitem self cutoff def iterself computing size of vocabulary reflects the cutoff return self len def eqself other return self unklabel other unklabel and self cutoff other cutoff and self counts other counts def strself return with cutoff unklabel and items format self class name self cutoff self unklabel lenself natural language toolkit c 2001 2023 nltk project ilia kurenkov ilia kurenkov gmail com url https www nltk org for license information see license txt language model vocabulary look up a sequence of words in the vocabulary returns an iterator over looked up words looks up one word in the vocabulary stores language model vocabulary satisfies two common language modeling requirements for a vocabulary when checking membership and calculating its size filters items by comparing their counts to a cutoff value adds a special unknown token which unseen words are mapped to words a c d c a b r a c d from nltk lm import vocabulary vocab vocabulary words unk_cutoff 2 tokens with counts greater than or equal to the cutoff value will be considered part of the vocabulary vocab c 3 c in vocab true vocab d 2 d in vocab true tokens with frequency counts less than the cutoff value will be considered not part of the vocabulary even though their entries in the count dictionary are preserved vocab b 1 b in vocab false vocab aliens 0 aliens in vocab false keeping the count entries for seen words allows us to change the cutoff value without having to recalculate the counts vocab2 vocabulary vocab counts unk_cutoff 1 b in vocab2 true the cutoff value influences not only membership checking but also the result of getting the size of the vocabulary using the built in len note that while the number of keys in the vocabulary s counter stays the same the items in the vocabulary differ depending on the cutoff we use sorted to demonstrate because it keeps the order consistent sorted vocab2 counts a b c d r sorted vocab2 unk a b c d r sorted vocab counts a b c d r sorted vocab unk a c d in addition to items it gets populated with the vocabulary stores a special token that stands in for so called unknown items by default it s unk unk in vocab true we can look up words in a vocabulary using its lookup method unseen words with counts less than cutoff are looked up as the unknown label if given one word a string as an input this method will return a string vocab lookup a a vocab lookup aliens unk if given a sequence it will return an tuple of the looked up words vocab lookup p a r d b c unk a unk d unk c it s possible to update the counts after the vocabulary has been created in general the interface is the same as that of collections counter vocab b 1 vocab update b b c vocab b 3 create a new vocabulary param counts optional iterable or collections counter instance to pre seed the vocabulary in case it is iterable counts are calculated param int unk_cutoff words that occur less frequently than this value are not considered part of the vocabulary param unk_label label for marking words not part of vocabulary cutoff value items with count below this value are not considered part of vocabulary update vocabulary counts wraps collections counter update method look up one or more words in the vocabulary if passed one word as a string will return that word or self unk_label otherwise will assume it was passed a 
sequence of words will try to look each of them up and return an iterator over the looked up words param words word s to look up type words iterable str or str rtype generator str or str raises typeerror for types other than strings or iterables from nltk lm import vocabulary vocab vocabulary a b c a b unk_cutoff 2 vocab lookup a a vocab lookup aliens unk vocab lookup a b c x b a b unk unk b only consider items with counts ge to cutoff as being in the vocabulary building on membership check define how to iterate over vocabulary computing size of vocabulary reflects the cutoff
import sys from collections import Counter from collections.abc import Iterable from functools import singledispatch from itertools import chain @singledispatch def _dispatched_lookup(words, vocab): raise TypeError(f"Unsupported type for looking up in vocabulary: {type(words)}") @_dispatched_lookup.register(Iterable) def _(words, vocab): return tuple(_dispatched_lookup(w, vocab) for w in words) @_dispatched_lookup.register(str) def _string_lookup(word, vocab): return word if word in vocab else vocab.unk_label class Vocabulary: def __init__(self, counts=None, unk_cutoff=1, unk_label="<UNK>"): self.unk_label = unk_label if unk_cutoff < 1: raise ValueError(f"Cutoff value cannot be less than 1. Got: {unk_cutoff}") self._cutoff = unk_cutoff self.counts = Counter() self.update(counts if counts is not None else "") @property def cutoff(self): return self._cutoff def update(self, *counter_args, **counter_kwargs): self.counts.update(*counter_args, **counter_kwargs) self._len = sum(1 for _ in self) def lookup(self, words): return _dispatched_lookup(words, self) def __getitem__(self, item): return self._cutoff if item == self.unk_label else self.counts[item] def __contains__(self, item): return self[item] >= self.cutoff def __iter__(self): return chain( (item for item in self.counts if item in self), [self.unk_label] if self.counts else [], ) def __len__(self): return self._len def __eq__(self, other): return ( self.unk_label == other.unk_label and self.cutoff == other.cutoff and self.counts == other.counts ) def __str__(self): return "<{} with cutoff={} unk_label='{}' and {} items>".format( self.__class__.__name__, self.cutoff, self.unk_label, len(self) )
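A usage sketch of Vocabulary mirroring the cutoff and lookup behaviour described above; the word list is illustrative.

from nltk.lm import Vocabulary

words = ["a", "c", "d", "c", "a", "b", "r", "a", "c", "d"]
vocab = Vocabulary(words, unk_cutoff=2)

print("b" in vocab)                   # False: count 1 is below the cutoff
print(vocab.lookup(["a", "b", "x"]))  # ('a', '<UNK>', '<UNK>')

# The raw counts survive the cutoff, so a looser vocabulary can reuse them.
vocab1 = Vocabulary(vocab.counts, unk_cutoff=1)
print("b" in vocab1)                  # True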
natural language toolkit metrics c 20012023 nltk project steven bird stevenbird1gmail com edward loper edlopergmail com url https www nltk org for license information see license txt nltk metrics classes and methods for scoring processing modules natural language toolkit metrics c 2001 2023 nltk project steven bird stevenbird1 gmail com edward loper edloper gmail com url https www nltk org for license information see license txt nltk metrics classes and methods for scoring processing modules
from nltk.metrics.agreement import AnnotationTask
from nltk.metrics.aline import align
from nltk.metrics.association import (
    BigramAssocMeasures,
    ContingencyMeasures,
    NgramAssocMeasures,
    QuadgramAssocMeasures,
    TrigramAssocMeasures,
)
from nltk.metrics.confusionmatrix import ConfusionMatrix
from nltk.metrics.distance import (
    binary_distance,
    custom_distance,
    edit_distance,
    edit_distance_align,
    fractional_presence,
    interval_distance,
    jaccard_distance,
    masi_distance,
    presence,
)
from nltk.metrics.paice import Paice
from nltk.metrics.scores import (
    accuracy,
    approxrand,
    f_measure,
    log_likelihood,
    precision,
    recall,
)
from nltk.metrics.segmentation import ghd, pk, windowdiff
from nltk.metrics.spearman import (
    ranks_from_scores,
    ranks_from_sequence,
    spearman_correlation,
)
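As a quick, hedged illustration of the scoring helpers re-exported above (toy taggings; accuracy compares paired sequences, while precision, recall and f_measure compare sets):

from nltk.metrics import accuracy, precision, recall, f_measure

reference = "DET NN VB DET JJ NN NN IN DET NN".split()
test = "DET VB VB DET NN NN NN IN DET NN".split()

# Token-level accuracy over the paired sequences (8 of 10 tags agree).
print(accuracy(reference, test))  # 0.8

# Set-based scores: compare sets of (position, label) pairs.
ref_set = set(enumerate(reference))
test_set = set(enumerate(test))
print(precision(ref_set, test_set))  # fraction of test pairs that are correct
print(recall(ref_set, test_set))     # fraction of reference pairs recovered
print(f_measure(ref_set, test_set))  # harmonic mean of precision and recall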
Natural Language Toolkit: Agreement Metrics. (c) 2001-2023 NLTK Project. Author: Tom Lippincott <tom@cs.columbia.edu>. URL: https://www.nltk.org. For license information, see LICENSE.TXT.

Implementations of inter-annotator agreement coefficients surveyed by Artstein and Poesio (2007), "Inter-Coder Agreement for Computational Linguistics".

An agreement coefficient calculates the amount that annotators agreed on label assignments beyond what is expected by chance. In defining the AnnotationTask class, we use naming conventions similar to the paper's terminology. There are three types of objects in an annotation task: the coders (variables "c" and "C"), the items to be annotated (variables "i" and "I"), and the potential categories to be assigned (variables "k" and "K").

Additionally, it is often the case that we don't want to treat two different labels as complete disagreement, and so the AnnotationTask constructor can also take a distance metric as a final argument. Distance metrics are simply functions that take two arguments and return a value between 0.0 and 1.0 indicating the distance between them. If not supplied, the default is binary comparison between the arguments.

The simplest way to initialize an AnnotationTask is with a list of triples, each containing a coder's assignment for one object in the task:

    task = AnnotationTask(data=[('c1', '1', 'v1'), ('c2', '1', 'v1'), ...])

Note that the data list needs to contain the same number of triples for each individual coder, containing category values for the same set of items.

Alpha (Krippendorff 1980), Kappa (Cohen 1960), S (Bennet, Albert and Goldstein 1954) and Pi (Scott 1955) are provided.

TODO: describe handling of multiple coders and missing data.

Expected results from the Artstein and Poesio survey paper:

    >>> from nltk.metrics.agreement import AnnotationTask
    >>> import os.path
    >>> t = AnnotationTask(data=[x.split() for x in open(os.path.join(os.path.dirname(__file__), "artstein_poesio_example.txt"))])
    >>> t.avg_Ao()
    0.88
    >>> round(t.pi(), 5)
    0.79953
    >>> round(t.S(), 2)
    0.82

    This would have returned a wrong value (0.0) in @785fb79, as coders are in
    the wrong order. Subsequently, all values for pi(), S(), and kappa() would
    have been wrong as they are computed with avg_Ao().
    >>> t2 = AnnotationTask(data=[('b','1','stat'),('a','1','stat')])
    >>> t2.avg_Ao()
    1.0

    The following, of course, also works.
    >>> t3 = AnnotationTask(data=[('a','1','othr'),('b','1','othr')])
    >>> t3.avg_Ao()
    1.0

AnnotationTask represents an annotation task, i.e. people assigning labels to items. In general, coders and items can be represented as any hashable object; integers, for example, are fine, though strings are more readable. Labels must support the distance functions applied to them, so e.g. a string-edit-distance makes no sense if your labels are integers, whereas interval distance needs numeric values. A notable case of this is the MASI metric, which requires Python sets.

load_array() appends a sequence of (coder, item, label) triples to any data already loaded. agr() computes agreement between two coders on a given item; since we do not know which coder's entry comes first in the data, either ordering is handled when retrieving the pair of labelings. Nk, Nik and Nck implement the N notation used in Artstein and Poesio (2007) (the older N() method is deprecated in their favour). Ao() is the observed agreement between two coders on all items, avg_Ao() averages it over all coder pairs, and Do_Kw_pairwise()/Do_Kw() give the observed disagreement for the weighted kappa coefficient. The coefficients themselves are S (Bennett, Albert and Goldstein 1954), pi (Scott 1955; here multi-pi, equivalent to K from Siegel and Castellan 1988), kappa (Cohen 1960, averaged naively over kappas for each coder pair), multi_kappa (Davies and Fleiss 1982, averaging over observed and expected agreements for each coder pair), alpha (Krippendorff 1980, with checks for degenerate cases and items with fewer than two labelings ignored) and weighted_kappa (Cohen 1968). A small command-line interface at the bottom of the module reads labelings from a file (one "labeler item labels" line per labeling) and prints the requested coefficient.
import logging from itertools import groupby from operator import itemgetter from nltk.internals import deprecated from nltk.metrics.distance import binary_distance from nltk.probability import ConditionalFreqDist, FreqDist log = logging.getLogger(__name__) class AnnotationTask: def __init__(self, data=None, distance=binary_distance): self.distance = distance self.I = set() self.K = set() self.C = set() self.data = [] if data is not None: self.load_array(data) def __str__(self): return "\r\n".join( map( lambda x: "%s\t%s\t%s" % (x["coder"], x["item"].replace("_", "\t"), ",".join(x["labels"])), self.data, ) ) def load_array(self, array): for coder, item, labels in array: self.C.add(coder) self.K.add(labels) self.I.add(item) self.data.append({"coder": coder, "labels": labels, "item": item}) def agr(self, cA, cB, i, data=None): data = data or self.data k1 = next(x for x in data if x["coder"] in (cA, cB) and x["item"] == i) if k1["coder"] == cA: k2 = next(x for x in data if x["coder"] == cB and x["item"] == i) else: k2 = next(x for x in data if x["coder"] == cA and x["item"] == i) ret = 1.0 - float(self.distance(k1["labels"], k2["labels"])) log.debug("Observed agreement between %s and %s on %s: %f", cA, cB, i, ret) log.debug( 'Distance between "%r" and "%r": %f', k1["labels"], k2["labels"], 1.0 - ret ) return ret def Nk(self, k): return float(sum(1 for x in self.data if x["labels"] == k)) def Nik(self, i, k): return float(sum(1 for x in self.data if x["item"] == i and x["labels"] == k)) def Nck(self, c, k): return float(sum(1 for x in self.data if x["coder"] == c and x["labels"] == k)) @deprecated("Use Nk, Nik or Nck instead") def N(self, k=None, i=None, c=None): if k is not None and i is None and c is None: ret = self.Nk(k) elif k is not None and i is not None and c is None: ret = self.Nik(i, k) elif k is not None and c is not None and i is None: ret = self.Nck(c, k) else: raise ValueError( f"You must pass either i or c, not both! 
(k={k!r},i={i!r},c={c!r})" ) log.debug("Count on N[%s,%s,%s]: %d", k, i, c, ret) return ret def _grouped_data(self, field, data=None): data = data or self.data return groupby(sorted(data, key=itemgetter(field)), itemgetter(field)) def Ao(self, cA, cB): data = self._grouped_data( "item", (x for x in self.data if x["coder"] in (cA, cB)) ) ret = sum(self.agr(cA, cB, item, item_data) for item, item_data in data) / len( self.I ) log.debug("Observed agreement between %s and %s: %f", cA, cB, ret) return ret def _pairwise_average(self, function): total = 0 n = 0 s = self.C.copy() for cA in self.C: s.remove(cA) for cB in s: total += function(cA, cB) n += 1 ret = total / n return ret def avg_Ao(self): ret = self._pairwise_average(self.Ao) log.debug("Average observed agreement: %f", ret) return ret def Do_Kw_pairwise(self, cA, cB, max_distance=1.0): total = 0.0 data = (x for x in self.data if x["coder"] in (cA, cB)) for i, itemdata in self._grouped_data("item", data): total += self.distance(next(itemdata)["labels"], next(itemdata)["labels"]) ret = total / (len(self.I) * max_distance) log.debug("Observed disagreement between %s and %s: %f", cA, cB, ret) return ret def Do_Kw(self, max_distance=1.0): ret = self._pairwise_average( lambda cA, cB: self.Do_Kw_pairwise(cA, cB, max_distance) ) log.debug("Observed disagreement: %f", ret) return ret def S(self): Ae = 1.0 / len(self.K) ret = (self.avg_Ao() - Ae) / (1.0 - Ae) return ret def pi(self): total = 0.0 label_freqs = FreqDist(x["labels"] for x in self.data) for k, f in label_freqs.items(): total += f**2 Ae = total / ((len(self.I) * len(self.C)) ** 2) return (self.avg_Ao() - Ae) / (1 - Ae) def Ae_kappa(self, cA, cB): Ae = 0.0 nitems = float(len(self.I)) label_freqs = ConditionalFreqDist((x["labels"], x["coder"]) for x in self.data) for k in label_freqs.conditions(): Ae += (label_freqs[k][cA] / nitems) * (label_freqs[k][cB] / nitems) return Ae def kappa_pairwise(self, cA, cB): Ae = self.Ae_kappa(cA, cB) ret = (self.Ao(cA, cB) - Ae) / (1.0 - Ae) log.debug("Expected agreement between %s and %s: %f", cA, cB, Ae) return ret def kappa(self): return self._pairwise_average(self.kappa_pairwise) def multi_kappa(self): Ae = self._pairwise_average(self.Ae_kappa) return (self.avg_Ao() - Ae) / (1.0 - Ae) def Disagreement(self, label_freqs): total_labels = sum(label_freqs.values()) pairs = 0.0 for j, nj in label_freqs.items(): for l, nl in label_freqs.items(): pairs += float(nj * nl) * self.distance(l, j) return 1.0 * pairs / (total_labels * (total_labels - 1)) def alpha(self): if len(self.K) == 0: raise ValueError("Cannot calculate alpha, no data present!") if len(self.K) == 1: log.debug("Only one annotation value, alpha returning 1.") return 1 if len(self.C) == 1 and len(self.I) == 1: raise ValueError("Cannot calculate alpha, only one coder and item present!") total_disagreement = 0.0 total_ratings = 0 all_valid_labels_freq = FreqDist([]) total_do = 0.0 for i, itemdata in self._grouped_data("item"): label_freqs = FreqDist(x["labels"] for x in itemdata) labels_count = sum(label_freqs.values()) if labels_count < 2: continue all_valid_labels_freq += label_freqs total_do += self.Disagreement(label_freqs) * labels_count do = total_do / sum(all_valid_labels_freq.values()) de = self.Disagreement(all_valid_labels_freq) k_alpha = 1.0 - do / de return k_alpha def weighted_kappa_pairwise(self, cA, cB, max_distance=1.0): total = 0.0 label_freqs = ConditionalFreqDist( (x["coder"], x["labels"]) for x in self.data if x["coder"] in (cA, cB) ) for j in self.K: for l in self.K: total 
+= label_freqs[cA][j] * label_freqs[cB][l] * self.distance(j, l) De = total / (max_distance * pow(len(self.I), 2)) log.debug("Expected disagreement between %s and %s: %f", cA, cB, De) Do = self.Do_Kw_pairwise(cA, cB) ret = 1.0 - (Do / De) return ret def weighted_kappa(self, max_distance=1.0): return self._pairwise_average( lambda cA, cB: self.weighted_kappa_pairwise(cA, cB, max_distance) ) if __name__ == "__main__": import optparse import re from nltk.metrics import distance parser = optparse.OptionParser() parser.add_option( "-d", "--distance", dest="distance", default="binary_distance", help="distance metric to use", ) parser.add_option( "-a", "--agreement", dest="agreement", default="kappa", help="agreement coefficient to calculate", ) parser.add_option( "-e", "--exclude", dest="exclude", action="append", default=[], help="coder names to exclude (may be specified multiple times)", ) parser.add_option( "-i", "--include", dest="include", action="append", default=[], help="coder names to include, same format as exclude", ) parser.add_option( "-f", "--file", dest="file", help="file to read labelings from, each line with three columns: 'labeler item labels'", ) parser.add_option( "-v", "--verbose", dest="verbose", default="0", help="how much debugging to print on stderr (0-4)", ) parser.add_option( "-c", "--columnsep", dest="columnsep", default="\t", help="char/string that separates the three columns in the file, defaults to tab", ) parser.add_option( "-l", "--labelsep", dest="labelsep", default=",", help="char/string that separates labels (if labelers can assign more than one), defaults to comma", ) parser.add_option( "-p", "--presence", dest="presence", default=None, help="convert each labeling into 1 or 0, based on presence of LABEL", ) parser.add_option( "-T", "--thorough", dest="thorough", default=False, action="store_true", help="calculate agreement for every subset of the annotators", ) (options, remainder) = parser.parse_args() if not options.file: parser.print_help() exit() logging.basicConfig(level=50 - 10 * int(options.verbose)) data = [] with open(options.file) as infile: for l in infile: toks = l.split(options.columnsep) coder, object_, labels = ( toks[0], str(toks[1:-1]), frozenset(toks[-1].strip().split(options.labelsep)), ) if ( (options.include == options.exclude) or (len(options.include) > 0 and coder in options.include) or (len(options.exclude) > 0 and coder not in options.exclude) ): data.append((coder, object_, labels)) if options.presence: task = AnnotationTask( data, getattr(distance, options.distance)(options.presence) ) else: task = AnnotationTask(data, getattr(distance, options.distance)) if options.thorough: pass else: print(getattr(task, options.agreement)()) logging.shutdown()
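A small self-contained sketch of AnnotationTask in use, with invented toy labelings from three coders over five items; any of the coefficient methods defined above can then be called on the task, and a partial-credit distance such as masi_distance can be supplied for set-valued labels.

from nltk.metrics.agreement import AnnotationTask
from nltk.metrics.distance import masi_distance

# Three coders ('c1'..'c3') each label the same five items (toy data).
toy_data = [
    ("c1", "1", "v1"), ("c2", "1", "v1"), ("c3", "1", "v1"),
    ("c1", "2", "v1"), ("c2", "2", "v2"), ("c3", "2", "v1"),
    ("c1", "3", "v2"), ("c2", "3", "v2"), ("c3", "3", "v2"),
    ("c1", "4", "v1"), ("c2", "4", "v1"), ("c3", "4", "v2"),
    ("c1", "5", "v2"), ("c2", "5", "v2"), ("c3", "5", "v2"),
]
task = AnnotationTask(data=toy_data)
print("avg observed agreement:", task.avg_Ao())
print("pi:                    ", task.pi())
print("kappa (pairwise avg):  ", task.kappa())
print("alpha:                 ", task.alpha())

# Set-valued labels with MASI distance (labels must be hashable, hence frozenset).
set_task = AnnotationTask(
    data=[
        ("c1", "1", frozenset({"a", "b"})), ("c2", "1", frozenset({"a"})),
        ("c1", "2", frozenset({"c"})), ("c2", "2", frozenset({"c"})),
    ],
    distance=masi_distance,
)
print("alpha with MASI:", set_task.alpha())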
Natural Language Toolkit: Ngram Association Measures. (c) 2001-2023 NLTK Project. Author: Joel Nothman <jnothman@student.usyd.edu.au>. URL: https://www.nltk.org. For license information, see LICENSE.TXT.

Provides scoring functions for a number of association measures through a generic, abstract implementation in NgramAssocMeasures, and n-specific BigramAssocMeasures and TrigramAssocMeasures. NGRAM, UNIGRAMS and TOTAL are indices into the marginals arguments: the ngram count, the tuple of unigram counts, and the number of words in the data.

NgramAssocMeasures is an abstract class defining a collection of generic association measures. Each public method returns a score, taking the following arguments:

    score_fn(count_of_ngram,
             (count_of_(n-1)gram_1, ..., count_of_(n-1)gram_j),
             (count_of_(n-2)gram_1, ..., count_of_(n-2)gram_k),
             ...,
             (count_of_1gram_1, ..., count_of_1gram_n),
             count_of_total_words)

Inheriting classes should define a property _n and a method _contingency() which calculates contingency values from marginals, in order for all association measures defined here to be usable. The generic measures are: raw_freq (frequency), student_t (Student's t test with independence hypothesis for unigrams, Manning and Schutze 5.3.1), chi_sq (Pearson's chi-square, 5.3.3), mi_like (a variant of mutual information whose numerator exponent is set by the keyword argument power, default 3, with no logarithm applied), pmi (pointwise mutual information, 5.4), likelihood_ratio (5.3.4), poisson_stirling (the Poisson-Stirling measure) and jaccard (the Jaccard index).

BigramAssocMeasures provides each measure as a function of three arguments:

    bigram_score_fn(n_ii, (n_ix, n_xi), n_xx)

The arguments constitute the marginals of a contingency table, counting the occurrences of particular events in a corpus. The letter "i" in the suffix refers to the appearance of the word in question, while "x" indicates the appearance of any word. Thus, for example, n_ii counts (w1, w2), i.e. the bigram being scored; n_ix counts (w1, *); n_xi counts (*, w2); and n_xx counts (*, *), i.e. any bigram. This may be shown with respect to a contingency table:

                w1    ~w1
             ------ ------
         w2 | n_ii | n_oi | = n_xi
             ------ ------
        ~w2 | n_io | n_oo |
             ------ ------
             = n_ix        TOTAL = n_xx

Bigram-specific measures include phi_sq (the square of the Pearson correlation coefficient), chi_sq (phi_sq multiplied by the number of bigrams, Manning and Schutze 5.3.3), fisher (Fisher's exact test, Pedersen 1996; less sensitive to small counts than PMI or chi-sq, but more expensive to compute and requiring SciPy) and dice (Dice's coefficient).

TrigramAssocMeasures and QuadgramAssocMeasures extend the same scheme to 3- and 4-grams, with score functions of the form

    trigram_score_fn(n_iii, (n_iix, n_ixi, n_xii), (n_ixx, n_xix, n_xxi), n_xxx)

and analogous marginals for quadgrams. Internal _contingency() and _marginals() methods convert between marginal counts and full contingency tables (or cubes), e.g.:

    >>> TrigramAssocMeasures._contingency(1, (1, 1, 1), (1, 73, 1), 2000)
    (1, 0, 0, 0, 0, 72, 0, 1927)
    >>> TrigramAssocMeasures._marginals(1, 0, 0, 0, 0, 72, 0, 1927)
    (1, (1, 1, 1), (1, 73, 1), 2000)

Finally, ContingencyMeasures wraps an NgramAssocMeasures class so that the arguments of association measures are contingency table values rather than marginals; from each association measure function it produces a new function accepting contingency table values as its arguments.
import math as _math from abc import ABCMeta, abstractmethod from functools import reduce _log2 = lambda x: _math.log2(x) _ln = _math.log _product = lambda s: reduce(lambda x, y: x * y, s) _SMALL = 1e-20 try: from scipy.stats import fisher_exact except ImportError: def fisher_exact(*_args, **_kwargs): raise NotImplementedError NGRAM = 0 UNIGRAMS = -2 TOTAL = -1 class NgramAssocMeasures(metaclass=ABCMeta): _n = 0 @staticmethod @abstractmethod def _contingency(*marginals): raise NotImplementedError( "The contingency table is not available" "in the general ngram case" ) @staticmethod @abstractmethod def _marginals(*contingency): raise NotImplementedError( "The contingency table is not available" "in the general ngram case" ) @classmethod def _expected_values(cls, cont): n_all = sum(cont) bits = [1 << i for i in range(cls._n)] for i in range(len(cont)): yield ( _product( sum(cont[x] for x in range(2**cls._n) if (x & j) == (i & j)) for j in bits ) / (n_all ** (cls._n - 1)) ) @staticmethod def raw_freq(*marginals): return marginals[NGRAM] / marginals[TOTAL] @classmethod def student_t(cls, *marginals): return ( marginals[NGRAM] - _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1)) ) / (marginals[NGRAM] + _SMALL) ** 0.5 @classmethod def chi_sq(cls, *marginals): cont = cls._contingency(*marginals) exps = cls._expected_values(cont) return sum((obs - exp) ** 2 / (exp + _SMALL) for obs, exp in zip(cont, exps)) @staticmethod def mi_like(*marginals, **kwargs): return marginals[NGRAM] ** kwargs.get("power", 3) / _product( marginals[UNIGRAMS] ) @classmethod def pmi(cls, *marginals): return _log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) - _log2( _product(marginals[UNIGRAMS]) ) @classmethod def likelihood_ratio(cls, *marginals): cont = cls._contingency(*marginals) return 2 * sum( obs * _ln(obs / (exp + _SMALL) + _SMALL) for obs, exp in zip(cont, cls._expected_values(cont)) ) @classmethod def poisson_stirling(cls, *marginals): exp = _product(marginals[UNIGRAMS]) / (marginals[TOTAL] ** (cls._n - 1)) return marginals[NGRAM] * (_log2(marginals[NGRAM] / exp) - 1) @classmethod def jaccard(cls, *marginals): cont = cls._contingency(*marginals) return cont[0] / sum(cont[:-1]) class BigramAssocMeasures(NgramAssocMeasures): _n = 2 @staticmethod def _contingency(n_ii, n_ix_xi_tuple, n_xx): (n_ix, n_xi) = n_ix_xi_tuple n_oi = n_xi - n_ii n_io = n_ix - n_ii return (n_ii, n_oi, n_io, n_xx - n_ii - n_oi - n_io) @staticmethod def _marginals(n_ii, n_oi, n_io, n_oo): return (n_ii, (n_oi + n_ii, n_io + n_ii), n_oo + n_oi + n_io + n_ii) @staticmethod def _expected_values(cont): n_xx = sum(cont) for i in range(4): yield (cont[i] + cont[i ^ 1]) * (cont[i] + cont[i ^ 2]) / n_xx @classmethod def phi_sq(cls, *marginals): n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals) return (n_ii * n_oo - n_io * n_oi) ** 2 / ( (n_ii + n_io) * (n_ii + n_oi) * (n_io + n_oo) * (n_oi + n_oo) ) @classmethod def chi_sq(cls, n_ii, n_ix_xi_tuple, n_xx): (n_ix, n_xi) = n_ix_xi_tuple return n_xx * cls.phi_sq(n_ii, (n_ix, n_xi), n_xx) @classmethod def fisher(cls, *marginals): n_ii, n_io, n_oi, n_oo = cls._contingency(*marginals) (odds, pvalue) = fisher_exact([[n_ii, n_io], [n_oi, n_oo]], alternative="less") return pvalue @staticmethod def dice(n_ii, n_ix_xi_tuple, n_xx): (n_ix, n_xi) = n_ix_xi_tuple return 2 * n_ii / (n_ix + n_xi) class TrigramAssocMeasures(NgramAssocMeasures): _n = 3 @staticmethod def _contingency(n_iii, n_iix_tuple, n_ixx_tuple, n_xxx): (n_iix, n_ixi, n_xii) = n_iix_tuple (n_ixx, n_xix, n_xxi) = n_ixx_tuple 
n_oii = n_xii - n_iii n_ioi = n_ixi - n_iii n_iio = n_iix - n_iii n_ooi = n_xxi - n_iii - n_oii - n_ioi n_oio = n_xix - n_iii - n_oii - n_iio n_ioo = n_ixx - n_iii - n_ioi - n_iio n_ooo = n_xxx - n_iii - n_oii - n_ioi - n_iio - n_ooi - n_oio - n_ioo return (n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo) @staticmethod def _marginals(*contingency): n_iii, n_oii, n_ioi, n_ooi, n_iio, n_oio, n_ioo, n_ooo = contingency return ( n_iii, (n_iii + n_iio, n_iii + n_ioi, n_iii + n_oii), ( n_iii + n_ioi + n_iio + n_ioo, n_iii + n_oii + n_iio + n_oio, n_iii + n_oii + n_ioi + n_ooi, ), sum(contingency), ) class QuadgramAssocMeasures(NgramAssocMeasures): _n = 4 @staticmethod def _contingency(n_iiii, n_iiix_tuple, n_iixx_tuple, n_ixxx_tuple, n_xxxx): (n_iiix, n_iixi, n_ixii, n_xiii) = n_iiix_tuple (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix) = n_iixx_tuple (n_ixxx, n_xixx, n_xxix, n_xxxi) = n_ixxx_tuple n_oiii = n_xiii - n_iiii n_ioii = n_ixii - n_iiii n_iioi = n_iixi - n_iiii n_ooii = n_xxii - n_iiii - n_oiii - n_ioii n_oioi = n_xixi - n_iiii - n_oiii - n_iioi n_iooi = n_ixxi - n_iiii - n_ioii - n_iioi n_oooi = n_xxxi - n_iiii - n_oiii - n_ioii - n_iioi - n_ooii - n_iooi - n_oioi n_iiio = n_iiix - n_iiii n_oiio = n_xiix - n_iiii - n_oiii - n_iiio n_ioio = n_ixix - n_iiii - n_ioii - n_iiio n_ooio = n_xxix - n_iiii - n_oiii - n_ioii - n_iiio - n_ooii - n_ioio - n_oiio n_iioo = n_iixx - n_iiii - n_iioi - n_iiio n_oioo = n_xixx - n_iiii - n_oiii - n_iioi - n_iiio - n_oioi - n_oiio - n_iioo n_iooo = n_ixxx - n_iiii - n_ioii - n_iioi - n_iiio - n_iooi - n_iioo - n_ioio n_oooo = ( n_xxxx - n_iiii - n_oiii - n_ioii - n_iioi - n_ooii - n_oioi - n_iooi - n_oooi - n_iiio - n_oiio - n_ioio - n_ooio - n_iioo - n_oioo - n_iooo ) return ( n_iiii, n_oiii, n_ioii, n_ooii, n_iioi, n_oioi, n_iooi, n_oooi, n_iiio, n_oiio, n_ioio, n_ooio, n_iioo, n_oioo, n_iooo, n_oooo, ) @staticmethod def _marginals(*contingency): ( n_iiii, n_oiii, n_ioii, n_ooii, n_iioi, n_oioi, n_iooi, n_oooi, n_iiio, n_oiio, n_ioio, n_ooio, n_iioo, n_oioo, n_iooo, n_oooo, ) = contingency n_iiix = n_iiii + n_iiio n_iixi = n_iiii + n_iioi n_ixii = n_iiii + n_ioii n_xiii = n_iiii + n_oiii n_iixx = n_iiii + n_iioi + n_iiio + n_iioo n_ixix = n_iiii + n_ioii + n_iiio + n_ioio n_ixxi = n_iiii + n_ioii + n_iioi + n_iooi n_xixi = n_iiii + n_oiii + n_iioi + n_oioi n_xxii = n_iiii + n_oiii + n_ioii + n_ooii n_xiix = n_iiii + n_oiii + n_iiio + n_oiio n_ixxx = n_iiii + n_ioii + n_iioi + n_iiio + n_iooi + n_iioo + n_ioio + n_iooo n_xixx = n_iiii + n_oiii + n_iioi + n_iiio + n_oioi + n_oiio + n_iioo + n_oioo n_xxix = n_iiii + n_oiii + n_ioii + n_iiio + n_ooii + n_ioio + n_oiio + n_ooio n_xxxi = n_iiii + n_oiii + n_ioii + n_iioi + n_ooii + n_iooi + n_oioi + n_oooi n_all = sum(contingency) return ( n_iiii, (n_iiix, n_iixi, n_ixii, n_xiii), (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix), (n_ixxx, n_xixx, n_xxix, n_xxxi), n_all, ) class ContingencyMeasures: def __init__(self, measures): self.__class__.__name__ = "Contingency" + measures.__class__.__name__ for k in dir(measures): if k.startswith("__"): continue v = getattr(measures, k) if not k.startswith("_"): v = self._make_contingency_fn(measures, v) setattr(self, k, v) @staticmethod def _make_contingency_fn(measures, old_fn): def res(*contingency): return old_fn(*measures._marginals(*contingency)) res.__doc__ = old_fn.__doc__ res.__name__ = old_fn.__name__ return res
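To make the marginal-based call signatures concrete, here is a brief sketch scoring one bigram from invented toy counts, plus the usual pairing with a collocation finder (assuming the standard nltk.collocations helpers, which re-export these measure classes, are available):

from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder

bigram_measures = BigramAssocMeasures()

# Score a single bigram from marginals (toy counts):
# n_ii = bigram count, (n_ix, n_xi) = counts of each word, n_xx = total bigrams.
print(bigram_measures.pmi(20, (42, 20), 14307668))
print(bigram_measures.chi_sq(20, (42, 20), 14307668))

# Rank bigrams found in running text.
words = "the quick brown fox jumped over the lazy dog the quick brown fox".split()
finder = BigramCollocationFinder.from_words(words)
print(finder.nbest(bigram_measures.pmi, 3))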
Natural Language Toolkit: Confusion Matrices. (c) 2001-2023 NLTK Project. Authors: Edward Loper <edloper@gmail.com>, Steven Bird <stevenbird1@gmail.com>, Tom Aarsen. URL: https://www.nltk.org. For license information, see LICENSE.TXT.

ConfusionMatrix is the confusion matrix between a list of reference values and a corresponding list of test values. Entry [r, t] of this matrix is a count of the number of times that the reference value r corresponds to the test value t. E.g.:

    >>> from nltk.metrics import ConfusionMatrix
    >>> ref  = 'DET NN VB DET JJ NN NN IN DET NN'.split()
    >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
    >>> cm = ConfusionMatrix(ref, test)
    >>> print(cm['NN', 'NN'])
    3

Note that the diagonal entries (Ri == Tj) of this matrix correspond to correct values, and the off-diagonal entries correspond to incorrect values.

The constructor takes an ordered list of reference values and a list of test values to compare against the corresponding reference values, and raises ValueError if the two lists do not have the same length. Internally it builds the list of all values seen in either list, a value-to-index dictionary, the confusion counts themselves as a list of lists, the greatest single count (used for printing), the total number of values and the number of correct (on-diagonal) values.

pretty_format() returns a multi-line string representation of the matrix; truncate shows only the specified number of values (any sorting, e.g. sort_by_count, is performed before truncation), and sort_by_count orders labels by their frequency in the reference data, so that labels occurring more frequently appear towards the left edge of the matrix and rarer labels towards the right. (TODO: add marginals.)

recall(value) is TP / (TP + FN): loosely, how often value was predicted correctly relative to how often value was the true result (times value was correct and also predicted, over times value was correct). precision(value) is TP / (TP + FP): how often value was predicted correctly relative to the number of predictions for value (times value was correct and also predicted, over times value was predicted). f_measure(value, alpha=0.5) is the harmonic mean of precision and recall weighted by alpha, 1 / (alpha/p + (1-alpha)/r); with alpha = 0.5 (equal costs for false negatives and false positives) this reduces to 2pr / (p + r).

evaluate() tabulates the recall, precision and f-measure for each value in the confusion matrix:

    >>> reference = "DET NN VB DET JJ NN NN IN DET NN".split()
    >>> test = "DET VB VB DET NN NN NN IN DET NN".split()
    >>> cm = ConfusionMatrix(reference, test)
    >>> print(cm.evaluate())
    Tag | Prec.  | Recall | F-measure
    ----+--------+--------+-----------
    DET | 1.0000 | 1.0000 | 1.0000
     IN | 1.0000 | 1.0000 | 1.0000
     JJ | 0.0000 | 0.0000 | 0.0000
     NN | 0.7500 | 0.7500 | 0.7500
     VB | 0.5000 | 1.0000 | 0.6667
    <BLANKLINE>

It accepts the same alpha (defaults to 0.5), truncate (defaults to None) and sort_by_count (defaults to False) parameters and returns the tabulated string.
from nltk.probability import FreqDist class ConfusionMatrix: def __init__(self, reference, test, sort_by_count=False): if len(reference) != len(test): raise ValueError("Lists must have the same length.") if sort_by_count: ref_fdist = FreqDist(reference) test_fdist = FreqDist(test) def key(v): return -(ref_fdist[v] + test_fdist[v]) values = sorted(set(reference + test), key=key) else: values = sorted(set(reference + test)) indices = {val: i for (i, val) in enumerate(values)} confusion = [[0 for _ in values] for _ in values] max_conf = 0 for w, g in zip(reference, test): confusion[indices[w]][indices[g]] += 1 max_conf = max(max_conf, confusion[indices[w]][indices[g]]) self._values = values self._indices = indices self._confusion = confusion self._max_conf = max_conf self._total = len(reference) self._correct = sum(confusion[i][i] for i in range(len(values))) def __getitem__(self, li_lj_tuple): (li, lj) = li_lj_tuple i = self._indices[li] j = self._indices[lj] return self._confusion[i][j] def __repr__(self): return f"<ConfusionMatrix: {self._correct}/{self._total} correct>" def __str__(self): return self.pretty_format() def pretty_format( self, show_percents=False, values_in_chart=True, truncate=None, sort_by_count=False, ): confusion = self._confusion values = self._values if sort_by_count: values = sorted( values, key=lambda v: -sum(self._confusion[self._indices[v]]) ) if truncate: values = values[:truncate] if values_in_chart: value_strings = ["%s" % val for val in values] else: value_strings = [str(n + 1) for n in range(len(values))] valuelen = max(len(val) for val in value_strings) value_format = "%" + repr(valuelen) + "s | " if show_percents: entrylen = 6 entry_format = "%5.1f%%" zerostr = " ." else: entrylen = len(repr(self._max_conf)) entry_format = "%" + repr(entrylen) + "d" zerostr = " " * (entrylen - 1) + "." 
s = "" for i in range(valuelen): s += (" " * valuelen) + " |" for val in value_strings: if i >= valuelen - len(val): s += val[i - valuelen + len(val)].rjust(entrylen + 1) else: s += " " * (entrylen + 1) s += " |\n" s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) for val, li in zip(value_strings, values): i = self._indices[li] s += value_format % val for lj in values: j = self._indices[lj] if confusion[i][j] == 0: s += zerostr elif show_percents: s += entry_format % (100.0 * confusion[i][j] / self._total) else: s += entry_format % confusion[i][j] if i == j: prevspace = s.rfind(" ") s = s[:prevspace] + "<" + s[prevspace + 1 :] + ">" else: s += " " s += "|\n" s += "{}-+-{}+\n".format("-" * valuelen, "-" * ((entrylen + 1) * len(values))) s += "(row = reference; col = test)\n" if not values_in_chart: s += "Value key:\n" for i, value in enumerate(values): s += "%6d: %s\n" % (i + 1, value) return s def key(self): values = self._values str = "Value key:\n" indexlen = len(repr(len(values) - 1)) key_format = " %" + repr(indexlen) + "d: %s\n" for i in range(len(values)): str += key_format % (i, values[i]) return str def recall(self, value): TP = self[value, value] TP_FN = sum(self[value, pred_value] for pred_value in self._values) if TP_FN == 0: return 0.0 return TP / TP_FN def precision(self, value): TP = self[value, value] TP_FP = sum(self[real_value, value] for real_value in self._values) if TP_FP == 0: return 0.0 return TP / TP_FP def f_measure(self, value, alpha=0.5): p = self.precision(value) r = self.recall(value) if p == 0.0 or r == 0.0: return 0.0 return 1.0 / (alpha / p + (1 - alpha) / r) def evaluate(self, alpha=0.5, truncate=None, sort_by_count=False): tags = self._values if sort_by_count: tags = sorted(tags, key=lambda v: -sum(self._confusion[self._indices[v]])) if truncate: tags = tags[:truncate] tag_column_len = max(max(len(tag) for tag in tags), 3) s = ( f"{' ' * (tag_column_len - 3)}Tag | Prec. | Recall | F-measure\n" f"{'-' * tag_column_len}-+--------+--------+-----------\n" ) for tag in tags: s += ( f"{tag:>{tag_column_len}} | " f"{self.precision(tag):<6.4f} | " f"{self.recall(tag):<6.4f} | " f"{self.f_measure(tag, alpha=alpha):.4f}\n" ) return s def demo(): reference = "DET NN VB DET JJ NN NN IN DET NN".split() test = "DET VB VB DET NN NN NN IN DET NN".split() print("Reference =", reference) print("Test =", test) print("Confusion matrix:") print(ConfusionMatrix(reference, test)) print(ConfusionMatrix(reference, test).pretty_format(sort_by_count=True)) print(ConfusionMatrix(reference, test).recall("VB")) if __name__ == "__main__": demo()
Natural Language Toolkit: Distance Metrics. (c) 2001-2023 NLTK Project. Authors: Edward Loper <edloper@gmail.com>, Steven Bird <stevenbird1@gmail.com>, Tom Lippincott <tom@cs.columbia.edu>. URL: https://www.nltk.org. For license information, see LICENSE.TXT.

Distance metrics compute the distance between two items (usually strings). As metrics, they must satisfy the following three requirements:

1. d(a, a) = 0
2. d(a, b) >= 0
3. d(a, c) <= d(a, b) + d(b, c)

edit_distance(s1, s2, substitution_cost=1, transpositions=False) calculates the Levenshtein edit-distance between two strings: the number of characters that need to be substituted, inserted, or deleted to transform s1 into s2. For example, transforming "rain" to "shine" requires three steps, consisting of two substitutions and one insertion: "rain" -> "sain" -> "shin" -> "shine". These operations could have been done in other orders, but at least three steps are needed. The function allows specifying the cost of substitution edits (e.g. "a" -> "b"), because sometimes it makes sense to assign greater penalties to substitutions, and it optionally allows transposition edits (e.g. "ab" -> "ba"), though these are disabled by default. Internally it fills a 2-d table (column 0 and row 0 initialised to 0, 1, 2, 3, 4, ...), considering at each cell the cost of skipping a character in s1, skipping a character in s2, a substitution, and (when enabled) a transposition, and picking the cheapest; a table of the last seen position of each alphabet character in s1 is kept to support transpositions (see the Damerau-Levenshtein pseudocode, https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance).

edit_distance_align(s1, s2, substitution_cost=1) calculates the minimum Levenshtein edit-distance based alignment mapping between two strings: the mapping from s1 to s2 that minimizes the edit distance cost. For example, mapping "rain" to "shine" would involve 2 substitutions, 2 matches and an insertion, resulting in the mapping [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (4, 5)]; NB: (0, 0) is the start state without any letters associated (see https://web.stanford.edu/class/cs124/lec/med.pdf). In case of multiple valid minimum-distance alignments, the backtrace uses the following operation precedence: 1. substitute s1 and s2 characters, 2. skip an s1 character, 3. skip an s2 character. The backtrace is carried out in reverse string order. This function does not support transposition, and it returns a list of (int, int) index pairs.

binary_distance(label1, label2) is a simple equality test: 0.0 if the labels are identical, 1.0 if they are different:

    >>> from nltk.metrics import binary_distance
    >>> binary_distance(1, 1)
    0.0
    >>> binary_distance(1, 3)
    1.0

jaccard_distance(label1, label2) is a distance metric comparing set similarity: 1 minus the size of the intersection divided by the size of the union.

masi_distance(label1, label2) is a distance metric that takes into account partial agreement when multiple labels are assigned:

    >>> from nltk.metrics import masi_distance
    >>> masi_distance(set([1, 2]), set([1, 2, 3, 4]))
    0.665
passonneau 2006 measuring agreement on set valued items masi for semantic and pragmatic annotation krippendorff s interval distance metric from nltk metrics import interval_distance interval_distance 1 10 81 krippendorff 1980 content analysis an introduction to its methodology return pow list label1 0 list label2 0 2 higher order function to test presence of a given label computes the jaro similarity between 2 sequences from matthew a jaro 1989 advances in record linkage methodology as applied to the 1985 census of tampa florida journal of the american statistical association 84 406 414 20 the jaro distance between is the min no of single character transpositions required to change one word into another the jaro similarity formula from https en wikipedia org wiki jaro e2 80 93winkler_distance jaro_sim 0 if m 0 else 1 3 m s_1 m s_2 m t m where s_i is the length of string s_i m is the no of matching characters t is the half no of possible transpositions first store the length of the strings because they will be re used several times the upper bound of the distance for being a matched character initialize the counts for matches and transpositions no of matched characters in s1 and s2 no of transpositions between s1 and s2 positions in s1 which are matches to some character in s2 positions in s2 which are matches to some character in s1 iterate through sequences check for matches and compute transpositions iterate through each character the jaro winkler distance is an extension of the jaro similarity in william e winkler 1990 string comparator metrics and enhanced decision rules in the fellegi sunter model of record linkage proceedings of the section on survey research methods american statistical association 354 359 such that jaro_winkler_sim jaro_sim l p 1 jaro_sim where jaro_sim is the output from the jaro similarity see jaro_similarity l is the length of common prefix at the start of the string this implementation provides an upperbound for the l value to keep the prefixes a common value of this upperbound is 4 p is the constant scaling factor to overweigh common prefixes the jaro winkler similarity will fall within the 0 1 bound given that max p 0 25 default is p 0 1 in winkler 1990 test using outputs from https www census gov srd papers pdf rr93 8 pdf from table 5 comparison of string comparators rescaled between 0 and 1 winkler_examples billy billy billy bill billy blily massie massey yvette yevett billy bolly dwayne duane dixon dickson billy susan winkler_scores 1 000 0 967 0 947 0 944 0 911 0 893 0 858 0 853 0 000 jaro_scores 1 000 0 933 0 933 0 889 0 889 0 867 0 822 0 790 0 000 one way to match the values on the winkler s paper is to provide a different p scaling factor for different pairs of strings e g p_factors 0 1 0 125 0 20 0 125 0 20 0 20 0 20 0 15 0 1 for s1 s2 jscore wscore p in zip winkler_examples jaro_scores winkler_scores p_factors assert round jaro_similarity s1 s2 3 jscore assert round jaro_winkler_similarity s1 s2 p p 3 wscore test using outputs from https www census gov srd papers pdf rr94 5 pdf from table 2 1 comparison of string comparators using last names first names and street names winkler_examples shackleford shackelford dunningham cunnigham nichleson nichulson jones johnson massey massie abroms abrams hardin martinez itman smith jeraldine geraldine marhta martha michelle michael julies julius tanya tonya dwayne duane sean susan jon john jon jan brookhaven brrokhaven brook hallow brook hllw decatur decatir fitzrureiter fitzenreiter higbee highee higbee higvee 
lacura locura iowa iona 1st ist jaro_scores 0 970 0 896 0 926 0 790 0 889 0 889 0 722 0 467 0 926 0 944 0 869 0 889 0 867 0 822 0 783 0 917 0 000 0 933 0 944 0 905 0 856 0 889 0 889 0 889 0 833 0 000 winkler_scores 0 982 0 896 0 956 0 832 0 944 0 922 0 722 0 467 0 926 0 961 0 921 0 933 0 880 0 858 0 805 0 933 0 000 0 947 0 967 0 943 0 913 0 922 0 922 0 900 0 867 0 000 one way to match the values on the winkler s paper is to provide a different p scaling factor for different pairs of strings e g p_factors 0 1 0 1 0 1 0 1 0 125 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 20 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 for s1 s2 jscore wscore p in zip winkler_examples jaro_scores winkler_scores p_factors if s1 s2 in jon jan 1st ist continue skip bad examples from the paper assert round jaro_similarity s1 s2 3 jscore assert round jaro_winkler_similarity s1 s2 p p 3 wscore this test case proves that the output of jaro winkler similarity depends on the product l p and not on the product max_l p here the product max_l p 1 however the product l p 1 round jaro_winkler_similarity tanya tonya p 0 1 max_l 100 3 0 88 to ensure that the output of the jaro winkler s similarity falls between 0 1 the product of l p needs to be also fall between 0 1 compute the jaro similarity initialize the upper bound for the no of prefixes if user did not pre define the upperbound use shorter length between s1 and s2 compute the prefix matches zip will automatically loop until the end of shorter string return the similarity value as described in docstring
import operator import warnings def _edit_dist_init(len1, len2): lev = [] for i in range(len1): lev.append([0] * len2) for i in range(len1): lev[i][0] = i for j in range(len2): lev[0][j] = j return lev def _last_left_t_init(sigma): return {c: 0 for c in sigma} def _edit_dist_step( lev, i, j, s1, s2, last_left, last_right, substitution_cost=1, transpositions=False ): c1 = s1[i - 1] c2 = s2[j - 1] a = lev[i - 1][j] + 1 b = lev[i][j - 1] + 1 c = lev[i - 1][j - 1] + (substitution_cost if c1 != c2 else 0) d = c + 1 if transpositions and last_left > 0 and last_right > 0: d = lev[last_left - 1][last_right - 1] + i - last_left + j - last_right - 1 lev[i][j] = min(a, b, c, d) def edit_distance(s1, s2, substitution_cost=1, transpositions=False): len1 = len(s1) len2 = len(s2) lev = _edit_dist_init(len1 + 1, len2 + 1) sigma = set() sigma.update(s1) sigma.update(s2) last_left_t = _last_left_t_init(sigma) for i in range(1, len1 + 1): last_right_buf = 0 for j in range(1, len2 + 1): last_left = last_left_t[s2[j - 1]] last_right = last_right_buf if s1[i - 1] == s2[j - 1]: last_right_buf = j _edit_dist_step( lev, i, j, s1, s2, last_left, last_right, substitution_cost=substitution_cost, transpositions=transpositions, ) last_left_t[s1[i - 1]] = i return lev[len1][len2] def _edit_dist_backtrace(lev): i, j = len(lev) - 1, len(lev[0]) - 1 alignment = [(i, j)] while (i, j) != (0, 0): directions = [ (i - 1, j - 1), (i - 1, j), (i, j - 1), ] direction_costs = ( (lev[i][j] if (i >= 0 and j >= 0) else float("inf"), (i, j)) for i, j in directions ) _, (i, j) = min(direction_costs, key=operator.itemgetter(0)) alignment.append((i, j)) return list(reversed(alignment)) def edit_distance_align(s1, s2, substitution_cost=1): len1 = len(s1) len2 = len(s2) lev = _edit_dist_init(len1 + 1, len2 + 1) for i in range(len1): for j in range(len2): _edit_dist_step( lev, i + 1, j + 1, s1, s2, 0, 0, substitution_cost=substitution_cost, transpositions=False, ) alignment = _edit_dist_backtrace(lev) return alignment def binary_distance(label1, label2): return 0.0 if label1 == label2 else 1.0 def jaccard_distance(label1, label2): return (len(label1.union(label2)) - len(label1.intersection(label2))) / len( label1.union(label2) ) def masi_distance(label1, label2): len_intersection = len(label1.intersection(label2)) len_union = len(label1.union(label2)) len_label1 = len(label1) len_label2 = len(label2) if len_label1 == len_label2 and len_label1 == len_intersection: m = 1 elif len_intersection == min(len_label1, len_label2): m = 0.67 elif len_intersection > 0: m = 0.33 else: m = 0 return 1 - len_intersection / len_union * m def interval_distance(label1, label2): try: return pow(label1 - label2, 2) except: print("non-numeric labels not supported with interval distance") def presence(label): return lambda x, y: 1.0 * ((label in x) == (label in y)) def fractional_presence(label): return ( lambda x, y: abs((1.0 / len(x)) - (1.0 / len(y))) * (label in x and label in y) or 0.0 * (label not in x and label not in y) or abs(1.0 / len(x)) * (label in x and label not in y) or (1.0 / len(y)) * (label not in x and label in y) ) def custom_distance(file): data = {} with open(file) as infile: for l in infile: labelA, labelB, dist = l.strip().split("\t") labelA = frozenset([labelA]) labelB = frozenset([labelB]) data[frozenset([labelA, labelB])] = float(dist) return lambda x, y: data[frozenset([x, y])] def jaro_similarity(s1, s2): len_s1, len_s2 = len(s1), len(s2) match_bound = max(len_s1, len_s2) // 2 - 1 matches = 0 transpositions = 0 flagged_1 = [] flagged_2 
= [] for i in range(len_s1): upperbound = min(i + match_bound, len_s2 - 1) lowerbound = max(0, i - match_bound) for j in range(lowerbound, upperbound + 1): if s1[i] == s2[j] and j not in flagged_2: matches += 1 flagged_1.append(i) flagged_2.append(j) break flagged_2.sort() for i, j in zip(flagged_1, flagged_2): if s1[i] != s2[j]: transpositions += 1 if matches == 0: return 0 else: return ( 1 / 3 * ( matches / len_s1 + matches / len_s2 + (matches - transpositions // 2) / matches ) ) def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4): if not 0 <= max_l * p <= 1: warnings.warn( str( "The product `max_l * p` might not fall between [0,1]." "Jaro-Winkler similarity might not be between 0 and 1." ) ) jaro_sim = jaro_similarity(s1, s2) l = 0 for s1_i, s2_i in zip(s1, s2): if s1_i == s2_i: l += 1 else: break if l == max_l: break return jaro_sim + (l * p * (1 - jaro_sim)) def demo(): string_distance_examples = [ ("rain", "shine"), ("abcdef", "acbdef"), ("language", "lnaguaeg"), ("language", "lnaugage"), ("language", "lngauage"), ] for s1, s2 in string_distance_examples: print(f"Edit distance btwn '{s1}' and '{s2}':", edit_distance(s1, s2)) print( f"Edit dist with transpositions btwn '{s1}' and '{s2}':", edit_distance(s1, s2, transpositions=True), ) print(f"Jaro similarity btwn '{s1}' and '{s2}':", jaro_similarity(s1, s2)) print( f"Jaro-Winkler similarity btwn '{s1}' and '{s2}':", jaro_winkler_similarity(s1, s2), ) print( f"Jaro-Winkler distance btwn '{s1}' and '{s2}':", 1 - jaro_winkler_similarity(s1, s2), ) s1 = {1, 2, 3, 4} s2 = {3, 4, 5} print("s1:", s1) print("s2:", s2) print("Binary distance:", binary_distance(s1, s2)) print("Jaccard distance:", jaccard_distance(s1, s2)) print("MASI distance:", masi_distance(s1, s2)) if __name__ == "__main__": demo()
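A short illustrative sketch of the distance metrics defined above, assuming the functions are importable from nltk.metrics.distance (they are also re-exported from nltk.metrics). The string pairs come from the module's docstrings and demo.

from nltk.metrics.distance import (
    edit_distance,
    edit_distance_align,
    jaro_winkler_similarity,
    jaccard_distance,
    masi_distance,
)

# Levenshtein distance: "rain" -> "shine" needs two substitutions and one
# insertion, so the distance is 3.
assert edit_distance("rain", "shine") == 3

# Allowing transpositions makes the adjacent swap in "acbdef" cost 1.
assert edit_distance("abcdef", "acbdef", transpositions=True) == 1

# The alignment maps each position of s1 onto a position of s2.
print(edit_distance_align("rain", "shine"))

# Jaro-Winkler rewards the shared prefix of "tanya" and "tonya".
print(round(jaro_winkler_similarity("tanya", "tonya"), 3))

# The set-based metrics operate on Python sets.
a, b = {1, 2, 3, 4}, {3, 4, 5}
print(jaccard_distance(a, b), masi_distance(a, b))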
natural language toolkit agreement metrics c 20012023 nltk project lauri hallila laurihallilagmail com url https www nltk org for license information see license txt counts paice s performance statistics for evaluating stemming algorithms what is required a dictionary of words grouped by their real lemmas a dictionary of words grouped by stems from a stemming algorithm when these are given understemming index ui overstemming index oi stemming weight sw and errorrate relative to truncation errt are counted references chris d paice 1994 an evaluation method for stemming algorithms in proceedings of sigir 4250 get original set of words used for analysis param lemmas a dictionary where keys are lemmas and values are sets or lists of words corresponding to that lemma type lemmas dictstr liststr return set of words that exist as values in the dictionary rtype setstr group words by stems defined by truncating them at given length param words set of words used for analysis param cutlength words are stemmed by cutting at this length type words setstr or liststr type cutlength int return dictionary where keys are stems and values are sets of words corresponding to that stem rtype dictstr setstr reference https en wikipedia orgwikilinelineintersection count intersection between two line segments defined by coordinate pairs param l1 tuple of two coordinate pairs defining the first line segment param l2 tuple of two coordinate pairs defining the second line segment type l1 tuplefloat float type l2 tuplefloat float return coordinates of the intersection rtype tuplefloat float when lines are parallel they must be on the yaxis we can ignore xaxis because we stop counting the truncation line when we get there there are no other options as ui xaxis grows and oi yaxis diminishes when we go along the truncation line get derivative of the line from 0 0 to given coordinates param coordinates a coordinate pair type coordinates tuplefloat float return derivative inf if x is zero rtype float count understemmed and overstemmed pairs for lemma stem pair with common words param lemmawords set or list of words corresponding to certain lemma param stems a dictionary where keys are stems and values are sets or lists of words corresponding to that stem type lemmawords setstr or liststr type stems dictstr setstr return amount of understemmed and overstemmed pairs contributed by words existing in both lemmawords and stems rtype tuplefloat float unachieved merge total wrongly merged total calculate actual and maximum possible amounts of understemmed and overstemmed word pairs param lemmas a dictionary where keys are lemmas and values are sets or lists of words corresponding to that lemma param stems a dictionary where keys are stems and values are sets or lists of words corresponding to that stem type lemmas dictstr liststr type stems dictstr setstr return global unachieved merge total gumt global desired merge total gdmt global wrongly merged total gwmt and global desired nonmerge total gdnt rtype tuplefloat float float float desired merge total desired nonmerge total for each lemma stem pair with common words count how many pairs are understemmed and overstemmed add to total undesired and wronglymerged totals each object is counted twice so divide by two count understemming index ui overstemming index oi and stemming weight sw param gumt gdmt gwmt gdnt global unachieved merge total gumt global desired merge total gdmt global wrongly merged total gwmt and global desired nonmerge total gdnt type gumt gdmt gwmt gdnt float 
return understemming index ui overstemming index oi and stemming weight sw rtype tuplefloat float float calculate understemming index ui overstemming index oi and stemming weight sw if gdmt max merge total is 0 define ui as 0 if gdnt max nonmerge total is 0 define oi as 0 oi and ui are 0 define sw as not a number ui is 0 define sw as infinity class for storing lemmas stems and evaluation metrics def initself lemmas stems self lemmas lemmas self stems stems self coords self gumt self gdmt self gwmt self gdnt none none none none self ui self oi self sw none none none self errt none self update def strself text global unachieved merge total gumt sn self gumt text appendglobal desired merge total gdmt sn self gdmt text appendglobal wronglymerged total gwmt sn self gwmt text appendglobal desired nonmerge total gdnt sn self gdnt text appendunderstemming index gumt gdmt sn self ui text appendoverstemming index gwmt gdnt sn self oi text appendstemming weight oi ui sn self sw text appenderrorrate relative to truncation errt srn self errt coordinates joins s item for item in self coords text appendtruncation line s coordinates return jointext def gettruncationindexesself words cutlength truncated truncatewords cutlength gumt gdmt gwmt gdnt calculateself lemmas truncated ui oi indexesgumt gdmt gwmt gdnt 2 return ui oi def gettruncationcoordinatesself cutlength0 words getwordsfromdictionaryself lemmas maxlength maxlenword for word in words truncate words from different points until 0 0 ui oi segment crosses the truncation line coords while cutlength maxlength get ui oi pair of current truncation point pair self gettruncationindexeswords cutlength store only new coordinates so we ll have an actual line segment when counting the intersection point if pair not in coords coords appendpair if pair 0 0 0 0 stop counting if truncation line goes through origo length from origo to truncation line is 0 return coords if lencoords 2 and pair0 0 0 derivative1 getderivativecoords2 derivative2 getderivativecoords1 derivative of the truncation line is a decreasing value when it passes stemming weight we ve found the segment of truncation line intersecting with 0 0 ui oi segment if derivative1 self sw derivative2 return coords cutlength 1 return coords def errtself count ui oi pairs for truncation points until we find the segment where ui oi crosses the truncation line self coords self gettruncationcoordinates if 0 0 0 0 in self coords truncation line goes through origo so errt cannot be counted if self ui self oi 0 0 0 0 return floatinf else return floatnan if self ui self oi 0 0 0 0 ui oi is origo define errt as 0 0 return 0 0 count the intersection point note that self ui self oi cannot be 0 0 0 0 and self coords has different coordinates so we have actual line segments instead of a line segment and a point intersection countintersection 0 0 self ui self oi self coords2 count op length of the line from origo to ui oi op sqrtself ui2 self oi2 count ot length of the line from origo to truncation line that goes through ui oi ot sqrtintersection0 2 intersection1 2 op ot tells how well the stemming algorithm works compared to just truncating words return op ot def updateself demonstration of the module some words with their real lemmas lemmas kneel kneel knelt range range ranged ring ring rang rung same words with stems from a stemming algorithm stems kneel kneel knelt knelt rang rang range ranged ring ring rung rung printwords grouped by their lemmas for lemma in sortedlemmas print formatlemma joinlemmaslemma print 
printsame words grouped by a stemming algorithm for stem in sortedstems print formatstem joinstemsstem print p paicelemmas stems printp print let s change results from a stemming algorithm stems kneel kneel knelt knelt rang rang range range ranged ring ring rung rung printcounting stats after changing stemming results for stem in sortedstems print formatstem joinstemsstem print p stems stems p update printp if name main demo natural language toolkit agreement metrics c 2001 2023 nltk project lauri hallila laurihallila gmail com url https www nltk org for license information see license txt counts paice s performance statistics for evaluating stemming algorithms what is required a dictionary of words grouped by their real lemmas a dictionary of words grouped by stems from a stemming algorithm when these are given understemming index ui overstemming index oi stemming weight sw and error rate relative to truncation errt are counted references chris d paice 1994 an evaluation method for stemming algorithms in proceedings of sigir 42 50 get original set of words used for analysis param lemmas a dictionary where keys are lemmas and values are sets or lists of words corresponding to that lemma type lemmas dict str list str return set of words that exist as values in the dictionary rtype set str group words by stems defined by truncating them at given length param words set of words used for analysis param cutlength words are stemmed by cutting at this length type words set str or list str type cutlength int return dictionary where keys are stems and values are sets of words corresponding to that stem rtype dict str set str reference https en wikipedia org wiki line line_intersection count intersection between two line segments defined by coordinate pairs param l1 tuple of two coordinate pairs defining the first line segment param l2 tuple of two coordinate pairs defining the second line segment type l1 tuple float float type l2 tuple float float return coordinates of the intersection rtype tuple float float lines are parallel when lines are parallel they must be on the y axis we can ignore x axis because we stop counting the truncation line when we get there there are no other options as ui x axis grows and oi y axis diminishes when we go along the truncation line get derivative of the line from 0 0 to given coordinates param coordinates a coordinate pair type coordinates tuple float float return derivative inf if x is zero rtype float count understemmed and overstemmed pairs for lemma stem pair with common words param lemmawords set or list of words corresponding to certain lemma param stems a dictionary where keys are stems and values are sets or lists of words corresponding to that stem type lemmawords set str or list str type stems dict str set str return amount of understemmed and overstemmed pairs contributed by words existing in both lemmawords and stems rtype tuple float float unachieved merge total wrongly merged total calculate actual and maximum possible amounts of understemmed and overstemmed word pairs param lemmas a dictionary where keys are lemmas and values are sets or lists of words corresponding to that lemma param stems a dictionary where keys are stems and values are sets or lists of words corresponding to that stem type lemmas dict str list str type stems dict str set str return global unachieved merge total gumt global desired merge total gdmt global wrongly merged total gwmt and global desired non merge total gdnt rtype tuple float float float float desired merge total 
desired non merge total for each lemma stem pair with common words count how many pairs are understemmed and overstemmed add to total undesired and wrongly merged totals each object is counted twice so divide by two count understemming index ui overstemming index oi and stemming weight sw param gumt gdmt gwmt gdnt global unachieved merge total gumt global desired merge total gdmt global wrongly merged total gwmt and global desired non merge total gdnt type gumt gdmt gwmt gdnt float return understemming index ui overstemming index oi and stemming weight sw rtype tuple float float float calculate understemming index ui overstemming index oi and stemming weight sw if gdmt max merge total is 0 define ui as 0 if gdnt max non merge total is 0 define oi as 0 oi and ui are 0 define sw as not a number ui is 0 define sw as infinity class for storing lemmas stems and evaluation metrics param lemmas a dictionary where keys are lemmas and values are sets or lists of words corresponding to that lemma param stems a dictionary where keys are stems and values are sets or lists of words corresponding to that stem type lemmas dict str list str type stems dict str set str count ui oi when stemming is done by truncating words at cutlength param words words used for the analysis param cutlength words are stemmed by cutting them at this length type words set str or list str type cutlength int return understemming and overstemming indexes rtype tuple int int count ui oi pairs for truncation points until we find the segment where ui oi crosses the truncation line param cutlength optional parameter to start counting from ui oi coordinates gotten by stemming at this length useful for speeding up the calculations when you know the approximate location of the intersection type cutlength int return list of coordinate pairs that define the truncation line rtype list tuple float float truncate words from different points until 0 0 ui oi segment crosses the truncation line get ui oi pair of current truncation point store only new coordinates so we ll have an actual line segment when counting the intersection point stop counting if truncation line goes through origo length from origo to truncation line is 0 derivative of the truncation line is a decreasing value when it passes stemming weight we ve found the segment of truncation line intersecting with 0 0 ui oi segment count error rate relative to truncation errt return errt length of the line from origo to ui oi divided by the length of the line from origo to the point defined by the same line when extended until the truncation line rtype float count ui oi pairs for truncation points until we find the segment where ui oi crosses the truncation line truncation line goes through origo so errt cannot be counted ui oi is origo define errt as 0 0 count the intersection point note that self ui self oi cannot be 0 0 0 0 and self coords has different coordinates so we have actual line segments instead of a line segment and a point count op length of the line from origo to ui oi count ot length of the line from origo to truncation line that goes through ui oi op ot tells how well the stemming algorithm works compared to just truncating words update statistics after lemmas and stems have been set demonstration of the module some words with their real lemmas same words with stems from a stemming algorithm let s change results from a stemming algorithm
from math import sqrt def get_words_from_dictionary(lemmas): words = set() for lemma in lemmas: words.update(set(lemmas[lemma])) return words def _truncate(words, cutlength): stems = {} for word in words: stem = word[:cutlength] try: stems[stem].update([word]) except KeyError: stems[stem] = {word} return stems def _count_intersection(l1, l2): x1, y1 = l1[0] x2, y2 = l1[1] x3, y3 = l2[0] x4, y4 = l2[1] denominator = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4) if denominator == 0.0: if x1 == x2 == x3 == x4 == 0.0: return (0.0, y4) x = ( (x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4) ) / denominator y = ( (x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4) ) / denominator return (x, y) def _get_derivative(coordinates): try: return coordinates[1] / coordinates[0] except ZeroDivisionError: return float("inf") def _calculate_cut(lemmawords, stems): umt, wmt = 0.0, 0.0 for stem in stems: cut = set(lemmawords) & set(stems[stem]) if cut: cutcount = len(cut) stemcount = len(stems[stem]) umt += cutcount * (len(lemmawords) - cutcount) wmt += cutcount * (stemcount - cutcount) return (umt, wmt) def _calculate(lemmas, stems): n = sum(len(lemmas[word]) for word in lemmas) gdmt, gdnt, gumt, gwmt = (0.0, 0.0, 0.0, 0.0) for lemma in lemmas: lemmacount = len(lemmas[lemma]) gdmt += lemmacount * (lemmacount - 1) gdnt += lemmacount * (n - lemmacount) umt, wmt = _calculate_cut(lemmas[lemma], stems) gumt += umt gwmt += wmt return (gumt / 2, gdmt / 2, gwmt / 2, gdnt / 2) def _indexes(gumt, gdmt, gwmt, gdnt): try: ui = gumt / gdmt except ZeroDivisionError: ui = 0.0 try: oi = gwmt / gdnt except ZeroDivisionError: oi = 0.0 try: sw = oi / ui except ZeroDivisionError: if oi == 0.0: sw = float("nan") else: sw = float("inf") return (ui, oi, sw) class Paice: def __init__(self, lemmas, stems): self.lemmas = lemmas self.stems = stems self.coords = [] self.gumt, self.gdmt, self.gwmt, self.gdnt = (None, None, None, None) self.ui, self.oi, self.sw = (None, None, None) self.errt = None self.update() def __str__(self): text = ["Global Unachieved Merge Total (GUMT): %s\n" % self.gumt] text.append("Global Desired Merge Total (GDMT): %s\n" % self.gdmt) text.append("Global Wrongly-Merged Total (GWMT): %s\n" % self.gwmt) text.append("Global Desired Non-merge Total (GDNT): %s\n" % self.gdnt) text.append("Understemming Index (GUMT / GDMT): %s\n" % self.ui) text.append("Overstemming Index (GWMT / GDNT): %s\n" % self.oi) text.append("Stemming Weight (OI / UI): %s\n" % self.sw) text.append("Error-Rate Relative to Truncation (ERRT): %s\r\n" % self.errt) coordinates = " ".join(["(%s, %s)" % item for item in self.coords]) text.append("Truncation line: %s" % coordinates) return "".join(text) def _get_truncation_indexes(self, words, cutlength): truncated = _truncate(words, cutlength) gumt, gdmt, gwmt, gdnt = _calculate(self.lemmas, truncated) ui, oi = _indexes(gumt, gdmt, gwmt, gdnt)[:2] return (ui, oi) def _get_truncation_coordinates(self, cutlength=0): words = get_words_from_dictionary(self.lemmas) maxlength = max(len(word) for word in words) coords = [] while cutlength <= maxlength: pair = self._get_truncation_indexes(words, cutlength) if pair not in coords: coords.append(pair) if pair == (0.0, 0.0): return coords if len(coords) >= 2 and pair[0] > 0.0: derivative1 = _get_derivative(coords[-2]) derivative2 = _get_derivative(coords[-1]) if derivative1 >= self.sw >= derivative2: return coords cutlength += 1 return coords def _errt(self): self.coords = self._get_truncation_coordinates() if (0.0, 0.0) in 
self.coords: if (self.ui, self.oi) != (0.0, 0.0): return float("inf") else: return float("nan") if (self.ui, self.oi) == (0.0, 0.0): return 0.0 intersection = _count_intersection( ((0, 0), (self.ui, self.oi)), self.coords[-2:] ) op = sqrt(self.ui**2 + self.oi**2) ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2) return op / ot def update(self): self.gumt, self.gdmt, self.gwmt, self.gdnt = _calculate(self.lemmas, self.stems) self.ui, self.oi, self.sw = _indexes(self.gumt, self.gdmt, self.gwmt, self.gdnt) self.errt = self._errt() def demo(): lemmas = { "kneel": ["kneel", "knelt"], "range": ["range", "ranged"], "ring": ["ring", "rang", "rung"], } stems = { "kneel": ["kneel"], "knelt": ["knelt"], "rang": ["rang", "range", "ranged"], "ring": ["ring"], "rung": ["rung"], } print("Words grouped by their lemmas:") for lemma in sorted(lemmas): print("{} => {}".format(lemma, " ".join(lemmas[lemma]))) print() print("Same words grouped by a stemming algorithm:") for stem in sorted(stems): print("{} => {}".format(stem, " ".join(stems[stem]))) print() p = Paice(lemmas, stems) print(p) print() stems = { "kneel": ["kneel"], "knelt": ["knelt"], "rang": ["rang"], "range": ["range", "ranged"], "ring": ["ring"], "rung": ["rung"], } print("Counting stats after changing stemming results:") for stem in sorted(stems): print("{} => {}".format(stem, " ".join(stems[stem]))) print() p.stems = stems p.update() print(p) if __name__ == "__main__": demo()
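The Paice class above can be exercised directly on small lemma/stem groupings; this sketch reuses the toy data from the module's demo and prints the four headline statistics. It assumes the class is importable from nltk.metrics.paice.

from nltk.metrics.paice import Paice

lemmas = {
    "kneel": ["kneel", "knelt"],
    "range": ["range", "ranged"],
    "ring": ["ring", "rang", "rung"],
}
stems = {
    "kneel": ["kneel"],
    "knelt": ["knelt"],
    "rang": ["rang", "range", "ranged"],
    "ring": ["ring"],
    "rung": ["rung"],
}

p = Paice(lemmas, stems)

# Understemming index (UI), overstemming index (OI), stemming weight (SW)
# and error rate relative to truncation (ERRT).
print(p.ui, p.oi, p.sw, p.errt)

# The full report, including the truncation line coordinates.
print(p)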
natural language toolkit evaluation c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt given a list of reference values and a corresponding list of test values return the fraction of corresponding values that are equal in particular return the fraction of indices 0ilentest such that testi referencei type reference list param reference an ordered list of reference values type test list param test a list of values to compare against the corresponding reference values raise valueerror if reference and length do not have the same length given a set of reference values and a set of test values return the fraction of test values that appear in the reference set in particular return cardreference intersection testcardtest if test is empty then return none type reference set param reference a set of reference values type test set param test a set of values to compare against the reference set rtype float or none given a set of reference values and a set of test values return the fraction of reference values that appear in the test set in particular return cardreference intersection testcardreference if reference is empty then return none type reference set param reference a set of reference values type test set param test a set of values to compare against the reference set rtype float or none given a set of reference values and a set of test values return the fmeasure of the test values when compared against the reference values the fmeasure is the harmonic mean of the precision and recall weighted by alpha in particular given the precision p and recall r defined by p cardreference intersection testcardtest r cardreference intersection testcardreference the fmeasure is 1alphap 1alphar if either reference or test is empty then fmeasure returns none type reference set param reference a set of reference values type test set param test a set of values to compare against the reference set rtype float or none given a list of reference values and a corresponding list of test probability distributions return the average log likelihood of the reference values given the probability distributions param reference a list of reference values type reference list param test a list of probability distributions over values to compare against the corresponding reference values type test listprobdisti return the average value of dist logprobval returns an approximate significance level between two lists of independently generated test values approximate randomization calculates significance by randomly drawing from a sample of the possible permutations at the limit of the number of possible permutations the significance level is exact the approximate significance level is the sample mean number of times the statistic of the permutated lists varies from the actual statistic of the unpermuted argument lists return a tuple containing an approximate significance level the count of the number of times the pseudostatistic varied from the actual statistic and the number of shuffles rtype tuple param a a list of test values type a list param b another list of independently generated test values type b list there s no point in trying to shuffle beyond all possible permutations natural language toolkit evaluation c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt given a list of reference values and a corresponding list of test 
values return the fraction of corresponding values that are equal in particular return the fraction of indices 0 i len test such that test i reference i type reference list param reference an ordered list of reference values type test list param test a list of values to compare against the corresponding reference values raise valueerror if reference and length do not have the same length given a set of reference values and a set of test values return the fraction of test values that appear in the reference set in particular return card reference intersection test card test if test is empty then return none type reference set param reference a set of reference values type test set param test a set of values to compare against the reference set rtype float or none given a set of reference values and a set of test values return the fraction of reference values that appear in the test set in particular return card reference intersection test card reference if reference is empty then return none type reference set param reference a set of reference values type test set param test a set of values to compare against the reference set rtype float or none given a set of reference values and a set of test values return the f measure of the test values when compared against the reference values the f measure is the harmonic mean of the precision and recall weighted by alpha in particular given the precision p and recall r defined by p card reference intersection test card test r card reference intersection test card reference the f measure is 1 alpha p 1 alpha r if either reference or test is empty then f_measure returns none type reference set param reference a set of reference values type test set param test a set of values to compare against the reference set rtype float or none given a list of reference values and a corresponding list of test probability distributions return the average log likelihood of the reference values given the probability distributions param reference a list of reference values type reference list param test a list of probability distributions over values to compare against the corresponding reference values type test list probdisti return the average value of dist logprob val returns an approximate significance level between two lists of independently generated test values approximate randomization calculates significance by randomly drawing from a sample of the possible permutations at the limit of the number of possible permutations the significance level is exact the approximate significance level is the sample mean number of times the statistic of the permutated lists varies from the actual statistic of the unpermuted argument lists return a tuple containing an approximate significance level the count of the number of times the pseudo statistic varied from the actual statistic and the number of shuffles rtype tuple param a a list of test values type a list param b another list of independently generated test values type b list there s no point in trying to shuffle beyond all possible permutations
import operator from functools import reduce from math import fabs from random import shuffle try: from scipy.stats.stats import betai except ImportError: betai = None from nltk.util import LazyConcatenation, LazyMap def accuracy(reference, test): if len(reference) != len(test): raise ValueError("Lists must have the same length.") return sum(x == y for x, y in zip(reference, test)) / len(test) def precision(reference, test): if not hasattr(reference, "intersection") or not hasattr(test, "intersection"): raise TypeError("reference and test should be sets") if len(test) == 0: return None else: return len(reference.intersection(test)) / len(test) def recall(reference, test): if not hasattr(reference, "intersection") or not hasattr(test, "intersection"): raise TypeError("reference and test should be sets") if len(reference) == 0: return None else: return len(reference.intersection(test)) / len(reference) def f_measure(reference, test, alpha=0.5): p = precision(reference, test) r = recall(reference, test) if p is None or r is None: return None if p == 0 or r == 0: return 0 return 1.0 / (alpha / p + (1 - alpha) / r) def log_likelihood(reference, test): if len(reference) != len(test): raise ValueError("Lists must have the same length.") total_likelihood = sum(dist.logprob(val) for (val, dist) in zip(reference, test)) return total_likelihood / len(reference) def approxrand(a, b, **kwargs): shuffles = kwargs.get("shuffles", 999) shuffles = min(shuffles, reduce(operator.mul, range(1, len(a) + len(b) + 1))) stat = kwargs.get("statistic", lambda lst: sum(lst) / len(lst)) verbose = kwargs.get("verbose", False) if verbose: print("shuffles: %d" % shuffles) actual_stat = fabs(stat(a) - stat(b)) if verbose: print("actual statistic: %f" % actual_stat) print("-" * 60) c = 1e-100 lst = LazyConcatenation([a, b]) indices = list(range(len(a) + len(b))) for i in range(shuffles): if verbose and i % 10 == 0: print("shuffle: %d" % i) shuffle(indices) pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[: len(a)])) pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a) :])) pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b) if pseudo_stat >= actual_stat: c += 1 if verbose and i % 10 == 0: print("pseudo-statistic: %f" % pseudo_stat) print("significance: %f" % ((c + 1) / (i + 1))) print("-" * 60) significance = (c + 1) / (shuffles + 1) if verbose: print("significance: %f" % significance) if betai: for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]: print(f"prob(phi<={phi:f}): {betai(c, shuffles, phi):f}") return (significance, c, shuffles) def demo(): print("-" * 75) reference = "DET NN VB DET JJ NN NN IN DET NN".split() test = "DET VB VB DET NN NN NN IN DET NN".split() print("Reference =", reference) print("Test =", test) print("Accuracy:", accuracy(reference, test)) print("-" * 75) reference_set = set(reference) test_set = set(test) print("Reference =", reference_set) print("Test = ", test_set) print("Precision:", precision(reference_set, test_set)) print(" Recall:", recall(reference_set, test_set)) print("F-Measure:", f_measure(reference_set, test_set)) print("-" * 75) if __name__ == "__main__": demo()
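A minimal sketch of the evaluation helpers above, assuming they are importable from nltk.metrics.scores (they are also re-exported from nltk.metrics). accuracy compares two equal-length sequences position by position, while precision, recall and f_measure operate on sets.

from nltk.metrics.scores import accuracy, precision, recall, f_measure

reference = "DET NN VB DET JJ NN NN IN DET NN".split()
test = "DET VB VB DET NN NN NN IN DET NN".split()

# Two of the ten positions disagree, so accuracy is 0.8.
print(accuracy(reference, test))

# The set-based scores ignore position and frequency.
ref_set, test_set = set(reference), set(test)
print(precision(ref_set, test_set), recall(ref_set, test_set))
print(f_measure(ref_set, test_set))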
natural language toolkit text segmentation metrics c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com david doukhan david doukhangmail com url https www nltk org for license information see license txt text segmentation metrics 1 windowdiff pevzner l and hearst m a critique and improvement of an evaluation metric for text segmentation computational linguistics 28 1936 2 generalized hamming distance bookstein a kulyukin v a raita t generalized hamming distance information retrieval 5 2002 pp 353375 baseline implementation in c http digital cs usu eduvkulyukinvkwebsoftwareghdghd html study describing benefits of generalized hamming distance versus windowdiff for evaluating text segmentation tasks begsten y quel indice pour mesurer l efficacite en segmentation de textes taln 2009 3 pk text segmentation metric beeferman d berger a lafferty j 1999 statistical models for text segmentation machine learning 34 177210 compute the windowdiff score for a pair of segmentations a segmentation is any sequence over a vocabulary of two items e g 0 1 where the specified boundary value is used to mark the edge of a segmentation s1 000100000010 s2 000010000100 s3 100000010000 2f windowdiffs1 s1 3 0 00 2f windowdiffs1 s2 3 0 30 2f windowdiffs2 s3 3 0 80 param seg1 a segmentation type seg1 str or list param seg2 a segmentation type seg2 str or list param k window width type k int param boundary boundary value type boundary str or int or bool param weighted use the weighted variant of windowdiff type weighted boolean rtype float generalized hamming distance boundaries are at the same location no transformation required boundary match through a deletion boundary match through an insertion compute the generalized hamming distance for a reference and a hypothetical segmentation corresponding to the cost related to the transformation of the hypothetical segmentation into the reference segmentation through boundary insertion deletion and shift operations a segmentation is any sequence over a vocabulary of two items e g 0 1 where the specified boundary value is used to mark the edge of a segmentation recommended parameter values are a shiftcostcoeff of 2 associated with a inscost and delcost equal to the mean segment length in the reference segmentation same examples as kulyukin c implementation ghd 1100100000 1100010000 1 0 1 0 0 5 0 5 ghd 1100100000 1100000001 1 0 1 0 0 5 2 0 ghd 011 110 1 0 1 0 0 5 1 0 ghd 1 0 1 0 1 0 0 5 1 0 ghd 111 000 1 0 1 0 0 5 3 0 ghd 000 111 1 0 2 0 0 5 6 0 param ref the reference segmentation type ref str or list param hyp the hypothetical segmentation type hyp str or list param inscost insertion cost type inscost float param delcost deletion cost type delcost float param shiftcostcoeff constant used to compute the cost of a shift shift cost shiftcostcoeff i j where i and j are the positions indicating the shift type shiftcostcoeff float param boundary boundary value type boundary str or int or bool rtype float beeferman s pk text segmentation evaluation metric compute the pk metric for a pair of segmentations a segmentation is any sequence over a vocabulary of two items e g 0 1 where the specified boundary value is used to mark the edge of a segmentation 2f pk 0100 100 1 400 2 0 50 2f pk 0100 100 0 400 2 0 50 2f pk 0100 100 0100 100 2 0 00 param ref the reference segmentation type ref str or list param hyp the segmentation to evaluate type hyp str or list param k window size if none set to half of the average reference segment length type boundary str 
or int or bool param boundary boundary value type boundary str or int or bool rtype float natural language toolkit text segmentation metrics c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com david doukhan david doukhan gmail com url https www nltk org for license information see license txt text segmentation metrics 1 windowdiff pevzner l and hearst m a critique and improvement of an evaluation metric for text segmentation computational linguistics 28 19 36 2 generalized hamming distance bookstein a kulyukin v a raita t generalized hamming distance information retrieval 5 2002 pp 353 375 baseline implementation in c http digital cs usu edu vkulyukin vkweb software ghd ghd html study describing benefits of generalized hamming distance versus windowdiff for evaluating text segmentation tasks begsten y quel indice pour mesurer l efficacite en segmentation de textes taln 2009 3 pk text segmentation metric beeferman d berger a lafferty j 1999 statistical models for text segmentation machine learning 34 177 210 compute the windowdiff score for a pair of segmentations a segmentation is any sequence over a vocabulary of two items e g 0 1 where the specified boundary value is used to mark the edge of a segmentation s1 000100000010 s2 000010000100 s3 100000010000 2f windowdiff s1 s1 3 0 00 2f windowdiff s1 s2 3 0 30 2f windowdiff s2 s3 3 0 80 param seg1 a segmentation type seg1 str or list param seg2 a segmentation type seg2 str or list param k window width type k int param boundary boundary value type boundary str or int or bool param weighted use the weighted variant of windowdiff type weighted boolean rtype float generalized hamming distance boundaries are at the same location no transformation required boundary match through a deletion boundary match through an insertion compute the generalized hamming distance for a reference and a hypothetical segmentation corresponding to the cost related to the transformation of the hypothetical segmentation into the reference segmentation through boundary insertion deletion and shift operations a segmentation is any sequence over a vocabulary of two items e g 0 1 where the specified boundary value is used to mark the edge of a segmentation recommended parameter values are a shift_cost_coeff of 2 associated with a ins_cost and del_cost equal to the mean segment length in the reference segmentation same examples as kulyukin c implementation ghd 1100100000 1100010000 1 0 1 0 0 5 0 5 ghd 1100100000 1100000001 1 0 1 0 0 5 2 0 ghd 011 110 1 0 1 0 0 5 1 0 ghd 1 0 1 0 1 0 0 5 1 0 ghd 111 000 1 0 1 0 0 5 3 0 ghd 000 111 1 0 2 0 0 5 6 0 param ref the reference segmentation type ref str or list param hyp the hypothetical segmentation type hyp str or list param ins_cost insertion cost type ins_cost float param del_cost deletion cost type del_cost float param shift_cost_coeff constant used to compute the cost of a shift shift cost shift_cost_coeff i j where i and j are the positions indicating the shift type shift_cost_coeff float param boundary boundary value type boundary str or int or bool rtype float beeferman s pk text segmentation evaluation metric compute the pk metric for a pair of segmentations a segmentation is any sequence over a vocabulary of two items e g 0 1 where the specified boundary value is used to mark the edge of a segmentation 2f pk 0100 100 1 400 2 0 50 2f pk 0100 100 0 400 2 0 50 2f pk 0100 100 0100 100 2 0 00 param ref the reference segmentation type ref str or list param hyp the segmentation to evaluate 
type hyp str or list param k window size if none set to half of the average reference segment length type boundary str or int or bool param boundary boundary value type boundary str or int or bool rtype float
try: import numpy as np except ImportError: pass def windowdiff(seg1, seg2, k, boundary="1", weighted=False): if len(seg1) != len(seg2): raise ValueError("Segmentations have unequal length") if k > len(seg1): raise ValueError( "Window width k should be smaller or equal than segmentation lengths" ) wd = 0 for i in range(len(seg1) - k + 1): ndiff = abs(seg1[i : i + k].count(boundary) - seg2[i : i + k].count(boundary)) if weighted: wd += ndiff else: wd += min(1, ndiff) return wd / (len(seg1) - k + 1.0) def _init_mat(nrows, ncols, ins_cost, del_cost): mat = np.empty((nrows, ncols)) mat[0, :] = ins_cost * np.arange(ncols) mat[:, 0] = del_cost * np.arange(nrows) return mat def _ghd_aux(mat, rowv, colv, ins_cost, del_cost, shift_cost_coeff): for i, rowi in enumerate(rowv): for j, colj in enumerate(colv): shift_cost = shift_cost_coeff * abs(rowi - colj) + mat[i, j] if rowi == colj: tcost = mat[i, j] elif rowi > colj: tcost = del_cost + mat[i, j + 1] else: tcost = ins_cost + mat[i + 1, j] mat[i + 1, j + 1] = min(tcost, shift_cost) def ghd(ref, hyp, ins_cost=2.0, del_cost=2.0, shift_cost_coeff=1.0, boundary="1"): ref_idx = [i for (i, val) in enumerate(ref) if val == boundary] hyp_idx = [i for (i, val) in enumerate(hyp) if val == boundary] nref_bound = len(ref_idx) nhyp_bound = len(hyp_idx) if nref_bound == 0 and nhyp_bound == 0: return 0.0 elif nref_bound > 0 and nhyp_bound == 0: return nref_bound * ins_cost elif nref_bound == 0 and nhyp_bound > 0: return nhyp_bound * del_cost mat = _init_mat(nhyp_bound + 1, nref_bound + 1, ins_cost, del_cost) _ghd_aux(mat, hyp_idx, ref_idx, ins_cost, del_cost, shift_cost_coeff) return mat[-1, -1] def pk(ref, hyp, k=None, boundary="1"): if k is None: k = int(round(len(ref) / (ref.count(boundary) * 2.0))) err = 0 for i in range(len(ref) - k + 1): r = ref[i : i + k].count(boundary) > 0 h = hyp[i : i + k].count(boundary) > 0 if r != h: err += 1 return err / (len(ref) - k + 1.0)
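A short sketch of the segmentation metrics above. windowdiff and pk are pure Python, while ghd relies on numpy, matching the module's optional import; the segmentations and parameter values are taken from the docstrings above.

from nltk.metrics.segmentation import windowdiff, pk, ghd

s1 = "000100000010"
s2 = "000010000100"

# windowdiff slides a window of width k and compares boundary counts;
# for these two segmentations and k=3 the docstring gives 0.30.
print(round(windowdiff(s1, s2, 3), 2))

# pk checks, per window, whether both segmentations agree on "same
# segment or not"; k defaults to half the mean reference segment length.
print(round(pk(s1, s2), 2))

# ghd counts the cheapest insertions, deletions and shifts that turn the
# hypothesis boundaries into the reference boundaries.
print(ghd(s1, s2, ins_cost=1.0, del_cost=1.0, shift_cost_coeff=0.5))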
natural language toolkit spearman rank correlation c 20012023 nltk project joel nothman jnothmanstudent usyd edu au url https www nltk org for license information see license txt tools for comparing ranked lists finds the difference between the values in ranks1 and ranks2 for keys present in both dicts if the arguments are not dicts they are converted from key rank sequences returns the spearman correlation coefficient for two rankings which should be dicts or sequences of key rank the coefficient ranges from 1 0 ranks are opposite to 1 0 ranks are identical and is only calculated for keys in both rankings for meaningful results remove keys present in only one list before ranking n 0 res 0 for k d in rankdistsranks1 ranks2 res d d n 1 try return 1 6 res n n n 1 except zerodivisionerror result is undefined if only one item is ranked return 0 0 def ranksfromsequenceseq return k i for i k in enumerateseq def ranksfromscoresscores rankgap1e15 prevscore none rank 0 for i key score in enumeratescores try if absscore prevscore rankgap rank i except typeerror pass yield key rank prevscore score natural language toolkit spearman rank correlation c 2001 2023 nltk project joel nothman jnothman student usyd edu au url https www nltk org for license information see license txt tools for comparing ranked lists finds the difference between the values in ranks1 and ranks2 for keys present in both dicts if the arguments are not dicts they are converted from key rank sequences returns the spearman correlation coefficient for two rankings which should be dicts or sequences of key rank the coefficient ranges from 1 0 ranks are opposite to 1 0 ranks are identical and is only calculated for keys in both rankings for meaningful results remove keys present in only one list before ranking result is undefined if only one item is ranked given a sequence yields each element with an increasing rank suitable for use as an argument to spearman_correlation given a sequence of key score tuples yields each key with an increasing rank tying with previous key s rank if the difference between their scores is less than rank_gap suitable for use as an argument to spearman_correlation
def _rank_dists(ranks1, ranks2): ranks1 = dict(ranks1) ranks2 = dict(ranks2) for k in ranks1: try: yield k, ranks1[k] - ranks2[k] except KeyError: pass def spearman_correlation(ranks1, ranks2): n = 0 res = 0 for k, d in _rank_dists(ranks1, ranks2): res += d * d n += 1 try: return 1 - (6 * res / (n * (n * n - 1))) except ZeroDivisionError: return 0.0 def ranks_from_sequence(seq): return ((k, i) for i, k in enumerate(seq)) def ranks_from_scores(scores, rank_gap=1e-15): prev_score = None rank = 0 for i, (key, score) in enumerate(scores): try: if abs(score - prev_score) > rank_gap: rank = i except TypeError: pass yield key, rank prev_score = score
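A brief sketch of how the Spearman utilities above fit together; the word orderings and scores are made up for illustration.

from nltk.metrics.spearman import (
    ranks_from_scores,
    ranks_from_sequence,
    spearman_correlation,
)

ranking_a = ["the", "of", "and", "to", "a"]
ranking_b = ["the", "and", "of", "a", "to"]

# Turn each ordering into (key, rank) pairs and compare the two rankings.
rho = spearman_correlation(
    ranks_from_sequence(ranking_a), ranks_from_sequence(ranking_b)
)
print(rho)  # 0.8 for these two orderings

# ranks_from_scores ties items whose scores differ by less than rank_gap.
scored = [("of", 10.0), ("the", 10.0 - 1e-16), ("and", 3.0)]
print(list(ranks_from_scores(scored)))  # [('of', 0), ('the', 0), ('and', 2)]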
Natural Language Toolkit: Miscellaneous modules
(C) 2001-2023 NLTK Project
Author: Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT
from nltk.misc.babelfish import babelize_shell from nltk.misc.chomsky import generate_chomsky from nltk.misc.minimalset import MinimalSet from nltk.misc.wordfinder import word_finder
This module previously provided an interface to the Babelfish online translation service; this service is no longer available. The module is kept in the NLTK source code in order to provide better error messages for people following the NLTK Book 2.0.
def babelize_shell(): print("Babelfish online translation service is no longer available.")
Natural Language Toolkit: Minimal Sets
(C) 2001-2023 NLTK Project
Author: Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Find contexts where more than one possible target value can appear. E.g. if targets are word-initial letters and contexts are the remainders of words, then we would like to find cases like "fat" vs "cat", and "training" vs "draining". If targets are parts-of-speech and contexts are words, then we would like to find cases like wind (noun) "air in rapid motion", vs wind (verb) "coil, wrap".

MinimalSet.__init__(parameters=None): Create a new minimal set. :param parameters: The (context, target, display) tuples for the item :type parameters: list(tuple(str, str, str)). The instance records the targets (the contrastive information), the contexts (what we are controlling for), what has been seen, and what will be displayed.

MinimalSet.add(context, target, display): Add a new item to the minimal set, having the specified context, target, and display form. Stores the set of targets that occurred in this context, keeps track of which contexts and targets have been seen, and, for a given context and target, stores the display form. :param context: The context in which the item of interest appears :type context: str :param target: The item of interest :type target: str :param display: The information to be reported for each item :type display: str

MinimalSet.contexts(minimum=2): Determine which contexts occurred with enough distinct targets. :param minimum: the minimum number of distinct target forms :type minimum: int :rtype: list
from collections import defaultdict class MinimalSet: def __init__(self, parameters=None): self._targets = set() self._contexts = set() self._seen = defaultdict(set) self._displays = {} if parameters: for context, target, display in parameters: self.add(context, target, display) def add(self, context, target, display): self._seen[context].add(target) self._contexts.add(context) self._targets.add(target) self._displays[(context, target)] = display def contexts(self, minimum=2): return [c for c in self._contexts if len(self._seen[c]) >= minimum] def display(self, context, target, default=""): if (context, target) in self._displays: return self._displays[(context, target)] else: return default def display_all(self, context): result = [] for target in self._targets: x = self.display(context, target) if x: result.append(x) return result def targets(self): return self._targets
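A short sketch of MinimalSet in use; the (context, target, display) triples below are invented to mirror the "fat"/"cat" example from the description.

from nltk.misc.minimalset import MinimalSet

pairs = [
    ("-at", "f", "fat"),
    ("-at", "c", "cat"),
    ("-raining", "t", "training"),
    ("-raining", "d", "draining"),
    ("-og", "d", "dog"),  # only one target in this context, so not contrastive
]
ms = MinimalSet(pairs)

# contexts() keeps only contexts seen with at least `minimum` distinct targets
for context in ms.contexts(minimum=2):
    print(context, ms.display_all(context))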
Natural Language Toolkit: List Sorting
(C) 2001-2023 NLTK Project
Author: Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

This module provides a variety of list sorting algorithms, to illustrate the many different algorithms (recipes) for solving a problem, and how to analyze algorithms experimentally. These algorithms are taken from Levitin (2004) "The Design and Analysis of Algorithms".

Selection Sort: scan the list to find its smallest element, then swap it with the first element. The remainder of the list is one element smaller; apply the same method to this list, and so on.

Bubble Sort: compare adjacent elements of the list left-to-right, and swap them if they are out of order. After one pass through the list, swapping adjacent items, the largest item will be in the rightmost position. The remainder is one element smaller; apply the same method to this list, and so on.

Merge Sort: split the list in half, sort each half, then combine the sorted halves (copying the result back into the original list).

Quick Sort.

Demonstration of the various sort methods.
def selection(a): count = 0 for i in range(len(a) - 1): min = i for j in range(i + 1, len(a)): if a[j] < a[min]: min = j count += 1 a[min], a[i] = a[i], a[min] return count def bubble(a): count = 0 for i in range(len(a) - 1): for j in range(len(a) - i - 1): if a[j + 1] < a[j]: a[j], a[j + 1] = a[j + 1], a[j] count += 1 return count def _merge_lists(b, c): count = 0 i = j = 0 a = [] while i < len(b) and j < len(c): count += 1 if b[i] <= c[j]: a.append(b[i]) i += 1 else: a.append(c[j]) j += 1 if i == len(b): a += c[j:] else: a += b[i:] return a, count def merge(a): count = 0 if len(a) > 1: midpoint = len(a) // 2 b = a[:midpoint] c = a[midpoint:] count_b = merge(b) count_c = merge(c) result, count_a = _merge_lists(b, c) a[:] = result count = count_a + count_b + count_c return count def _partition(a, l, r): p = a[l] i = l j = r + 1 count = 0 while True: while i < r: i += 1 if a[i] >= p: break while j > l: j -= 1 if j < l or a[j] <= p: break a[i], a[j] = a[j], a[i] count += 1 if i >= j: break a[i], a[j] = a[j], a[i] a[l], a[j] = a[j], a[l] return j, count def _quick(a, l, r): count = 0 if l < r: s, count = _partition(a, l, r) count += _quick(a, l, s - 1) count += _quick(a, s + 1, r) return count def quick(a): return _quick(a, 0, len(a) - 1) def demo(): from random import shuffle for size in (10, 20, 50, 100, 200, 500, 1000): a = list(range(size)) shuffle(a) count_selection = selection(a) shuffle(a) count_bubble = bubble(a) shuffle(a) count_merge = merge(a) shuffle(a) count_quick = quick(a) print( ("size=%5d: selection=%8d, bubble=%8d, " "merge=%6d, quick=%6d") % (size, count_selection, count_bubble, count_merge, count_quick) ) if __name__ == "__main__": demo()
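A quick sketch exercising the sort routines above on a shuffled list; each function sorts its argument in place and returns a step count, as in the code shown.

import random

from nltk.misc.sort import bubble, merge, quick, selection

data = list(range(30))
random.shuffle(data)

for sort_fn in (selection, bubble, merge, quick):
    a = data[:]  # work on a copy so every sort sees the same input
    count = sort_fn(a)
    assert a == sorted(data)
    print(f"{sort_fn.__name__:>9}: {count} steps")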
Natural Language Toolkit: Word Finder
(C) 2001-2023 NLTK Project
Author: Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Simplified from the PHP version by Robert Klein <brathna@gmail.com>
http://fswordfinder.sourceforge.net/

revword(word): Reverse a word with probability 0.5.

step(): Try to insert word at position (x, y); direction encoded in (xf, yf).

check(): Try to insert word at position (x, y), in direction dir.

wordfinder(words, rows, cols, attempts, alph): Attempt to arrange words into a letter-grid with the specified number of rows and columns. Try each word in several positions and directions, until it can be fitted into the grid, or the maximum number of allowable attempts is exceeded. Longer words are placed first, and the remaining blank cells are filled from the alphabet. Returns a tuple consisting of the grid and the words that were successfully placed. :param words: the list of words to be put into the grid :type words: list :param rows: the number of rows in the grid :type rows: int :param cols: the number of columns in the grid :type cols: int :param attempts: the number of times to attempt placing a word :type attempts: int :param alph: the alphabet to be used for filling blank cells :type alph: list :rtype: tuple
import random def revword(word): if random.randint(1, 2) == 1: return word[::-1] return word def step(word, x, xf, y, yf, grid): for i in range(len(word)): if grid[xf(i)][yf(i)] != "" and grid[xf(i)][yf(i)] != word[i]: return False for i in range(len(word)): grid[xf(i)][yf(i)] = word[i] return True def check(word, dir, x, y, grid, rows, cols): if dir == 1: if x - len(word) < 0 or y - len(word) < 0: return False return step(word, x, lambda i: x - i, y, lambda i: y - i, grid) elif dir == 2: if x - len(word) < 0: return False return step(word, x, lambda i: x - i, y, lambda i: y, grid) elif dir == 3: if x - len(word) < 0 or y + (len(word) - 1) >= cols: return False return step(word, x, lambda i: x - i, y, lambda i: y + i, grid) elif dir == 4: if y - len(word) < 0: return False return step(word, x, lambda i: x, y, lambda i: y - i, grid) def wordfinder(words, rows=20, cols=20, attempts=50, alph="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): words = sorted(words, key=len, reverse=True) grid = [] used = [] for i in range(rows): grid.append([""] * cols) for word in words: word = word.strip().upper() save = word word = revword(word) for attempt in range(attempts): r = random.randint(0, len(word)) dir = random.choice([1, 2, 3, 4]) x = random.randint(0, rows) y = random.randint(0, cols) if dir == 1: x += r y += r elif dir == 2: x += r elif dir == 3: x += r y -= r elif dir == 4: y += r if 0 <= x < rows and 0 <= y < cols: if check(word, dir, x, y, grid, rows, cols): used.append(save) break for i in range(rows): for j in range(cols): if grid[i][j] == "": grid[i][j] = random.choice(alph) return grid, used def word_finder(): from nltk.corpus import words wordlist = words.words() random.shuffle(wordlist) wordlist = wordlist[:200] wordlist = [w for w in wordlist if 3 <= len(w) <= 12] grid, used = wordfinder(wordlist) print("Word Finder\n") for i in range(len(grid)): for j in range(len(grid[i])): print(grid[i][j], end=" ") print() print() for i in range(len(used)): print("%d:" % (i + 1), used[i]) if __name__ == "__main__": word_finder()
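A minimal sketch of building a small puzzle with wordfinder(); the word list and grid size are arbitrary.

from nltk.misc.wordfinder import wordfinder

words = ["PYTHON", "GRAMMAR", "PARSER", "TOKEN", "CORPUS"]
grid, used = wordfinder(words, rows=10, cols=10)

# Print the letter grid and report which words were successfully placed.
for row in grid:
    print(" ".join(row))
print("Placed:", used)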
Natural Language Toolkit: Parsers
(C) 2001-2023 NLTK Project
Author: Steven Bird <stevenbird1@gmail.com>
        Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

NLTK Parsers

Classes and interfaces for producing tree structures that represent the internal organization of a text. This task is known as "parsing" the text, and the resulting tree structures are called the text's "parses". Typically, the text is a single sentence, and the tree structure represents the syntactic structure of the sentence. However, parsers can also be used in other domains. For example, parsers can be used to derive the morphological structure of the morphemes that make up a word, or to derive the discourse structure for a set of utterances.

Sometimes, a single piece of text can be represented by more than one tree structure. Texts represented by more than one tree structure are called "ambiguous" texts. Note that there are actually two ways in which a text can be ambiguous: the text has multiple correct parses; or there is not enough information to decide which of several candidate parses is correct. However, the parser module does not distinguish these two types of ambiguity.

The parser module defines ParserI, a standard interface for parsing texts, and two simple implementations of that interface, ShiftReduceParser and RecursiveDescentParser. It also contains three sub-modules for specialized kinds of parsing: nltk.parser.chart defines chart parsing, which uses dynamic programming to efficiently parse texts; nltk.parser.probabilistic defines probabilistic parsing, which associates a probability with each parse.
from nltk.parse.api import ParserI from nltk.parse.bllip import BllipParser from nltk.parse.chart import ( BottomUpChartParser, BottomUpLeftCornerChartParser, ChartParser, LeftCornerChartParser, SteppingChartParser, TopDownChartParser, ) from nltk.parse.corenlp import CoreNLPDependencyParser, CoreNLPParser from nltk.parse.dependencygraph import DependencyGraph from nltk.parse.earleychart import ( EarleyChartParser, FeatureEarleyChartParser, FeatureIncrementalBottomUpChartParser, FeatureIncrementalBottomUpLeftCornerChartParser, FeatureIncrementalChartParser, FeatureIncrementalTopDownChartParser, IncrementalBottomUpChartParser, IncrementalBottomUpLeftCornerChartParser, IncrementalChartParser, IncrementalLeftCornerChartParser, IncrementalTopDownChartParser, ) from nltk.parse.evaluate import DependencyEvaluator from nltk.parse.featurechart import ( FeatureBottomUpChartParser, FeatureBottomUpLeftCornerChartParser, FeatureChartParser, FeatureTopDownChartParser, ) from nltk.parse.malt import MaltParser from nltk.parse.nonprojectivedependencyparser import ( NaiveBayesDependencyScorer, NonprojectiveDependencyParser, ProbabilisticNonprojectiveParser, ) from nltk.parse.pchart import ( BottomUpProbabilisticChartParser, InsideChartParser, LongestChartParser, RandomChartParser, UnsortedChartParser, ) from nltk.parse.projectivedependencyparser import ( ProbabilisticProjectiveDependencyParser, ProjectiveDependencyParser, ) from nltk.parse.recursivedescent import ( RecursiveDescentParser, SteppingRecursiveDescentParser, ) from nltk.parse.shiftreduce import ShiftReduceParser, SteppingShiftReduceParser from nltk.parse.transitionparser import TransitionParser from nltk.parse.util import TestGrammar, extract_test_sentences, load_parser from nltk.parse.viterbi import ViterbiParser
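An illustrative sketch of one of the parsers exported above; the toy grammar and sentence are invented, and any chart parser from this package could be substituted.

from nltk import CFG
from nltk.parse import ChartParser

grammar = CFG.fromstring("""
  S -> NP VP
  NP -> Det N | NP PP
  VP -> V NP | VP PP
  PP -> P NP
  Det -> 'the' | 'a'
  N -> 'dog' | 'man' | 'park'
  V -> 'saw'
  P -> 'in'
""")

parser = ChartParser(grammar)
# The PP attachment is ambiguous, so this prints two parse trees.
for tree in parser.parse("the man saw a dog in the park".split()):
    print(tree)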
Natural Language Toolkit: Parser API
(C) 2001-2023 NLTK Project
Author: Steven Bird <stevenbird1@gmail.com>
        Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

ParserI: A processing class for deriving trees that represent possible structures for a sequence of tokens. These tree structures are known as "parses". Typically, parsers are used to derive syntax trees for sentences, but parsers can also be used to derive other kinds of tree structure, such as morphological trees and discourse structures.

Subclasses must define at least one of: parse(), parse_sents(). Subclasses may define: grammar().

grammar(): Return the grammar used by this parser.

parse(sent): Return an iterator that generates parse trees for the sentence. When possible this list is sorted from most likely to least likely. :param sent: The sentence to be parsed :type sent: list(str) :rtype: iter(Tree)

parse_sents(sents): Apply self.parse() to each element of sents. :rtype: iter(iter(Tree))

parse_all(sent): :rtype: list(Tree)

parse_one(sent): :rtype: Tree or None
import itertools from nltk.internals import overridden class ParserI: def grammar(self): raise NotImplementedError() def parse(self, sent, *args, **kwargs): if overridden(self.parse_sents): return next(self.parse_sents([sent], *args, **kwargs)) elif overridden(self.parse_one): return ( tree for tree in [self.parse_one(sent, *args, **kwargs)] if tree is not None ) elif overridden(self.parse_all): return iter(self.parse_all(sent, *args, **kwargs)) else: raise NotImplementedError() def parse_sents(self, sents, *args, **kwargs): return (self.parse(sent, *args, **kwargs) for sent in sents) def parse_all(self, sent, *args, **kwargs): return list(self.parse(sent, *args, **kwargs)) def parse_one(self, sent, *args, **kwargs): return next(self.parse(sent, *args, **kwargs), None)
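A toy sketch of the ParserI contract: a subclass only needs to override parse(); parse_sents(), parse_all() and parse_one() then come for free. The FlatParser class below is invented purely for illustration.

from nltk.parse.api import ParserI
from nltk.tree import Tree

class FlatParser(ParserI):
    """Wraps every sentence in a single flat S node (illustration only)."""

    def parse(self, sent, *args, **kwargs):
        # Yield a single "parse": a flat tree over the tokens.
        yield Tree("S", list(sent))

parser = FlatParser()
print(parser.parse_one("a toy example".split()))   # (S a toy example)
print(parser.parse_all("another one".split()))     # [Tree('S', ['another', 'one'])]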
Natural Language Toolkit: An Incremental Earley Chart Parser
(C) 2001-2023 NLTK Project
Author: Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
        Rob Speer <rspeer@mit.edu>
        Edward Loper <edloper@gmail.com>
        Steven Bird <stevenbird1@gmail.com>
        Jean Mark Gawron <gawron@mail.sdsu.edu>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Data classes and parser implementations for incremental chart parsers, which use dynamic programming to efficiently parse a text. A "chart parser" derives parse trees for a text by iteratively adding "edges" to a "chart". Each edge represents a hypothesis about the tree structure for a subsequence of the text. The chart is a "blackboard" for composing and combining these hypotheses.

A parser is "incremental" if it guarantees that, for all i, j where i < j, all edges ending at i are built before any edges ending at j. This is appealing for, say, speech recognizer hypothesis filtering.

The main parser class is EarleyChartParser, which is a top-down algorithm, originally formulated by Jay Earley (1970).

Incremental Chart: IncrementalChart stores a sequence of edge lists contained in the chart, the set of child pointer lists associated with each edge, and indexes mapping attribute values to lists of edges (used by select()). select() returns all edges if there are no restrictions; otherwise it finds the index corresponding to the given restrictions, creating it if it doesn't exist. _add_index() makes sure it's a valid index, creates the index, and adds all existing edges to the index.

Incremental CFG Rules: when the chart is incremental, the completer only has to look for empty complete edges; and since the filtered rule only works for grammars without empty productions, it only has to bother with complete edges.

Incremental FCFG Rules: when the chart is incremental, we only have to look for empty complete edges here.

Incremental CFG Chart Parsers: IncrementalChartParser is an incremental chart parser implementing Jay Earley's parsing algorithm:

    For each index end in [0, 1, 2, ..., N]:
        For each edge such that edge.end = end:
            If edge is incomplete and edge.next is not a part of speech:
                Apply PredictorRule to edge
            If edge is incomplete and edge.next is a part of speech:
                Apply ScannerRule to edge
            If edge is complete:
                Apply CompleterRule to edge
    Return any complete parses in the chart

__init__(): Create a new Earley chart parser that uses grammar to parse texts. :type grammar: CFG :param grammar: the grammar used to parse texts :type trace: int :param trace: the level of tracing that should be used when parsing a text; 0 will generate no tracing output, and higher numbers will produce more verbose tracing output :type trace_chart_width: int :param trace_chart_width: the default total width reserved for the chart in trace output; the remainder of each line will be used to display edges :param chart_class: the class that should be used to create the charts used by this parser. (The chart_parse() method computes the width for printing trace edges from this value.)

Incremental FCFG Chart Parsers.

Demonstration: demo() is a demonstration of the Earley parsers; it builds the grammar used for ChartParser and SteppingChartParser, tokenizes the sample sentence, does the parsing, and prints the results.
from time import perf_counter from nltk.parse.chart import ( BottomUpPredictCombineRule, BottomUpPredictRule, CachedTopDownPredictRule, Chart, ChartParser, EdgeI, EmptyPredictRule, FilteredBottomUpPredictCombineRule, FilteredSingleEdgeFundamentalRule, LeafEdge, LeafInitRule, SingleEdgeFundamentalRule, TopDownInitRule, ) from nltk.parse.featurechart import ( FeatureBottomUpPredictCombineRule, FeatureBottomUpPredictRule, FeatureChart, FeatureChartParser, FeatureEmptyPredictRule, FeatureSingleEdgeFundamentalRule, FeatureTopDownInitRule, FeatureTopDownPredictRule, ) class IncrementalChart(Chart): def initialize(self): self._edgelists = tuple([] for x in self._positions()) self._edge_to_cpls = {} self._indexes = {} def edges(self): return list(self.iteredges()) def iteredges(self): return (edge for edgelist in self._edgelists for edge in edgelist) def select(self, end, **restrictions): edgelist = self._edgelists[end] if restrictions == {}: return iter(edgelist) restr_keys = sorted(restrictions.keys()) restr_keys = tuple(restr_keys) if restr_keys not in self._indexes: self._add_index(restr_keys) vals = tuple(restrictions[key] for key in restr_keys) return iter(self._indexes[restr_keys][end].get(vals, [])) def _add_index(self, restr_keys): for key in restr_keys: if not hasattr(EdgeI, key): raise ValueError("Bad restriction: %s" % key) index = self._indexes[restr_keys] = tuple({} for x in self._positions()) for end, edgelist in enumerate(self._edgelists): this_index = index[end] for edge in edgelist: vals = tuple(getattr(edge, key)() for key in restr_keys) this_index.setdefault(vals, []).append(edge) def _register_with_indexes(self, edge): end = edge.end() for (restr_keys, index) in self._indexes.items(): vals = tuple(getattr(edge, key)() for key in restr_keys) index[end].setdefault(vals, []).append(edge) def _append_edge(self, edge): self._edgelists[edge.end()].append(edge) def _positions(self): return range(self.num_leaves() + 1) class FeatureIncrementalChart(IncrementalChart, FeatureChart): def select(self, end, **restrictions): edgelist = self._edgelists[end] if restrictions == {}: return iter(edgelist) restr_keys = sorted(restrictions.keys()) restr_keys = tuple(restr_keys) if restr_keys not in self._indexes: self._add_index(restr_keys) vals = tuple( self._get_type_if_possible(restrictions[key]) for key in restr_keys ) return iter(self._indexes[restr_keys][end].get(vals, [])) def _add_index(self, restr_keys): for key in restr_keys: if not hasattr(EdgeI, key): raise ValueError("Bad restriction: %s" % key) index = self._indexes[restr_keys] = tuple({} for x in self._positions()) for end, edgelist in enumerate(self._edgelists): this_index = index[end] for edge in edgelist: vals = tuple( self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys ) this_index.setdefault(vals, []).append(edge) def _register_with_indexes(self, edge): end = edge.end() for (restr_keys, index) in self._indexes.items(): vals = tuple( self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys ) index[end].setdefault(vals, []).append(edge) class CompleteFundamentalRule(SingleEdgeFundamentalRule): def _apply_incomplete(self, chart, grammar, left_edge): end = left_edge.end() for right_edge in chart.select( start=end, end=end, is_complete=True, lhs=left_edge.nextsym() ): new_edge = left_edge.move_dot_forward(right_edge.end()) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge class CompleterRule(CompleteFundamentalRule): _fundamental_rule = CompleteFundamentalRule() def 
apply(self, chart, grammar, edge): if not isinstance(edge, LeafEdge): yield from self._fundamental_rule.apply(chart, grammar, edge) class ScannerRule(CompleteFundamentalRule): _fundamental_rule = CompleteFundamentalRule() def apply(self, chart, grammar, edge): if isinstance(edge, LeafEdge): yield from self._fundamental_rule.apply(chart, grammar, edge) class PredictorRule(CachedTopDownPredictRule): pass class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule): def apply(self, chart, grammar, edge): if edge.is_complete(): yield from self._apply_complete(chart, grammar, edge) class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule): def _apply_incomplete(self, chart, grammar, left_edge): fr = self._fundamental_rule end = left_edge.end() for right_edge in chart.select( start=end, end=end, is_complete=True, lhs=left_edge.nextsym() ): yield from fr.apply(chart, grammar, left_edge, right_edge) class FeatureCompleterRule(CompleterRule): _fundamental_rule = FeatureCompleteFundamentalRule() class FeatureScannerRule(ScannerRule): _fundamental_rule = FeatureCompleteFundamentalRule() class FeaturePredictorRule(FeatureTopDownPredictRule): pass EARLEY_STRATEGY = [ LeafInitRule(), TopDownInitRule(), CompleterRule(), ScannerRule(), PredictorRule(), ] TD_INCREMENTAL_STRATEGY = [ LeafInitRule(), TopDownInitRule(), CachedTopDownPredictRule(), CompleteFundamentalRule(), ] BU_INCREMENTAL_STRATEGY = [ LeafInitRule(), EmptyPredictRule(), BottomUpPredictRule(), CompleteFundamentalRule(), ] BU_LC_INCREMENTAL_STRATEGY = [ LeafInitRule(), EmptyPredictRule(), BottomUpPredictCombineRule(), CompleteFundamentalRule(), ] LC_INCREMENTAL_STRATEGY = [ LeafInitRule(), FilteredBottomUpPredictCombineRule(), FilteredCompleteFundamentalRule(), ] class IncrementalChartParser(ChartParser): def __init__( self, grammar, strategy=BU_LC_INCREMENTAL_STRATEGY, trace=0, trace_chart_width=50, chart_class=IncrementalChart, ): self._grammar = grammar self._trace = trace self._trace_chart_width = trace_chart_width self._chart_class = chart_class self._axioms = [] self._inference_rules = [] for rule in strategy: if rule.NUM_EDGES == 0: self._axioms.append(rule) elif rule.NUM_EDGES == 1: self._inference_rules.append(rule) else: raise ValueError( "Incremental inference rules must have " "NUM_EDGES == 0 or 1" ) def chart_parse(self, tokens, trace=None): if trace is None: trace = self._trace trace_new_edges = self._trace_new_edges tokens = list(tokens) self._grammar.check_coverage(tokens) chart = self._chart_class(tokens) grammar = self._grammar trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1) if trace: print(chart.pretty_format_leaves(trace_edge_width)) for axiom in self._axioms: new_edges = list(axiom.apply(chart, grammar)) trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width) inference_rules = self._inference_rules for end in range(chart.num_leaves() + 1): if trace > 1: print("\n* Processing queue:", end, "\n") agenda = list(chart.select(end=end)) while agenda: edge = agenda.pop() for rule in inference_rules: new_edges = list(rule.apply(chart, grammar, edge)) trace_new_edges(chart, rule, new_edges, trace, trace_edge_width) for new_edge in new_edges: if new_edge.end() == end: agenda.append(new_edge) return chart class EarleyChartParser(IncrementalChartParser): def __init__(self, grammar, **parser_args): IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args) class IncrementalTopDownChartParser(IncrementalChartParser): def __init__(self, grammar, 
**parser_args): IncrementalChartParser.__init__( self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args ) class IncrementalBottomUpChartParser(IncrementalChartParser): def __init__(self, grammar, **parser_args): IncrementalChartParser.__init__( self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args ) class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser): def __init__(self, grammar, **parser_args): IncrementalChartParser.__init__( self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args ) class IncrementalLeftCornerChartParser(IncrementalChartParser): def __init__(self, grammar, **parser_args): if not grammar.is_nonempty(): raise ValueError( "IncrementalLeftCornerParser only works for grammars " "without empty productions." ) IncrementalChartParser.__init__( self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args ) EARLEY_FEATURE_STRATEGY = [ LeafInitRule(), FeatureTopDownInitRule(), FeatureCompleterRule(), FeatureScannerRule(), FeaturePredictorRule(), ] TD_INCREMENTAL_FEATURE_STRATEGY = [ LeafInitRule(), FeatureTopDownInitRule(), FeatureTopDownPredictRule(), FeatureCompleteFundamentalRule(), ] BU_INCREMENTAL_FEATURE_STRATEGY = [ LeafInitRule(), FeatureEmptyPredictRule(), FeatureBottomUpPredictRule(), FeatureCompleteFundamentalRule(), ] BU_LC_INCREMENTAL_FEATURE_STRATEGY = [ LeafInitRule(), FeatureEmptyPredictRule(), FeatureBottomUpPredictCombineRule(), FeatureCompleteFundamentalRule(), ] class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser): def __init__( self, grammar, strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY, trace_chart_width=20, chart_class=FeatureIncrementalChart, **parser_args ): IncrementalChartParser.__init__( self, grammar, strategy=strategy, trace_chart_width=trace_chart_width, chart_class=chart_class, **parser_args ) class FeatureEarleyChartParser(FeatureIncrementalChartParser): def __init__(self, grammar, **parser_args): FeatureIncrementalChartParser.__init__( self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args ) class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser): def __init__(self, grammar, **parser_args): FeatureIncrementalChartParser.__init__( self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args ) class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser): def __init__(self, grammar, **parser_args): FeatureIncrementalChartParser.__init__( self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args ) class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser): def __init__(self, grammar, **parser_args): FeatureIncrementalChartParser.__init__( self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args ) def demo( print_times=True, print_grammar=False, print_trees=True, trace=2, sent="I saw John with a dog with my cookie", numparses=5, ): import sys import time from nltk.parse.chart import demo_grammar grammar = demo_grammar() if print_grammar: print("* Grammar") print(grammar) print("* Sentence:") print(sent) tokens = sent.split() print(tokens) print() earley = EarleyChartParser(grammar, trace=trace) t = perf_counter() chart = earley.chart_parse(tokens) parses = list(chart.parses(grammar.start())) t = perf_counter() - t if numparses: assert len(parses) == numparses, "Not all parses found" if print_trees: for tree in parses: print(tree) else: print("Nr trees:", len(parses)) if print_times: print("Time:", t) if __name__ == "__main__": demo()
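A short sketch of running the incremental Earley parser above directly; the toy grammar and sentence are invented for illustration.

from nltk import CFG
from nltk.parse.earleychart import EarleyChartParser

grammar = CFG.fromstring("""
  S -> NP VP
  NP -> 'John' | Det N
  VP -> V NP
  Det -> 'a'
  N -> 'sandwich'
  V -> 'ate'
""")

parser = EarleyChartParser(grammar, trace=0)
chart = parser.chart_parse("John ate a sandwich".split())
for tree in chart.parses(grammar.start()):
    print(tree)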
Natural Language Toolkit: Chart Parser for Feature-Based Grammars
(C) 2001-2023 NLTK Project
Author: Rob Speer <rspeer@mit.edu>
        Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Extension of the chart parsing implementation to handle grammars with feature structures as nodes.

Tree Edge: FeatureTreeEdge is a specialized tree edge that allows shared variable bindings between nonterminals on the left-hand side and right-hand side. Each FeatureTreeEdge contains a set of bindings, i.e. a dictionary mapping from variables to values. If the edge is not complete, then these bindings are simply stored. However, if the edge is complete, then the constructor applies these bindings to every nonterminal in the edge whose symbol implements the interface SubstituteBindingsI.

__init__(): Construct a new edge. If the edge is incomplete (i.e., if dot < len(rhs)), then store the bindings as-is. If the edge is complete (i.e., if dot == len(rhs)), then apply the bindings to all nonterminals in lhs and rhs, and then clear the bindings. (If the bindings were not thrown away, two complete edges might look different just because they have different bindings, even though all bindings have already been applied.) See TreeEdge for a description of the other arguments.

from_production(): Return a new TreeEdge formed from the given production. The new edge's left-hand side and right-hand side will be taken from production; its span will be (index, index); and its dot position will be 0. :rtype: TreeEdge

move_dot_forward(new_end, bindings): Return a new FeatureTreeEdge formed from this edge. The new edge's dot position is increased by 1, and its end index will be replaced by new_end. :rtype: FeatureTreeEdge :param new_end: the new end index :type new_end: int :param bindings: bindings for the new edge :type bindings: dict

bindings(): Return a copy of this edge's bindings dictionary.

variables(): Return the set of variables used by this edge. :rtype: set(Variable)

A Specialized Chart for Feature Grammars: FeatureChart is a chart for feature grammars (TODO: subsumes check when adding new edges); see Chart for more information. select() returns an iterator over the edges in this chart; see Chart.select() for more information about the restrictions on the edges (if there are no restrictions, all edges are returned; otherwise the index corresponding to the given restrictions is found, and created if it doesn't exist). _add_index() is a helper function for select() which creates a new index for a given set of attributes (aka restriction keys). _register_with_indexes() is a helper function for insert() which registers the new edge with all existing indexes. _get_type_if_possible() is a helper function which returns the type feature of the item, if it exists; otherwise it returns the item itself.

Fundamental Rule: FeatureFundamentalRule is a specialized version of the fundamental rule that operates on nonterminals whose symbols are FeatStructNonterminals. Rather than simply comparing the nonterminals for equality, they are unified. Variable bindings from these unifications are collected and stored in the chart using a FeatureTreeEdge; when a complete edge is generated, these bindings are applied to all nonterminals in the edge. The fundamental rule states that [A -> alpha * B1 beta][i:j] and [B2 -> gamma *][j:k] license the edge [A -> alpha B3 * beta][i:k], assuming that B1 and B2 can be unified to generate B3. FeatureSingleEdgeFundamentalRule is a specialized version of the completer / single edge fundamental rule; again, rather than comparing the nonterminals for equality, they are unified.

Top-Down Prediction: FeatureTopDownPredictRule is a specialized version of the cached top down predict rule that operates on nonterminals whose symbols are FeatStructNonterminals. Rather than simply comparing the nonterminals for equality, they are unified. The top down expand rule states that [A -> alpha * B1 beta][i:j] licenses the edge [B2 -> * gamma][j:j] for each grammar production B2 -> gamma, assuming that B1 and B2 can be unified. If the rule has already been applied to an edge with the same next symbol and end, and the chart and grammar have not changed, no new edges are added; if the left corner in the predicted production is a leaf, it must match the input. Variables are renamed so that variables from the two different productions do not accidentally match, and the rule records the fact that it has been applied.

Bottom-Up Prediction: as above, variables are renamed so that variables from the two different productions do not accidentally match.

Feature Chart Parser.

Instantiate Variable Chart: InstantiateVarsChart is a specialized chart that "instantiates" variables whose names start with "@", by replacing them with unique new variables. In particular, whenever a complete edge is added to the chart, any variables in the edge's lhs whose names start with "@" will be replaced by unique new Variables. instantiate_edge(): if the edge is a FeatureTreeEdge and it is complete, instantiate all variables whose names start with "@" by replacing them with unique new variables; note that instantiation is done in-place, since the parsing algorithms might already hold a reference to the edge for future use. If the edge is a leaf, is not complete, or is already in the chart, it is left as-is. A list of variables that need to be instantiated is collected first; if there are none, the edge is returned as-is.

Demo grammar:
    S -> NP VP
    PP -> Prep NP
    NP -> NP PP
    VP -> VP PP
    VP -> Verb NP
    VP -> Verb
    NP -> Det[pl=?x] Noun[pl=?x]
    NP -> 'John'
    NP -> 'I'
    Det -> 'the'
    Det -> 'my'
    Det[-pl] -> 'a'
    Noun[-pl] -> 'dog'
    Noun[-pl] -> 'cookie'
    Verb -> 'ate'
    Verb -> 'saw'
    Prep -> 'with'
    Prep -> 'under'
from time import perf_counter from nltk.featstruct import TYPE, FeatStruct, find_variables, unify from nltk.grammar import ( CFG, FeatStructNonterminal, Nonterminal, Production, is_nonterminal, is_terminal, ) from nltk.parse.chart import ( BottomUpPredictCombineRule, BottomUpPredictRule, CachedTopDownPredictRule, Chart, ChartParser, EdgeI, EmptyPredictRule, FundamentalRule, LeafInitRule, SingleEdgeFundamentalRule, TopDownInitRule, TreeEdge, ) from nltk.sem import logic from nltk.tree import Tree class FeatureTreeEdge(TreeEdge): def __init__(self, span, lhs, rhs, dot=0, bindings=None): if bindings is None: bindings = {} if dot == len(rhs) and bindings: lhs = self._bind(lhs, bindings) rhs = [self._bind(elt, bindings) for elt in rhs] bindings = {} TreeEdge.__init__(self, span, lhs, rhs, dot) self._bindings = bindings self._comparison_key = (self._comparison_key, tuple(sorted(bindings.items()))) @staticmethod def from_production(production, index): return FeatureTreeEdge( span=(index, index), lhs=production.lhs(), rhs=production.rhs(), dot=0 ) def move_dot_forward(self, new_end, bindings=None): return FeatureTreeEdge( span=(self._span[0], new_end), lhs=self._lhs, rhs=self._rhs, dot=self._dot + 1, bindings=bindings, ) def _bind(self, nt, bindings): if not isinstance(nt, FeatStructNonterminal): return nt return nt.substitute_bindings(bindings) def next_with_bindings(self): return self._bind(self.nextsym(), self._bindings) def bindings(self): return self._bindings.copy() def variables(self): return find_variables( [self._lhs] + list(self._rhs) + list(self._bindings.keys()) + list(self._bindings.values()), fs_class=FeatStruct, ) def __str__(self): if self.is_complete(): return super().__str__() else: bindings = "{%s}" % ", ".join( "%s: %r" % item for item in sorted(self._bindings.items()) ) return f"{super().__str__()} {bindings}" class FeatureChart(Chart): def select(self, **restrictions): if restrictions == {}: return iter(self._edges) restr_keys = sorted(restrictions.keys()) restr_keys = tuple(restr_keys) if restr_keys not in self._indexes: self._add_index(restr_keys) vals = tuple( self._get_type_if_possible(restrictions[key]) for key in restr_keys ) return iter(self._indexes[restr_keys].get(vals, [])) def _add_index(self, restr_keys): for key in restr_keys: if not hasattr(EdgeI, key): raise ValueError("Bad restriction: %s" % key) index = self._indexes[restr_keys] = {} for edge in self._edges: vals = tuple( self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys ) index.setdefault(vals, []).append(edge) def _register_with_indexes(self, edge): for (restr_keys, index) in self._indexes.items(): vals = tuple( self._get_type_if_possible(getattr(edge, key)()) for key in restr_keys ) index.setdefault(vals, []).append(edge) def _get_type_if_possible(self, item): if isinstance(item, dict) and TYPE in item: return item[TYPE] else: return item def parses(self, start, tree_class=Tree): for edge in self.select(start=0, end=self._num_leaves): if ( (isinstance(edge, FeatureTreeEdge)) and (edge.lhs()[TYPE] == start[TYPE]) and (unify(edge.lhs(), start, rename_vars=True)) ): yield from self.trees(edge, complete=True, tree_class=tree_class) class FeatureFundamentalRule(FundamentalRule): r def apply(self, chart, grammar, left_edge, right_edge): if not ( left_edge.end() == right_edge.start() and left_edge.is_incomplete() and right_edge.is_complete() and isinstance(left_edge, FeatureTreeEdge) ): return found = right_edge.lhs() nextsym = left_edge.nextsym() if isinstance(right_edge, FeatureTreeEdge): 
if not is_nonterminal(nextsym): return if left_edge.nextsym()[TYPE] != right_edge.lhs()[TYPE]: return bindings = left_edge.bindings() found = found.rename_variables(used_vars=left_edge.variables()) result = unify(nextsym, found, bindings, rename_vars=False) if result is None: return else: if nextsym != found: return bindings = left_edge.bindings() new_edge = left_edge.move_dot_forward(right_edge.end(), bindings) if chart.insert_with_backpointer(new_edge, left_edge, right_edge): yield new_edge class FeatureSingleEdgeFundamentalRule(SingleEdgeFundamentalRule): _fundamental_rule = FeatureFundamentalRule() def _apply_complete(self, chart, grammar, right_edge): fr = self._fundamental_rule for left_edge in chart.select( end=right_edge.start(), is_complete=False, nextsym=right_edge.lhs() ): yield from fr.apply(chart, grammar, left_edge, right_edge) def _apply_incomplete(self, chart, grammar, left_edge): fr = self._fundamental_rule for right_edge in chart.select( start=left_edge.end(), is_complete=True, lhs=left_edge.nextsym() ): yield from fr.apply(chart, grammar, left_edge, right_edge) class FeatureTopDownInitRule(TopDownInitRule): def apply(self, chart, grammar): for prod in grammar.productions(lhs=grammar.start()): new_edge = FeatureTreeEdge.from_production(prod, 0) if chart.insert(new_edge, ()): yield new_edge class FeatureTopDownPredictRule(CachedTopDownPredictRule): r def apply(self, chart, grammar, edge): if edge.is_complete(): return nextsym, index = edge.nextsym(), edge.end() if not is_nonterminal(nextsym): return nextsym_with_bindings = edge.next_with_bindings() done = self._done.get((nextsym_with_bindings, index), (None, None)) if done[0] is chart and done[1] is grammar: return for prod in grammar.productions(lhs=nextsym): if prod.rhs(): first = prod.rhs()[0] if is_terminal(first): if index >= chart.num_leaves(): continue if first != chart.leaf(index): continue if unify(prod.lhs(), nextsym_with_bindings, rename_vars=True): new_edge = FeatureTreeEdge.from_production(prod, edge.end()) if chart.insert(new_edge, ()): yield new_edge self._done[nextsym_with_bindings, index] = (chart, grammar) class FeatureBottomUpPredictRule(BottomUpPredictRule): def apply(self, chart, grammar, edge): if edge.is_incomplete(): return for prod in grammar.productions(rhs=edge.lhs()): if isinstance(edge, FeatureTreeEdge): _next = prod.rhs()[0] if not is_nonterminal(_next): continue new_edge = FeatureTreeEdge.from_production(prod, edge.start()) if chart.insert(new_edge, ()): yield new_edge class FeatureBottomUpPredictCombineRule(BottomUpPredictCombineRule): def apply(self, chart, grammar, edge): if edge.is_incomplete(): return found = edge.lhs() for prod in grammar.productions(rhs=found): bindings = {} if isinstance(edge, FeatureTreeEdge): _next = prod.rhs()[0] if not is_nonterminal(_next): continue used_vars = find_variables( (prod.lhs(),) + prod.rhs(), fs_class=FeatStruct ) found = found.rename_variables(used_vars=used_vars) result = unify(_next, found, bindings, rename_vars=False) if result is None: continue new_edge = FeatureTreeEdge.from_production( prod, edge.start() ).move_dot_forward(edge.end(), bindings) if chart.insert(new_edge, (edge,)): yield new_edge class FeatureEmptyPredictRule(EmptyPredictRule): def apply(self, chart, grammar): for prod in grammar.productions(empty=True): for index in range(chart.num_leaves() + 1): new_edge = FeatureTreeEdge.from_production(prod, index) if chart.insert(new_edge, ()): yield new_edge TD_FEATURE_STRATEGY = [ LeafInitRule(), FeatureTopDownInitRule(), 
FeatureTopDownPredictRule(), FeatureSingleEdgeFundamentalRule(), ] BU_FEATURE_STRATEGY = [ LeafInitRule(), FeatureEmptyPredictRule(), FeatureBottomUpPredictRule(), FeatureSingleEdgeFundamentalRule(), ] BU_LC_FEATURE_STRATEGY = [ LeafInitRule(), FeatureEmptyPredictRule(), FeatureBottomUpPredictCombineRule(), FeatureSingleEdgeFundamentalRule(), ] class FeatureChartParser(ChartParser): def __init__( self, grammar, strategy=BU_LC_FEATURE_STRATEGY, trace_chart_width=20, chart_class=FeatureChart, **parser_args, ): ChartParser.__init__( self, grammar, strategy=strategy, trace_chart_width=trace_chart_width, chart_class=chart_class, **parser_args, ) class FeatureTopDownChartParser(FeatureChartParser): def __init__(self, grammar, **parser_args): FeatureChartParser.__init__(self, grammar, TD_FEATURE_STRATEGY, **parser_args) class FeatureBottomUpChartParser(FeatureChartParser): def __init__(self, grammar, **parser_args): FeatureChartParser.__init__(self, grammar, BU_FEATURE_STRATEGY, **parser_args) class FeatureBottomUpLeftCornerChartParser(FeatureChartParser): def __init__(self, grammar, **parser_args): FeatureChartParser.__init__( self, grammar, BU_LC_FEATURE_STRATEGY, **parser_args ) class InstantiateVarsChart(FeatureChart): def __init__(self, tokens): FeatureChart.__init__(self, tokens) def initialize(self): self._instantiated = set() FeatureChart.initialize(self) def insert(self, edge, child_pointer_list): if edge in self._instantiated: return False self.instantiate_edge(edge) return FeatureChart.insert(self, edge, child_pointer_list) def instantiate_edge(self, edge): if not isinstance(edge, FeatureTreeEdge): return if not edge.is_complete(): return if edge in self._edge_to_cpls: return inst_vars = self.inst_vars(edge) if not inst_vars: return self._instantiated.add(edge) edge._lhs = edge.lhs().substitute_bindings(inst_vars) def inst_vars(self, edge): return { var: logic.unique_variable() for var in edge.lhs().variables() if var.name.startswith("@") } def demo_grammar(): from nltk.grammar import FeatureGrammar return FeatureGrammar.fromstring( ) def demo( print_times=True, print_grammar=True, print_trees=True, print_sentence=True, trace=1, parser=FeatureChartParser, sent="I saw John with a dog with my cookie", ): import sys import time print() grammar = demo_grammar() if print_grammar: print(grammar) print() print("*", parser.__name__) if print_sentence: print("Sentence:", sent) tokens = sent.split() t = perf_counter() cp = parser(grammar, trace=trace) chart = cp.chart_parse(tokens) trees = list(chart.parses(grammar.start())) if print_times: print("Time: %s" % (perf_counter() - t)) if print_trees: for tree in trees: print(tree) else: print("Nr trees:", len(trees)) def run_profile(): import profile profile.run("for i in range(1): demo()", "/tmp/profile.out") import pstats p = pstats.Stats("/tmp/profile.out") p.strip_dirs().sort_stats("time", "cum").print_stats(60) p.strip_dirs().sort_stats("cum", "time").print_stats(60) if __name__ == "__main__": from nltk.data import load demo() print() grammar = load("grammars/book_grammars/feat0.fcfg") cp = FeatureChartParser(grammar, trace=2) sent = "Kim likes children" tokens = sent.split() trees = cp.parse(tokens) for tree in trees: print(tree)
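A minimal usage sketch for the feature chart parsers above (not part of the module itself): the toy agreement grammar is illustrative only, and any of the strategy-specific subclasses could stand in for FeatureChartParser.

from nltk.grammar import FeatureGrammar
from nltk.parse.featurechart import FeatureChartParser

# Toy FCFG with a NUM agreement feature; the LHS of the first production (S)
# is taken as the start symbol.
toy_grammar = FeatureGrammar.fromstring("""
S -> NP[NUM=?n] VP[NUM=?n]
NP[NUM=?n] -> N[NUM=?n]
VP[NUM=?n] -> V[NUM=?n]
N[NUM=sg] -> 'Kim'
V[NUM=sg] -> 'sleeps'
""")

parser = FeatureChartParser(toy_grammar, trace=0)
for tree in parser.parse("Kim sleeps".split()):
    print(tree)  # (S (NP (N Kim)) (VP (V sleeps)))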
natural language toolkit interface to maltparser dan garrette dhgarrettegmail com contributor liling tan mustufain osamamukhtar11 c 20012023 nltk project url https www nltk org for license information see license txt a module to find maltparser jar file and its dependencies checks that that the found directory contains all the necessary jar a module to find pretrained maltparser model a class for dependency parsing with maltparser the input is the paths to optionally a maltparser directory optionally the path to a pretrained maltparser mco model file optionally the tagger to use for pos tagging before parsing optionally additional java arguments example from nltk parse import malt with maltparser and maltmodel environment set mp malt maltparsermodelfilename engmalt linear1 7 mco doctest skip mp parseone i shot an elephant in my pajamas split tree doctest skip shot i elephant an in pajamas my without maltparser and maltmodel environment mp malt maltparser homeusermaltparser1 9 2 homeuserengmalt linear1 7 mco doctest skip mp parseone i shot an elephant in my pajamas split tree doctest skip shot i elephant an in pajamas my an interface for parsing with the malt parser param parserdirname the path to the maltparser directory that contains the maltparser1 x jar type parserdirname str param modelfilename the name of the pretrained model with mco file extension if provided training will not be required see http www maltparser orgmcomco html and see http www patful comchalknode185 type modelfilename str param tagger the tagger used to pos tag the raw string before formatting to conll format it should behave like nltk postag type tagger function param additionaljavaargs this is the additional java arguments that one can use when calling maltparser usually this is the heapsize limits e g additionaljavaargs xmx1024m see https goo glmpdbvq type additionaljavaargs list find all the necessary jar files for maltparser initialize additional java arguments initialize model set the workingdir parameters i e w from maltparser s option initialize pos tagger use maltparser to parse multiple pos tagged sentences takes multiple sentences where each sentence is a list of word tag tuples the sentences must have already been tokenized and tagged param sentences input sentences to parse type sentence listlisttuplestr str return iteriterdependencygraph the dependency graph representation of each sentence convert list of sentences to conll format generate command to run maltparser this is a maltparser quirk it needs to be run where the model file is otherwise it goes into an awkward missing jars or strange w workingdir problem must return iteritertree use maltparser to parse multiple sentences takes a list of sentences where each sentence is a list of words each sentence will be automatically tagged with this maltparser instance s tagger param sentences input sentences to parse type sentence listliststr return iterdependencygraph this function generates the maltparser command use at the terminal param inputfilename path to the input file type inputfilename str param outputfilename path to the output file type outputfilename str joins classpaths with if on windows and on linuxmac use adds the model file train maltparser from a list of dependencygraph objects param depgraphs list of dependencygraph objects for training input data type depgraphs dependencygraph write the conllstr to malttrain conll file in tmp trains the model with the malttrain conll removes the malttrain conll once training finishes train maltparser 
from a file param conllfile str for the filename of the training input data type conllfile str if conllfile is a zipfilepathpointer then we need to do some extra massaging generate command to run maltparser a demonstration function to show how nltk users can use the malt parser api from nltk import postag assert maltparser in os environ str please set maltparser in your global environment e g n export maltparser homeusermaltparser1 9 2 assert maltmodel in os environ str please set maltmodel in your global environment e g n export maltmodel homeuserengmalt linear1 7 mco dg1str str1 john nnp 2 subj n 2 sees vb 0 root n 3 a dt 4 spec n 4 dog nn 2 obj n 5 2 punct n dg2str str1 john nnp 2 subj n 2 walks vb 0 root n 3 2 punct n dg1 dependencygraphdg1str dg2 dependencygraphdg2str initialize a maltparser object mp maltparser trains a model mp traindg1 dg2 verbosefalse sent1 john sees mary sent2 john walks a dog parse a single sentence parsedsent1 mp parseonesent1 parsedsent2 mp parseonesent2 printparsedsent1 tree sees john mary printparsedsent2 tree walks john dog a parsing multiple sentences sentences sent1 sent2 parsedsents mp parsesentssentences printnextnextparsedsents tree sees john mary printnextnextparsedsents tree walks john dog a initialize a maltparser object with an english pretrained model parserdirname maltparser1 9 2 modelname engmalt linear1 7 mco mp maltparserparserdirnameparserdirname modelfilenamemodelname taggerpostag sent1 i shot an elephant in my pajamas split sent2 time flies like banana split parse a single sentence printmp parseonesent1 tree shot i elephant an in pajamas my parsing multiple sentences sentences sent1 sent2 parsedsents mp parsesentssentences printnextnextparsedsents tree shot i elephant an in pajamas my printnextnextparsedsents tree flies time like banana natural language toolkit interface to maltparser dan garrette dhgarrette gmail com contributor liling tan mustufain osamamukhtar11 c 2001 2023 nltk project url https www nltk org for license information see license txt fullstop comma qmark round brackets square brackets cardinal numbers articles pronouns possessive possessive time prepopsitions time prepopsitions time prepopsitions space prepopsitions space prepopsitions space prepopsitions space prepopsitions space prepopsitions space prepopsitions adjectives nouns formed from adjectives adverbs plural nouns gerunds past tense verbs nouns default a module to find maltparser jar file and its dependencies if a full path is given try to find path to maltparser directory in environment variables checks that that the found directory contains all the necessary jar a module to find pre trained maltparser model if a full path is given try to find path to malt model in environment variables a class for dependency parsing with maltparser the input is the paths to optionally a maltparser directory optionally the path to a pre trained maltparser mco model file optionally the tagger to use for pos tagging before parsing optionally additional java arguments example from nltk parse import malt with malt_parser and malt_model environment set mp malt maltparser model_filename engmalt linear 1 7 mco doctest skip mp parse_one i shot an elephant in my pajamas split tree doctest skip shot i elephant an in pajamas my without malt_parser and malt_model environment mp malt maltparser home user maltparser 1 9 2 home user engmalt linear 1 7 mco doctest skip mp parse_one i shot an elephant in my pajamas split tree doctest skip shot i elephant an in pajamas my an interface for parsing 
with the malt parser param parser_dirname the path to the maltparser directory that contains the maltparser 1 x jar type parser_dirname str param model_filename the name of the pre trained model with mco file extension if provided training will not be required see http www maltparser org mco mco html and see http www patful com chalk node 185 type model_filename str param tagger the tagger used to pos tag the raw string before formatting to conll format it should behave like nltk pos_tag type tagger function param additional_java_args this is the additional java arguments that one can use when calling maltparser usually this is the heapsize limits e g additional_java_args xmx1024m see https goo gl mpdbvq type additional_java_args list find all the necessary jar files for maltparser initialize additional java arguments initialize model set the working_dir parameters i e w from maltparser s option initialize pos tagger use maltparser to parse multiple pos tagged sentences takes multiple sentences where each sentence is a list of word tag tuples the sentences must have already been tokenized and tagged param sentences input sentences to parse type sentence list list tuple str str return iter iter dependencygraph the dependency graph representation of each sentence convert list of sentences to conll format generate command to run maltparser this is a maltparser quirk it needs to be run where the model file is otherwise it goes into an awkward missing jars or strange w working_dir problem remembers the current path change to modelfile path run command change back to current path must return iter iter tree use maltparser to parse multiple sentences takes a list of sentences where each sentence is a list of words each sentence will be automatically tagged with this maltparser instance s tagger param sentences input sentences to parse type sentence list list str return iter dependencygraph this function generates the maltparser command use at the terminal param inputfilename path to the input file type inputfilename str param outputfilename path to the output file type outputfilename str adds additional java arguments joins classpaths with if on windows and on linux mac use adds classpaths for jars adds the main function adds the model file when parsing when learning mode use to generate parses train maltparser from a list of dependencygraph objects param depgraphs list of dependencygraph objects for training input data type depgraphs dependencygraph write the conll_str to malt_train conll file in tmp trains the model with the malt_train conll removes the malt_train conll once training finishes train maltparser from a file param conll_file str for the filename of the training input data type conll_file str if conll_file is a zipfilepathpointer then we need to do some extra massaging generate command to run maltparser a demonstration function to show how nltk users can use the malt parser api from nltk import pos_tag assert malt_parser in os environ str please set malt_parser in your global environment e g n export malt_parser home user maltparser 1 9 2 assert malt_model in os environ str please set malt_model in your global environment e g n export malt_model home user engmalt linear 1 7 mco _dg1_str str 1 john _ nnp _ _ 2 subj _ _ n 2 sees _ vb _ _ 0 root _ _ n 3 a _ dt _ _ 4 spec _ _ n 4 dog _ nn _ _ 2 obj _ _ n 5 _ _ _ 2 punct _ _ n _dg2_str str 1 john _ nnp _ _ 2 subj _ _ n 2 walks _ vb _ _ 0 root _ _ n 3 _ _ _ 2 punct _ _ n dg1 dependencygraph _dg1_str dg2 dependencygraph _dg2_str initialize 
a maltparser object mp maltparser trains a model mp train dg1 dg2 verbose false sent1 john sees mary sent2 john walks a dog parse a single sentence parsed_sent1 mp parse_one sent1 parsed_sent2 mp parse_one sent2 print parsed_sent1 tree sees john mary print parsed_sent2 tree walks john dog a parsing multiple sentences sentences sent1 sent2 parsed_sents mp parse_sents sentences print next next parsed_sents tree sees john mary print next next parsed_sents tree walks john dog a initialize a maltparser object with an english pre trained model parser_dirname maltparser 1 9 2 model_name engmalt linear 1 7 mco mp maltparser parser_dirname parser_dirname model_filename model_name tagger pos_tag sent1 i shot an elephant in my pajamas split sent2 time flies like banana split parse a single sentence print mp parse_one sent1 tree shot i elephant an in pajamas my parsing multiple sentences sentences sent1 sent2 parsed_sents mp parse_sents sentences print next next parsed_sents tree shot i elephant an in pajamas my print next next parsed_sents tree flies time like banana
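Before the full Java round trip, the preprocessing steps described above can be exercised on their own. This sketch assumes only that nltk is installed (no MaltParser jar is needed): it runs the fallback regex tagger and the CoNLL conversion that parse_tagged_sents feeds to the external parser.

from nltk.parse.malt import malt_regex_tagger
from nltk.parse.util import taggedsents_to_conll

tag = malt_regex_tagger()                       # a RegexpTagger.tag callable
tagged = tag("I walked the dog in the park".split())
print(tagged)                                   # pronoun/article/preposition rules fire; the rest default to NN
for line in taggedsents_to_conll([tagged]):     # 10-column CoNLL rows, as written to the temp input file
    print(line, end="")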
import inspect import os import subprocess import sys import tempfile from nltk.data import ZipFilePathPointer from nltk.internals import find_dir, find_file, find_jars_within_path from nltk.parse.api import ParserI from nltk.parse.dependencygraph import DependencyGraph from nltk.parse.util import taggedsents_to_conll def malt_regex_tagger(): from nltk.tag import RegexpTagger _tagger = RegexpTagger( [ (r"\.$", "."), (r"\,$", ","), (r"\?$", "?"), (r"\($", "("), (r"\)$", ")"), (r"\[$", "["), (r"\]$", "]"), (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), (r"(The|the|A|a|An|an)$", "DT"), (r"(He|he|She|she|It|it|I|me|Me|You|you)$", "PRP"), (r"(His|his|Her|her|Its|its)$", "PRP$"), (r"(my|Your|your|Yours|yours)$", "PRP$"), (r"(on|On|in|In|at|At|since|Since)$", "IN"), (r"(for|For|ago|Ago|before|Before)$", "IN"), (r"(till|Till|until|Until)$", "IN"), (r"(by|By|beside|Beside)$", "IN"), (r"(under|Under|below|Below)$", "IN"), (r"(over|Over|above|Above)$", "IN"), (r"(across|Across|through|Through)$", "IN"), (r"(into|Into|towards|Towards)$", "IN"), (r"(onto|Onto|from|From)$", "IN"), (r".*able$", "JJ"), (r".*ness$", "NN"), (r".*ly$", "RB"), (r".*s$", "NNS"), (r".*ing$", "VBG"), (r".*ed$", "VBD"), (r".*", "NN"), ] ) return _tagger.tag def find_maltparser(parser_dirname): if os.path.exists(parser_dirname): _malt_dir = parser_dirname else: _malt_dir = find_dir(parser_dirname, env_vars=("MALT_PARSER",)) malt_dependencies = ["", "", ""] _malt_jars = set(find_jars_within_path(_malt_dir)) _jars = {os.path.split(jar)[1] for jar in _malt_jars} malt_dependencies = {"log4j.jar", "libsvm.jar", "liblinear-1.8.jar"} assert malt_dependencies.issubset(_jars) assert any( filter(lambda i: i.startswith("maltparser-") and i.endswith(".jar"), _jars) ) return list(_malt_jars) def find_malt_model(model_filename): if model_filename is None: return "malt_temp.mco" elif os.path.exists(model_filename): return model_filename else: return find_file(model_filename, env_vars=("MALT_MODEL",), verbose=False) class MaltParser(ParserI): def __init__( self, parser_dirname="", model_filename=None, tagger=None, additional_java_args=None, ): self.malt_jars = find_maltparser(parser_dirname) self.additional_java_args = ( additional_java_args if additional_java_args is not None else [] ) self.model = find_malt_model(model_filename) self._trained = self.model != "malt_temp.mco" self.working_dir = tempfile.gettempdir() self.tagger = tagger if tagger is not None else malt_regex_tagger() def parse_tagged_sents(self, sentences, verbose=False, top_relation_label="null"): if not self._trained: raise Exception("Parser has not been trained. 
Call train() first.") with tempfile.NamedTemporaryFile( prefix="malt_input.conll.", dir=self.working_dir, mode="w", delete=False ) as input_file: with tempfile.NamedTemporaryFile( prefix="malt_output.conll.", dir=self.working_dir, mode="w", delete=False, ) as output_file: for line in taggedsents_to_conll(sentences): input_file.write(str(line)) input_file.close() cmd = self.generate_malt_command( input_file.name, output_file.name, mode="parse" ) _current_path = os.getcwd() try: os.chdir(os.path.split(self.model)[0]) except: pass ret = self._execute(cmd, verbose) os.chdir(_current_path) if ret != 0: raise Exception( "MaltParser parsing (%s) failed with exit " "code %d" % (" ".join(cmd), ret) ) with open(output_file.name) as infile: for tree_str in infile.read().split("\n\n"): yield ( iter( [ DependencyGraph( tree_str, top_relation_label=top_relation_label ) ] ) ) os.remove(input_file.name) os.remove(output_file.name) def parse_sents(self, sentences, verbose=False, top_relation_label="null"): tagged_sentences = (self.tagger(sentence) for sentence in sentences) return self.parse_tagged_sents( tagged_sentences, verbose, top_relation_label=top_relation_label ) def generate_malt_command(self, inputfilename, outputfilename=None, mode=None): cmd = ["java"] cmd += self.additional_java_args classpaths_separator = ";" if sys.platform.startswith("win") else ":" cmd += [ "-cp", classpaths_separator.join(self.malt_jars), ] cmd += ["org.maltparser.Malt"] if os.path.exists(self.model): cmd += ["-c", os.path.split(self.model)[-1]] else: cmd += ["-c", self.model] cmd += ["-i", inputfilename] if mode == "parse": cmd += ["-o", outputfilename] cmd += ["-m", mode] return cmd @staticmethod def _execute(cmd, verbose=False): output = None if verbose else subprocess.PIPE p = subprocess.Popen(cmd, stdout=output, stderr=output) return p.wait() def train(self, depgraphs, verbose=False): with tempfile.NamedTemporaryFile( prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False ) as input_file: input_str = "\n".join(dg.to_conll(10) for dg in depgraphs) input_file.write(str(input_str)) self.train_from_file(input_file.name, verbose=verbose) os.remove(input_file.name) def train_from_file(self, conll_file, verbose=False): if isinstance(conll_file, ZipFilePathPointer): with tempfile.NamedTemporaryFile( prefix="malt_train.conll.", dir=self.working_dir, mode="w", delete=False ) as input_file: with conll_file.open() as conll_input_file: conll_str = conll_input_file.read() input_file.write(str(conll_str)) return self.train_from_file(input_file.name, verbose=verbose) cmd = self.generate_malt_command(conll_file, mode="learn") ret = self._execute(cmd, verbose) if ret != 0: raise Exception( "MaltParser training (%s) failed with exit " "code %d" % (" ".join(cmd), ret) ) self._trained = True if __name__ == "__main__": import doctest doctest.testmod()
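A hypothetical end-to-end call, mirroring the docstring demo above. The directory and model paths are placeholders; a local MaltParser install and a pre-trained .mco model (or the MALT_PARSER and MALT_MODEL environment variables) are assumed.

from nltk.parse import malt

# Placeholder paths -- substitute a real MaltParser directory and .mco model.
mp = malt.MaltParser("/opt/maltparser-1.9.2", "/opt/engmalt.linear-1.7.mco")
graph = mp.parse_one("I shot an elephant in my pajamas".split())
print(graph.tree())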
natural language toolkit dependency grammars c 20012023 nltk project jason narad jason naradgmail com url https www nltk org for license information see license txt dependencyscoreri interface for graphedge weight calculation a scorer for calculated the weights on the edges of a weighted dependency graph this is used by a probabilisticnonprojectiveparser to initialize the edge weights of a dependencygraph while typically this would be done by training a binary classifier any class that can return a multidimensional list representation of the edge weights can implement this interface as such it has no necessary fields type graphs listdependencygraph param graphs a list of dependency graphs to train the scorer typically the edges present in the graphs can be used as positive training examples and the edges not present as negative examples type graph dependencygraph param graph a dependency graph whose set of edges need to be scored rtype a threedimensional list of numbers return the score is returned in a multidimensional3 list such that the outerdimension refers to the head and the innerdimension refers to the dependencies for instance scores01 would reference the list of scores corresponding to arcs from node 0 to node 1 the node s address field can be used to determine its number identification for further illustration a score list corresponding to fig 2 of keith hall s kbest spanning tree parsing paper scores 5 1 1 11 4 10 5 8 8 when used in conjunction with a maxentclassifier each score would correspond to the confidence of a particular edge being classified with the positive training examples naivebayesdependencyscorer a dependency scorer built around a maxent classifier in this particular class that classifier is a naivebayesclassifier it uses headword headtag childword and childtag features for classification from nltk parse dependencygraph import dependencygraph conlldata2 graphs dependencygraphentry for entry in conlldata2 split nn if entry npp probabilisticnonprojectiveparser npp traingraphs naivebayesdependencyscorer parses npp parse cathy zag hen zwaaien n v pron adj n punc lenlistparses 1 trains a naivebayesclassifier using the edges present in graphs list as positive examples the edges not present as negative examples uses a feature vector of headword headtag childword and childtag type graphs listdependencygraph param graphs a list of dependency graphs to train the scorer create training labeled training examples converts the graph into a featurebased representation of each edge and then assigns a score to each based on the confidence of the classifier in assigning it to the positive label scores are returned in a multidimensional list type graph dependencygraph param graph a dependency graph to score rtype 3 dimensional list return edge scores for the graph parameter convert graph to feature representation score edges smoothing in case the probability 0 a scorer for demo purposes a short class necessary to show parsing example from paper scores for keith hall kbest spanning tree parsing paper nonprojective probabilistic parsing a probabilistic nonprojective dependency parser nonprojective dependencies allows for crossing branches in the parse tree which is necessary for representing particular linguistic phenomena or even typical parses in some languages this parser follows the mst parsing algorithm outlined in mcdonald2005 which likens the search for the best nonprojective parse to finding the maximum spanning tree in a weighted directed graph class scorerdependencyscoreri 
def trainself graphs pass def scoreself graph return 5 1 1 11 4 10 5 8 8 npp probabilisticnonprojectiveparser npp train scorer parses npp parse v1 v2 v3 none none none lenlistparses 1 rule based example from nltk grammar import dependencygrammar grammar dependencygrammar fromstring taught play man man the in in corner corner the play golf dachshund to dachshund his ndp nonprojectivedependencyparsergrammar parses ndp parse the man in the corner taught his dachshund to play golf lenlistparses 4 creates a new nonprojective parser trains a dependencyscoreri from a set of dependencygraph objects and establishes this as the parser s scorer this is used to initialize the scores on a dependencygraph during the parsing procedure type graphs listdependencygraph param graphs a list of dependency graphs to train the scorer type dependencyscorer dependencyscoreri param dependencyscorer a scorer which implements the dependencyscoreri interface assigns a score to every edge in the dependencygraph graph these scores are generated via the parser s scorer which was assigned during the training process type graph dependencygraph param graph a dependency graph to assign scores to takes a list of nodes that have been identified to belong to a cycle and collapses them into on larger node the arcs of all nodes in the graph must be updated to account for this type newnode node param newnode a node dictionary to collapse the cycle nodes into type cyclepath a list of integers param cyclepath a list of node addresses each of which is in the cycle type ggraph bgraph cgraph dependencygraph param ggraph bgraph cgraph graphs which need to be updated collapse all cycle nodes into vn1 in ggraph updates the edge scores to reflect a collapse operation into newnode type newnode a node param newnode the node which cycle nodes are collapsed into type cyclepath a list of integers param cyclepath a list of node addresses that belong to the cycle as nodes are collapsed into others they are replaced by the new node in the graph but it s still necessary to keep track of what these original nodes were this takes a list of node addresses and replaces any collapsed node addresses with their original addresses type newindexes a list of integers param newindexes a list of node addresses to check for subsumed nodes when updating scores the score of the highestweighted incoming arc is subtracted upon collapse this returns the correct amount to subtract from that edge type columnindex integer param columnindex a index representing the column of incoming arcs to a particular node being updated type cycleindexes a list of integers param cycleindexes only arcs from cycle nodes are considered this is a list of such nodes addresses returns the source of the best incoming arc to the node with address nodeindex type nodeindex integer param nodeindex the address of the destination node the node that is arced to parses a list of tokens in accordance to the mst parsing algorithm for nonprojective dependency parses assumes that the tokens to be parsed have already been tagged and those tags are provided various scoring methods can be used by implementing the dependencyscoreri interface and passing it to the training algorithm type tokens liststr param tokens a list of words or punctuation to be parsed type tags liststr param tags a list of tags corresponding by index to the words in the tokens list return an iterator of nonprojective parses rtype iterdependencygraph initialize ggraph fully connect nonroot nodes in ggraph assign initial scores to 
ggraph edges initialize a list of unvisited vertices by node address iterate over unvisited vertices mark current node as visited get corresponding node ni to vertex vi get best inedge node b for current node bgraph unionbgraph b betacurrent node b stored for parse recovery if bgraph contains a cycle collapse it create a new node vn1 with address lennodes 1 cgraph unioncgraph vn1 collapse all nodes in cycle c into vn1 self replacedbycycleindex newnode address add vn1 to list of unvisited vertices increment of nodes counter remove cycle nodes from bgraph b b cycle c recover parse tree todo it s dangerous to assume that deps it a dictionary because it s a default dictionary ideally here we should not be concerned how dependencies are stored inside of a dependency graph rulebased nonprojective parser a nonprojective rulebased dependency parser this parser will return the set of all possible nonprojective parses based on the wordtoword relations defined in the parser s dependency grammar and will allow the branches of the parse tree to cross in order to capture a variety of linguistic phenomena that a projective parser will not creates a new nonprojectivedependencyparser param dependencygrammar a grammar of wordtoword relations type dependencygrammar dependencygrammar parses the input tokens with respect to the parser s grammar parsing is accomplished by representing the searchspace of possible parses as a fullyconnected directed graph arcs that would lead to ungrammatical parses are removed and a lattice is constructed of length n where n is the number of input tokens to represent all possible grammatical traversals all possible paths through the lattice are then enumerated to produce the set of nonprojective parses param tokens a list of tokens to parse type tokens liststr return an iterator of nonprojective parses rtype iterdependencygraph create graph representation of tokens create lattice of possible heads set roots to attempt traverse lattice filter parses ensure 1 root every thing has 1 head there are several root elements todo check for cycles demos halldemo taught play man man the in in corner corner the play golf dachshund to dachshund his natural language toolkit dependency grammars c 2001 2023 nltk project jason narad jason narad gmail com url https www nltk org for license information see license txt dependencyscoreri interface for graph edge weight calculation a scorer for calculated the weights on the edges of a weighted dependency graph this is used by a probabilisticnonprojectiveparser to initialize the edge weights of a dependencygraph while typically this would be done by training a binary classifier any class that can return a multidimensional list representation of the edge weights can implement this interface as such it has no necessary fields type graphs list dependencygraph param graphs a list of dependency graphs to train the scorer typically the edges present in the graphs can be used as positive training examples and the edges not present as negative examples type graph dependencygraph param graph a dependency graph whose set of edges need to be scored rtype a three dimensional list of numbers return the score is returned in a multidimensional 3 list such that the outer dimension refers to the head and the inner dimension refers to the dependencies for instance scores 0 1 would reference the list of scores corresponding to arcs from node 0 to node 1 the node s address field can be used to determine its number identification for further illustration a score list 
corresponding to fig 2 of keith hall s k best spanning tree parsing paper scores 5 1 1 11 4 10 5 8 8 when used in conjunction with a maxentclassifier each score would correspond to the confidence of a particular edge being classified with the positive training examples naivebayesdependencyscorer a dependency scorer built around a maxent classifier in this particular class that classifier is a naivebayesclassifier it uses head word head tag child word and child tag features for classification from nltk parse dependencygraph import dependencygraph conll_data2 graphs dependencygraph entry for entry in conll_data2 split n n if entry npp probabilisticnonprojectiveparser npp train graphs naivebayesdependencyscorer parses npp parse cathy zag hen zwaaien n v pron adj n punc len list parses 1 do nothing without throwing error trains a naivebayesclassifier using the edges present in graphs list as positive examples the edges not present as negative examples uses a feature vector of head word head tag child word and child tag type graphs list dependencygraph param graphs a list of dependency graphs to train the scorer create training labeled training examples converts the graph into a feature based representation of each edge and then assigns a score to each based on the confidence of the classifier in assigning it to the positive label scores are returned in a multidimensional list type graph dependencygraph param graph a dependency graph to score rtype 3 dimensional list return edge scores for the graph parameter convert graph to feature representation score edges smoothing in case the probability 0 a scorer for demo purposes a short class necessary to show parsing example from paper scores for keith hall k best spanning tree parsing paper non projective probabilistic parsing a probabilistic non projective dependency parser nonprojective dependencies allows for crossing branches in the parse tree which is necessary for representing particular linguistic phenomena or even typical parses in some languages this parser follows the mst parsing algorithm outlined in mcdonald 2005 which likens the search for the best non projective parse to finding the maximum spanning tree in a weighted directed graph class scorer dependencyscoreri def train self graphs pass def score self graph return 5 1 1 11 4 10 5 8 8 npp probabilisticnonprojectiveparser npp train scorer parses npp parse v1 v2 v3 none none none len list parses 1 rule based example from nltk grammar import dependencygrammar grammar dependencygrammar fromstring taught play man man the in in corner corner the play golf dachshund to dachshund his ndp nonprojectivedependencyparser grammar parses ndp parse the man in the corner taught his dachshund to play golf len list parses 4 creates a new non projective parser trains a dependencyscoreri from a set of dependencygraph objects and establishes this as the parser s scorer this is used to initialize the scores on a dependencygraph during the parsing procedure type graphs list dependencygraph param graphs a list of dependency graphs to train the scorer type dependency_scorer dependencyscoreri param dependency_scorer a scorer which implements the dependencyscoreri interface assigns a score to every edge in the dependencygraph graph these scores are generated via the parser s scorer which was assigned during the training process type graph dependencygraph param graph a dependency graph to assign scores to takes a list of nodes that have been identified to belong to a cycle and collapses them into on larger node 
the arcs of all nodes in the graph must be updated to account for this type new_node node param new_node a node dictionary to collapse the cycle nodes into type cycle_path a list of integers param cycle_path a list of node addresses each of which is in the cycle type g_graph b_graph c_graph dependencygraph param g_graph b_graph c_graph graphs which need to be updated collapse all cycle nodes into v_n 1 in g_graph updates the edge scores to reflect a collapse operation into new_node type new_node a node param new_node the node which cycle nodes are collapsed into type cycle_path a list of integers param cycle_path a list of node addresses that belong to the cycle as nodes are collapsed into others they are replaced by the new node in the graph but it s still necessary to keep track of what these original nodes were this takes a list of node addresses and replaces any collapsed node addresses with their original addresses type new_indexes a list of integers param new_indexes a list of node addresses to check for subsumed nodes when updating scores the score of the highest weighted incoming arc is subtracted upon collapse this returns the correct amount to subtract from that edge type column_index integer param column_index a index representing the column of incoming arcs to a particular node being updated type cycle_indexes a list of integers param cycle_indexes only arcs from cycle nodes are considered this is a list of such nodes addresses returns the source of the best incoming arc to the node with address node_index type node_index integer param node_index the address of the destination node the node that is arced to parses a list of tokens in accordance to the mst parsing algorithm for non projective dependency parses assumes that the tokens to be parsed have already been tagged and those tags are provided various scoring methods can be used by implementing the dependencyscoreri interface and passing it to the training algorithm type tokens list str param tokens a list of words or punctuation to be parsed type tags list str param tags a list of tags corresponding by index to the words in the tokens list return an iterator of non projective parses rtype iter dependencygraph initialize g_graph fully connect non root nodes in g_graph assign initial scores to g_graph edges initialize a list of unvisited vertices by node address iterate over unvisited vertices mark current node as visited get corresponding node n_i to vertex v_i get best in edge node b for current node b_graph union b_graph b beta current node b stored for parse recovery if b_graph contains a cycle collapse it create a new node v_n 1 with address len nodes 1 c_graph union c_graph v_n 1 collapse all nodes in cycle c into v_n 1 self replaced_by cycle_index new_node address add v_n 1 to list of unvisited vertices increment of nodes counter remove cycle nodes from b_graph b b cycle c recover parse tree todo it s dangerous to assume that deps it a dictionary because it s a default dictionary ideally here we should not be concerned how dependencies are stored inside of a dependency graph rule based non projective parser a non projective rule based dependency parser this parser will return the set of all possible non projective parses based on the word to word relations defined in the parser s dependency grammar and will allow the branches of the parse tree to cross in order to capture a variety of linguistic phenomena that a projective parser will not creates a new nonprojectivedependencyparser param dependency_grammar a grammar 
of word to word relations type dependency_grammar dependencygrammar parses the input tokens with respect to the parser s grammar parsing is accomplished by representing the search space of possible parses as a fully connected directed graph arcs that would lead to ungrammatical parses are removed and a lattice is constructed of length n where n is the number of input tokens to represent all possible grammatical traversals all possible paths through the lattice are then enumerated to produce the set of non projective parses param tokens a list of tokens to parse type tokens list str return an iterator of non projective parses rtype iter dependencygraph create graph representation of tokens create lattice of possible heads set roots to attempt traverse lattice filter parses ensure 1 root every thing has 1 head there are several root elements todo check for cycles demos hall_demo taught play man man the in in corner corner the play golf dachshund to dachshund his
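The rule-based parser described above can be driven directly from a word-to-word dependency grammar. This sketch reuses the toy grammar from the module's own docstring demo.

from nltk.grammar import DependencyGrammar
from nltk.parse.nonprojectivedependencyparser import NonprojectiveDependencyParser

grammar = DependencyGrammar.fromstring("""
'taught' -> 'play' | 'man'
'man' -> 'the' | 'in'
'in' -> 'corner'
'corner' -> 'the'
'play' -> 'golf' | 'dachshund' | 'to'
'dachshund' -> 'his'
""")

ndp = NonprojectiveDependencyParser(grammar)
sent = "the man in the corner taught his dachshund to play golf".split()
parses = list(ndp.parse(sent))
print(len(parses))  # the docstring demo reports 4 analyses for this grammar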
import logging import math from nltk.parse.dependencygraph import DependencyGraph logger = logging.getLogger(__name__) class DependencyScorerI: def __init__(self): if self.__class__ == DependencyScorerI: raise TypeError("DependencyScorerI is an abstract interface") def train(self, graphs): raise NotImplementedError() def score(self, graph): raise NotImplementedError() class NaiveBayesDependencyScorer(DependencyScorerI): def __init__(self): pass def train(self, graphs): from nltk.classify import NaiveBayesClassifier labeled_examples = [] for graph in graphs: for head_node in graph.nodes.values(): for child_index, child_node in graph.nodes.items(): if child_index in head_node["deps"]: label = "T" else: label = "F" labeled_examples.append( ( dict( a=head_node["word"], b=head_node["tag"], c=child_node["word"], d=child_node["tag"], ), label, ) ) self.classifier = NaiveBayesClassifier.train(labeled_examples) def score(self, graph): edges = [] for head_node in graph.nodes.values(): for child_node in graph.nodes.values(): edges.append( dict( a=head_node["word"], b=head_node["tag"], c=child_node["word"], d=child_node["tag"], ) ) edge_scores = [] row = [] count = 0 for pdist in self.classifier.prob_classify_many(edges): logger.debug("%.4f %.4f", pdist.prob("T"), pdist.prob("F")) row.append([math.log(pdist.prob("T") + 0.00000000001)]) count += 1 if count == len(graph.nodes): edge_scores.append(row) row = [] count = 0 return edge_scores class DemoScorer(DependencyScorerI): def train(self, graphs): print("Training...") def score(self, graph): return [ [[], [5], [1], [1]], [[], [], [11], [4]], [[], [10], [], [5]], [[], [8], [8], []], ] class ProbabilisticNonprojectiveParser: def __init__(self): logging.debug("initializing prob. nonprojective...") def train(self, graphs, dependency_scorer): self._scorer = dependency_scorer self._scorer.train(graphs) def initialize_edge_scores(self, graph): self.scores = self._scorer.score(graph) def collapse_nodes(self, new_node, cycle_path, g_graph, b_graph, c_graph): logger.debug("Collapsing nodes...") for cycle_node_index in cycle_path: g_graph.remove_by_address(cycle_node_index) g_graph.add_node(new_node) g_graph.redirect_arcs(cycle_path, new_node["address"]) def update_edge_scores(self, new_node, cycle_path): logger.debug("cycle %s", cycle_path) cycle_path = self.compute_original_indexes(cycle_path) logger.debug("old cycle %s", cycle_path) logger.debug("Prior to update: %s", self.scores) for i, row in enumerate(self.scores): for j, column in enumerate(self.scores[i]): logger.debug(self.scores[i][j]) if j in cycle_path and i not in cycle_path and self.scores[i][j]: subtract_val = self.compute_max_subtract_score(j, cycle_path) logger.debug("%s - %s", self.scores[i][j], subtract_val) new_vals = [] for cur_val in self.scores[i][j]: new_vals.append(cur_val - subtract_val) self.scores[i][j] = new_vals for i, row in enumerate(self.scores): for j, cell in enumerate(self.scores[i]): if i in cycle_path and j in cycle_path: self.scores[i][j] = [] logger.debug("After update: %s", self.scores) def compute_original_indexes(self, new_indexes): swapped = True while swapped: originals = [] swapped = False for new_index in new_indexes: if new_index in self.inner_nodes: for old_val in self.inner_nodes[new_index]: if old_val not in originals: originals.append(old_val) swapped = True else: originals.append(new_index) new_indexes = originals return new_indexes def compute_max_subtract_score(self, column_index, cycle_indexes): max_score = -100000 for row_index in cycle_indexes: for 
subtract_val in self.scores[row_index][column_index]: if subtract_val > max_score: max_score = subtract_val return max_score def best_incoming_arc(self, node_index): originals = self.compute_original_indexes([node_index]) logger.debug("originals: %s", originals) max_arc = None max_score = None for row_index in range(len(self.scores)): for col_index in range(len(self.scores[row_index])): if col_index in originals and ( max_score is None or self.scores[row_index][col_index] > max_score ): max_score = self.scores[row_index][col_index] max_arc = row_index logger.debug("%s, %s", row_index, col_index) logger.debug(max_score) for key in self.inner_nodes: replaced_nodes = self.inner_nodes[key] if max_arc in replaced_nodes: return key return max_arc def original_best_arc(self, node_index): originals = self.compute_original_indexes([node_index]) max_arc = None max_score = None max_orig = None for row_index in range(len(self.scores)): for col_index in range(len(self.scores[row_index])): if col_index in originals and ( max_score is None or self.scores[row_index][col_index] > max_score ): max_score = self.scores[row_index][col_index] max_arc = row_index max_orig = col_index return [max_arc, max_orig] def parse(self, tokens, tags): self.inner_nodes = {} g_graph = DependencyGraph() for index, token in enumerate(tokens): g_graph.nodes[index + 1].update( {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} ) g_graph.connect_graph() original_graph = DependencyGraph() for index, token in enumerate(tokens): original_graph.nodes[index + 1].update( {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} ) b_graph = DependencyGraph() c_graph = DependencyGraph() for index, token in enumerate(tokens): c_graph.nodes[index + 1].update( {"word": token, "tag": tags[index], "rel": "NTOP", "address": index + 1} ) self.initialize_edge_scores(g_graph) logger.debug(self.scores) unvisited_vertices = [vertex["address"] for vertex in c_graph.nodes.values()] nr_vertices = len(tokens) betas = {} while unvisited_vertices: current_vertex = unvisited_vertices.pop(0) logger.debug("current_vertex: %s", current_vertex) current_node = g_graph.get_by_address(current_vertex) logger.debug("current_node: %s", current_node) best_in_edge = self.best_incoming_arc(current_vertex) betas[current_vertex] = self.original_best_arc(current_vertex) logger.debug("best in arc: %s --> %s", best_in_edge, current_vertex) for new_vertex in [current_vertex, best_in_edge]: b_graph.nodes[new_vertex].update( {"word": "TEMP", "rel": "NTOP", "address": new_vertex} ) b_graph.add_arc(best_in_edge, current_vertex) cycle_path = b_graph.contains_cycle() if cycle_path: new_node = {"word": "NONE", "rel": "NTOP", "address": nr_vertices + 1} c_graph.add_node(new_node) self.update_edge_scores(new_node, cycle_path) self.collapse_nodes(new_node, cycle_path, g_graph, b_graph, c_graph) for cycle_index in cycle_path: c_graph.add_arc(new_node["address"], cycle_index) self.inner_nodes[new_node["address"]] = cycle_path unvisited_vertices.insert(0, nr_vertices + 1) nr_vertices += 1 for cycle_node_address in cycle_path: b_graph.remove_by_address(cycle_node_address) logger.debug("g_graph: %s", g_graph) logger.debug("b_graph: %s", b_graph) logger.debug("c_graph: %s", c_graph) logger.debug("Betas: %s", betas) logger.debug("replaced nodes %s", self.inner_nodes) logger.debug("Final scores: %s", self.scores) logger.debug("Recovering parse...") for i in range(len(tokens) + 1, nr_vertices + 1): betas[betas[i][1]] = betas[i] logger.debug("Betas: %s", 
betas) for node in original_graph.nodes.values(): node["deps"] = {} for i in range(1, len(tokens) + 1): original_graph.add_arc(betas[i][0], betas[i][1]) logger.debug("Done.") yield original_graph class NonprojectiveDependencyParser: def __init__(self, dependency_grammar): self._grammar = dependency_grammar def parse(self, tokens): self._graph = DependencyGraph() for index, token in enumerate(tokens): self._graph.nodes[index] = { "word": token, "deps": [], "rel": "NTOP", "address": index, } for head_node in self._graph.nodes.values(): deps = [] for dep_node in self._graph.nodes.values(): if ( self._grammar.contains(head_node["word"], dep_node["word"]) and head_node["word"] != dep_node["word"] ): deps.append(dep_node["address"]) head_node["deps"] = deps roots = [] possible_heads = [] for i, word in enumerate(tokens): heads = [] for j, head in enumerate(tokens): if (i != j) and self._grammar.contains(head, word): heads.append(j) if len(heads) == 0: roots.append(i) possible_heads.append(heads) if len(roots) < 2: if len(roots) == 0: for i in range(len(tokens)): roots.append(i) analyses = [] for _ in roots: stack = [] analysis = [[] for i in range(len(possible_heads))] i = 0 forward = True while i >= 0: if forward: if len(possible_heads[i]) == 1: analysis[i] = possible_heads[i][0] elif len(possible_heads[i]) == 0: analysis[i] = -1 else: head = possible_heads[i].pop() analysis[i] = head stack.append([i, head]) if not forward: index_on_stack = False for stack_item in stack: if stack_item[0] == i: index_on_stack = True orig_length = len(possible_heads[i]) if index_on_stack and orig_length == 0: for j in range(len(stack) - 1, -1, -1): stack_item = stack[j] if stack_item[0] == i: possible_heads[i].append(stack.pop(j)[1]) elif index_on_stack and orig_length > 0: head = possible_heads[i].pop() analysis[i] = head stack.append([i, head]) forward = True if i + 1 == len(possible_heads): analyses.append(analysis[:]) forward = False if forward: i += 1 else: i -= 1 for analysis in analyses: if analysis.count(-1) > 1: continue graph = DependencyGraph() graph.root = graph.nodes[analysis.index(-1) + 1] for address, (token, head_index) in enumerate( zip(tokens, analysis), start=1 ): head_address = head_index + 1 node = graph.nodes[address] node.update({"word": token, "address": address}) if head_address == 0: rel = "ROOT" else: rel = "" graph.nodes[head_index + 1]["deps"][rel].append(address) yield graph def demo(): nonprojective_conll_parse_demo() rule_based_demo() def hall_demo(): npp = ProbabilisticNonprojectiveParser() npp.train([], DemoScorer()) for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]): print(parse_graph) def nonprojective_conll_parse_demo(): from nltk.parse.dependencygraph import conll_data2 graphs = [DependencyGraph(entry) for entry in conll_data2.split("\n\n") if entry] npp = ProbabilisticNonprojectiveParser() npp.train(graphs, NaiveBayesDependencyScorer()) for parse_graph in npp.parse( ["Cathy", "zag", "hen", "zwaaien", "."], ["N", "V", "Pron", "Adj", "N", "Punc"] ): print(parse_graph) def rule_based_demo(): from nltk.grammar import DependencyGrammar grammar = DependencyGrammar.fromstring( ) print(grammar) ndp = NonprojectiveDependencyParser(grammar) graphs = ndp.parse( [ "the", "man", "in", "the", "corner", "taught", "his", "dachshund", "to", "play", "golf", ] ) print("Graphs:") for graph in graphs: print(graph) if __name__ == "__main__": demo()
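A sketch of the probabilistic variant using the hard-coded DemoScorer defined above (essentially the module's hall_demo), which sidesteps training a real classifier.

from nltk.parse.nonprojectivedependencyparser import (
    DemoScorer,
    ProbabilisticNonprojectiveParser,
)

npp = ProbabilisticNonprojectiveParser()
npp.train([], DemoScorer())                     # DemoScorer ignores its training data
for parse_graph in npp.parse(["v1", "v2", "v3"], [None, None, None]):
    print(parse_graph)                          # the single maximum-spanning-tree parse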
natural language toolkit recursive descent parser c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt recursive descent parser a simple topdown cfg parser that parses texts by recursively expanding the fringe of a tree and matching it against a text recursivedescentparser uses a list of tree locations called a frontier to remember which subtrees have not yet been expanded and which leaves have not yet been matched against the text each tree location consists of a list of child indices specifying the path from the root of the tree to a subtree or a leaf see the reference documentation for tree for more information about tree locations when the parser begins parsing a text it constructs a tree containing only the start symbol and a frontier containing the location of the tree s root node it then extends the tree to cover the text using the following recursive procedure if the frontier is empty and the text is covered by the tree then return the tree as a possible parse if the frontier is empty and the text is not covered by the tree then return no parses if the first element of the frontier is a subtree then use cfg productions to expand it for each applicable production add the expanded subtree s children to the frontier and recursively find all parses that can be generated by the new tree and frontier if the first element of the frontier is a token then match it against the next token from the text remove the token from the frontier and recursively find all parses that can be generated by the new tree and frontier see nltk grammar create a new recursivedescentparser that uses grammar to parse texts type grammar cfg param grammar the grammar used to parse texts type trace int param trace the level of tracing that should be used when parsing a text 0 will generate no tracing output and higher numbers will produce more verbose tracing output inherit docs from parseri start a recursive descent parse with an initial tree containing just the start symbol recursively expand and match each elements of tree specified by frontier to cover remainingtext return a list of all parses found return an iterator of all parses that can be generated by matching and expanding the elements of tree specified by frontier rtype itertree type tree tree param tree a partial structure for the text that is currently being parsed the elements of tree that are specified by frontier have not yet been expanded or matched type remainingtext liststr param remainingtext the portion of the text that is not yet covered by tree type frontier listtupleint param frontier a list of the locations within tree of all subtrees that have not yet been expanded and all leaves that have not yet been matched this list sorted in lefttoright order of location within the tree if the tree covers the text and there s nothing left to expand then we ve found a complete parse return it if there s still text but nothing left to expand we failed if the next element on the frontier is a tree expand it if the next element on the frontier is a token match it rtype itertree return an iterator of all parses that can be generated by matching the first element of frontier against the first token in rtext in particular if the first element of frontier has the same type as the first token in rtext then substitute the token into tree and return all parses that can be generated by matching and expanding the remaining elements of frontier if the first element of 
frontier does not have the same type as the first token in rtext then return empty list type tree tree param tree a partial structure for the text that is currently being parsed the elements of tree that are specified by frontier have not yet been expanded or matched type rtext liststr param rtext the portion of the text that is not yet covered by tree type frontier list of tuple of int param frontier a list of the locations within tree of all subtrees that have not yet been expanded and all leaves that have not yet been matched if it s a terminal that matches rtext0 then substitute in the token and continue parsing if it s a nonmatching terminal fail rtype itertree return an iterator of all parses that can be generated by expanding the first element of frontier with production in particular if the first element of frontier is a subtree whose node type is equal to production s left hand side then add a child to that subtree for each element of production s right hand side and return all parses that can be generated by matching and expanding the remaining elements of frontier if the first element of frontier is not a subtree whose node type is equal to production s left hand side then return an empty list if production is not specified then return a list of all parses that can be generated by expanding the first element of frontier with any cfg production type tree tree param tree a partial structure for the text that is currently being parsed the elements of tree that are specified by frontier have not yet been expanded or matched type remainingtext liststr param remainingtext the portion of the text that is not yet covered by tree type frontier listtupleint param frontier a list of the locations within tree of all subtrees that have not yet been expanded and all leaves that have not yet been matched rtype tree return the tree that is licensed by production in particular given the production lhs elt1 eltn return a tree that has a node lhs symbol and n children for each nonterminal element elti in the production the tree token has a childless subtree with node value elti symbol and for each terminal element eltj the tree token has a leaf token with type eltj param production the cfg production that licenses the tree token that should be returned type production production this will be matched set the level of tracing output that should be generated when parsing a text type trace int param trace the trace level a trace level of 0 will generate no tracing output and higher trace levels will produce more verbose tracing output rtype none print trace output displaying the fringe of tree the fringe of tree consists of all of its leaves and all of its childless subtrees rtype none print trace output displaying the parser s current state param operation a character identifying the operation that generated the current state rtype none stepping recursive descent parser a recursivedescentparser that allows you to step through the parsing process performing a single operation at a time the initialize method is used to start parsing a text expand expands the first element on the frontier using a single cfg production and match matches the first element on the frontier against the next text token backtrack undoes the most recent expand or match operation step performs a single expand match or backtrack operation parses returns the set of parses that have been found by the parser ivar history a list of rtext tree frontier tripples containing the previous states of the parser this history is used to 
implement the backtrack operation ivar triede a record of all productions that have been tried for a given tree this record is used by expand to perform the next untried production ivar triedm a record of what tokens have been matched for a given tree this record is used by step to decide whether or not to match a token see nltk grammar xx temporary hack warning this should be replaced with something nicer when we get the chance for pos in c treepositions leaves cpos cpos freeze start parsing a given text this sets the parser s tree to the start symbol its frontier to the root node and its remaining text to token subtokens return the portion of the text that is not yet covered by the tree rtype liststr return a list of the tree locations of all subtrees that have not yet been expanded and all leaves that have not yet been matched rtype listtupleint return a partial structure for the text that is currently being parsed the elements specified by the frontier have not yet been expanded or matched rtype tree perform a single parsing operation if an untried match is possible then perform the match and return the matched token if an untried expansion is possible then perform the expansion and return the production that it is based on if backtracking is possible then backtrack and return true otherwise return none return none if no operation was performed a token if a match was performed a production if an expansion was performed and true if a backtrack operation was performed rtype production or string or bool try matching if we haven t already try expanding try backtracking nothing left to do expand the first element of the frontier in particular if the first element of the frontier is a subtree whose node type is equal to production s left hand side then add a child to that subtree for each element of production s right hand side if production is not specified then use the first untried expandable production if all expandable productions have been tried do nothing return the production used to expand the frontier if an expansion was performed if no expansion was performed return none rtype production or none make sure we can expand if they didn t specify a production check all untried ones record that we ve tried this production now try expanding we didn t expand anything match the first element of the frontier in particular if the first element of the frontier has the same type as the next text token then substitute the text token into the tree return the token matched if a match operation was performed if no match was performed return none rtype str or none record that we ve tried matching this token make sure we can match return the token we just matched return the parser to its state before the most recent match or expand operation calling undo repeatedly return the parser to successively earlier states if no match or expand operations have been performed undo will make no changes return true if an operation was successfully undone rtype bool return a list of all the productions for which expansions are available for the current parser state rtype listproduction make sure we can expand return a list of all the untried productions for which expansions are available for the current parser state rtype listproduction return whether the first element of the frontier is a token that has not yet been matched rtype bool return whether the parser s current state represents a complete parse rtype bool a stub version of parse that sets the parsers current state to the given arguments in 
recursivedescentparser the parse method is used to recursively continue parsing a text steppingrecursivedescentparser overrides it to capture these recursive calls it records the parser s old state in the history to allow for backtracking and updates the parser s new state using the given arguments finally it returns 1 which is used by match and expand to detect whether their operations were successful return 1 rtype list of int is it a good parse if so record it return an iterator of the parses that have been found by this parser so far rtype list of tree change the grammar used to parse texts param grammar the new grammar type grammar cfg demonstration code a demonstration of the recursive descent parser s np vp np det n det n pp vp v np v np pp pp p np np i n man park telescope dog det the a p in with v saw natural language toolkit recursive descent parser c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt recursive descent parser a simple top down cfg parser that parses texts by recursively expanding the fringe of a tree and matching it against a text recursivedescentparser uses a list of tree locations called a frontier to remember which subtrees have not yet been expanded and which leaves have not yet been matched against the text each tree location consists of a list of child indices specifying the path from the root of the tree to a subtree or a leaf see the reference documentation for tree for more information about tree locations when the parser begins parsing a text it constructs a tree containing only the start symbol and a frontier containing the location of the tree s root node it then extends the tree to cover the text using the following recursive procedure if the frontier is empty and the text is covered by the tree then return the tree as a possible parse if the frontier is empty and the text is not covered by the tree then return no parses if the first element of the frontier is a subtree then use cfg productions to expand it for each applicable production add the expanded subtree s children to the frontier and recursively find all parses that can be generated by the new tree and frontier if the first element of the frontier is a token then match it against the next token from the text remove the token from the frontier and recursively find all parses that can be generated by the new tree and frontier see nltk grammar create a new recursivedescentparser that uses grammar to parse texts type grammar cfg param grammar the grammar used to parse texts type trace int param trace the level of tracing that should be used when parsing a text 0 will generate no tracing output and higher numbers will produce more verbose tracing output inherit docs from parseri start a recursive descent parse with an initial tree containing just the start symbol recursively expand and match each elements of tree specified by frontier to cover remaining_text return a list of all parses found return an iterator of all parses that can be generated by matching and expanding the elements of tree specified by frontier rtype iter tree type tree tree param tree a partial structure for the text that is currently being parsed the elements of tree that are specified by frontier have not yet been expanded or matched type remaining_text list str param remaining_text the portion of the text that is not yet covered by tree type frontier list tuple int param frontier a list of the locations within tree of all 
subtrees that have not yet been expanded and all leaves that have not yet been matched this list sorted in left to right order of location within the tree if the tree covers the text and there s nothing left to expand then we ve found a complete parse return it if there s still text but nothing left to expand we failed if the next element on the frontier is a tree expand it if the next element on the frontier is a token match it rtype iter tree return an iterator of all parses that can be generated by matching the first element of frontier against the first token in rtext in particular if the first element of frontier has the same type as the first token in rtext then substitute the token into tree and return all parses that can be generated by matching and expanding the remaining elements of frontier if the first element of frontier does not have the same type as the first token in rtext then return empty list type tree tree param tree a partial structure for the text that is currently being parsed the elements of tree that are specified by frontier have not yet been expanded or matched type rtext list str param rtext the portion of the text that is not yet covered by tree type frontier list of tuple of int param frontier a list of the locations within tree of all subtrees that have not yet been expanded and all leaves that have not yet been matched if it s a terminal that matches rtext 0 then substitute in the token and continue parsing if it s a non matching terminal fail rtype iter tree return an iterator of all parses that can be generated by expanding the first element of frontier with production in particular if the first element of frontier is a subtree whose node type is equal to production s left hand side then add a child to that subtree for each element of production s right hand side and return all parses that can be generated by matching and expanding the remaining elements of frontier if the first element of frontier is not a subtree whose node type is equal to production s left hand side then return an empty list if production is not specified then return a list of all parses that can be generated by expanding the first element of frontier with any cfg production type tree tree param tree a partial structure for the text that is currently being parsed the elements of tree that are specified by frontier have not yet been expanded or matched type remaining_text list str param remaining_text the portion of the text that is not yet covered by tree type frontier list tuple int param frontier a list of the locations within tree of all subtrees that have not yet been expanded and all leaves that have not yet been matched rtype tree return the tree that is licensed by production in particular given the production lhs elt 1 elt n return a tree that has a node lhs symbol and n children for each nonterminal element elt i in the production the tree token has a childless subtree with node value elt i symbol and for each terminal element elt j the tree token has a leaf token with type elt j param production the cfg production that licenses the tree token that should be returned type production production this will be matched set the level of tracing output that should be generated when parsing a text type trace int param trace the trace level a trace level of 0 will generate no tracing output and higher trace levels will produce more verbose tracing output rtype none print trace output displaying the fringe of tree the fringe of tree consists of all of its leaves and all of its childless 
subtrees rtype none print trace output displaying the parser s current state param operation a character identifying the operation that generated the current state rtype none stepping recursive descent parser a recursivedescentparser that allows you to step through the parsing process performing a single operation at a time the initialize method is used to start parsing a text expand expands the first element on the frontier using a single cfg production and match matches the first element on the frontier against the next text token backtrack undoes the most recent expand or match operation step performs a single expand match or backtrack operation parses returns the set of parses that have been found by the parser ivar _history a list of rtext tree frontier tripples containing the previous states of the parser this history is used to implement the backtrack operation ivar _tried_e a record of all productions that have been tried for a given tree this record is used by expand to perform the next untried production ivar _tried_m a record of what tokens have been matched for a given tree this record is used by step to decide whether or not to match a token see nltk grammar xx temporary hack warning this should be replaced with something nicer when we get the chance for pos in c treepositions leaves c pos c pos freeze start parsing a given text this sets the parser s tree to the start symbol its frontier to the root node and its remaining text to token subtokens return the portion of the text that is not yet covered by the tree rtype list str return a list of the tree locations of all subtrees that have not yet been expanded and all leaves that have not yet been matched rtype list tuple int return a partial structure for the text that is currently being parsed the elements specified by the frontier have not yet been expanded or matched rtype tree perform a single parsing operation if an untried match is possible then perform the match and return the matched token if an untried expansion is possible then perform the expansion and return the production that it is based on if backtracking is possible then backtrack and return true otherwise return none return none if no operation was performed a token if a match was performed a production if an expansion was performed and true if a backtrack operation was performed rtype production or string or bool try matching if we haven t already try expanding try backtracking nothing left to do expand the first element of the frontier in particular if the first element of the frontier is a subtree whose node type is equal to production s left hand side then add a child to that subtree for each element of production s right hand side if production is not specified then use the first untried expandable production if all expandable productions have been tried do nothing return the production used to expand the frontier if an expansion was performed if no expansion was performed return none rtype production or none make sure we can expand if they didn t specify a production check all untried ones record that we ve tried this production now try expanding we didn t expand anything match the first element of the frontier in particular if the first element of the frontier has the same type as the next text token then substitute the text token into the tree return the token matched if a match operation was performed if no match was performed return none rtype str or none record that we ve tried matching this token make sure we can match return the token we just 
matched return the parser to its state before the most recent match or expand operation calling undo repeatedly return the parser to successively earlier states if no match or expand operations have been performed undo will make no changes return true if an operation was successfully undone rtype bool return a list of all the productions for which expansions are available for the current parser state rtype list production make sure we can expand return a list of all the untried productions for which expansions are available for the current parser state rtype list production return whether the first element of the frontier is a token that has not yet been matched rtype bool return whether the parser s current state represents a complete parse rtype bool a stub version of _parse that sets the parsers current state to the given arguments in recursivedescentparser the _parse method is used to recursively continue parsing a text steppingrecursivedescentparser overrides it to capture these recursive calls it records the parser s old state in the history to allow for backtracking and updates the parser s new state using the given arguments finally it returns 1 which is used by match and expand to detect whether their operations were successful return 1 rtype list of int is it a good parse if so record it return an iterator of the parses that have been found by this parser so far rtype list of tree change the grammar used to parse texts param grammar the new grammar type grammar cfg demonstration code a demonstration of the recursive descent parser s np vp np det n det n pp vp v np v np pp pp p np np i n man park telescope dog det the a p in with v saw
from nltk.grammar import Nonterminal from nltk.parse.api import ParserI from nltk.tree import ImmutableTree, Tree class RecursiveDescentParser(ParserI): def __init__(self, grammar, trace=0): self._grammar = grammar self._trace = trace def grammar(self): return self._grammar def parse(self, tokens): tokens = list(tokens) self._grammar.check_coverage(tokens) start = self._grammar.start().symbol() initial_tree = Tree(start, []) frontier = [()] if self._trace: self._trace_start(initial_tree, frontier, tokens) return self._parse(tokens, initial_tree, frontier) def _parse(self, remaining_text, tree, frontier): if len(remaining_text) == 0 and len(frontier) == 0: if self._trace: self._trace_succeed(tree, frontier) yield tree elif len(frontier) == 0: if self._trace: self._trace_backtrack(tree, frontier) elif isinstance(tree[frontier[0]], Tree): yield from self._expand(remaining_text, tree, frontier) else: yield from self._match(remaining_text, tree, frontier) def _match(self, rtext, tree, frontier): tree_leaf = tree[frontier[0]] if len(rtext) > 0 and tree_leaf == rtext[0]: newtree = tree.copy(deep=True) newtree[frontier[0]] = rtext[0] if self._trace: self._trace_match(newtree, frontier[1:], rtext[0]) yield from self._parse(rtext[1:], newtree, frontier[1:]) else: if self._trace: self._trace_backtrack(tree, frontier, rtext[:1]) def _expand(self, remaining_text, tree, frontier, production=None): if production is None: productions = self._grammar.productions() else: productions = [production] for production in productions: lhs = production.lhs().symbol() if lhs == tree[frontier[0]].label(): subtree = self._production_to_tree(production) if frontier[0] == (): newtree = subtree else: newtree = tree.copy(deep=True) newtree[frontier[0]] = subtree new_frontier = [ frontier[0] + (i,) for i in range(len(production.rhs())) ] if self._trace: self._trace_expand(newtree, new_frontier, production) yield from self._parse( remaining_text, newtree, new_frontier + frontier[1:] ) def _production_to_tree(self, production): children = [] for elt in production.rhs(): if isinstance(elt, Nonterminal): children.append(Tree(elt.symbol(), [])) else: children.append(elt) return Tree(production.lhs().symbol(), children) def trace(self, trace=2): self._trace = trace def _trace_fringe(self, tree, treeloc=None): if treeloc == (): print("*", end=" ") if isinstance(tree, Tree): if len(tree) == 0: print(repr(Nonterminal(tree.label())), end=" ") for i in range(len(tree)): if treeloc is not None and i == treeloc[0]: self._trace_fringe(tree[i], treeloc[1:]) else: self._trace_fringe(tree[i]) else: print(repr(tree), end=" ") def _trace_tree(self, tree, frontier, operation): if self._trace == 2: print(" %c [" % operation, end=" ") else: print(" [", end=" ") if len(frontier) > 0: self._trace_fringe(tree, frontier[0]) else: self._trace_fringe(tree) print("]") def _trace_start(self, tree, frontier, text): print("Parsing %r" % " ".join(text)) if self._trace > 2: print("Start:") if self._trace > 1: self._trace_tree(tree, frontier, " ") def _trace_expand(self, tree, frontier, production): if self._trace > 2: print("Expand: %s" % production) if self._trace > 1: self._trace_tree(tree, frontier, "E") def _trace_match(self, tree, frontier, tok): if self._trace > 2: print("Match: %r" % tok) if self._trace > 1: self._trace_tree(tree, frontier, "M") def _trace_succeed(self, tree, frontier): if self._trace > 2: print("GOOD PARSE:") if self._trace == 1: print("Found a parse:\n%s" % tree) if self._trace > 1: self._trace_tree(tree, frontier, "+") def 
_trace_backtrack(self, tree, frontier, toks=None): if self._trace > 2: if toks: print("Backtrack: %r match failed" % toks[0]) else: print("Backtrack") class SteppingRecursiveDescentParser(RecursiveDescentParser): def __init__(self, grammar, trace=0): super().__init__(grammar, trace) self._rtext = None self._tree = None self._frontier = [()] self._tried_e = {} self._tried_m = {} self._history = [] self._parses = [] def _freeze(self, tree): c = tree.copy() return ImmutableTree.convert(c) def parse(self, tokens): tokens = list(tokens) self.initialize(tokens) while self.step() is not None: pass return self.parses() def initialize(self, tokens): self._rtext = tokens start = self._grammar.start().symbol() self._tree = Tree(start, []) self._frontier = [()] self._tried_e = {} self._tried_m = {} self._history = [] self._parses = [] if self._trace: self._trace_start(self._tree, self._frontier, self._rtext) def remaining_text(self): return self._rtext def frontier(self): return self._frontier def tree(self): return self._tree def step(self): if self.untried_match(): token = self.match() if token is not None: return token production = self.expand() if production is not None: return production if self.backtrack(): self._trace_backtrack(self._tree, self._frontier) return True return None def expand(self, production=None): if len(self._frontier) == 0: return None if not isinstance(self._tree[self._frontier[0]], Tree): return None if production is None: productions = self.untried_expandable_productions() else: productions = [production] parses = [] for prod in productions: self._tried_e.setdefault(self._freeze(self._tree), []).append(prod) for _result in self._expand(self._rtext, self._tree, self._frontier, prod): return prod return None def match(self): tok = self._rtext[0] self._tried_m.setdefault(self._freeze(self._tree), []).append(tok) if len(self._frontier) == 0: return None if isinstance(self._tree[self._frontier[0]], Tree): return None for _result in self._match(self._rtext, self._tree, self._frontier): return self._history[-1][0][0] return None def backtrack(self): if len(self._history) == 0: return False (self._rtext, self._tree, self._frontier) = self._history.pop() return True def expandable_productions(self): if len(self._frontier) == 0: return [] frontier_child = self._tree[self._frontier[0]] if len(self._frontier) == 0 or not isinstance(frontier_child, Tree): return [] return [ p for p in self._grammar.productions() if p.lhs().symbol() == frontier_child.label() ] def untried_expandable_productions(self): tried_expansions = self._tried_e.get(self._freeze(self._tree), []) return [p for p in self.expandable_productions() if p not in tried_expansions] def untried_match(self): if len(self._rtext) == 0: return False tried_matches = self._tried_m.get(self._freeze(self._tree), []) return self._rtext[0] not in tried_matches def currently_complete(self): return len(self._frontier) == 0 and len(self._rtext) == 0 def _parse(self, remaining_text, tree, frontier): self._history.append((self._rtext, self._tree, self._frontier)) self._rtext = remaining_text self._tree = tree self._frontier = frontier if len(frontier) == 0 and len(remaining_text) == 0: self._parses.append(tree) self._trace_succeed(self._tree, self._frontier) return [1] def parses(self): return iter(self._parses) def set_grammar(self, grammar): self._grammar = grammar def demo(): from nltk import CFG, parse grammar = CFG.fromstring( ) for prod in grammar.productions(): print(prod) sent = "I saw a man in the park".split() parser = 
parse.RecursiveDescentParser(grammar, trace=2) for p in parser.parse(sent): print(p) if __name__ == "__main__": demo()
natural language toolkit shiftreduce parser c 20012023 nltk project edward loper edlopergmail com steven bird stevenbird1gmail com url https www nltk org for license information see license txt shiftreduce parser a simple bottomup cfg parser that uses two operations shift and reduce to find a single parse for a text shiftreduceparser maintains a stack which records the structure of a portion of the text this stack is a list of strings and trees that collectively cover a portion of the text for example while parsing the sentence the dog saw the man with a typical grammar shiftreduceparser will produce the following stack which covers the dog saw np det the n dog v saw shiftreduceparser attempts to extend the stack to cover the entire text and to combine the stack elements into a single tree producing a complete parse for the sentence initially the stack is empty it is extended to cover the text from left to right by repeatedly applying two operations shift moves a token from the beginning of the text to the end of the stack reduce uses a cfg production to combine the rightmost stack elements into a single tree often more than one operation can be performed on a given stack in this case shiftreduceparser uses the following heuristics to decide which operation to perform only shift if no reductions are available if multiple reductions are available then apply the reduction whose cfg production is listed earliest in the grammar note that these heuristics are not guaranteed to choose an operation that leads to a parse of the text also if multiple parses exists shiftreduceparser will return at most one of them see nltk grammar create a new shiftreduceparser that uses grammar to parse texts type grammar grammar param grammar the grammar used to parse texts type trace int param trace the level of tracing that should be used when parsing a text 0 will generate no tracing output and higher numbers will produce more verbose tracing output initialize the stack trace output iterate through the text pushing the token onto the stack then reducing the stack did we reduce everything did we end up with the right category move a token from the beginning of remainingtext to the end of stack type stack liststr and tree param stack a list of strings and trees encoding the structure of the text that has been parsed so far type remainingtext liststr param remainingtext the portion of the text that is not yet covered by stack rtype none rtype bool return true if the right hand side of a cfg production matches the rightmost elements of the stack rhs matches rightmoststack if they are the same length and each element of rhs matches the corresponding element of rightmoststack a nonterminal element of rhs matches any tree whose node value is equal to the nonterminal s symbol a terminal element of rhs matches any string whose type is equal to the terminal type rhs listterminal and nonterminal param rhs the right hand side of a cfg production type rightmoststack liststring and tree param rightmoststack the rightmost elements of the parser s stack find a cfg production whose right hand side matches the rightmost stack elements and combine those stack elements into a single tree with the node specified by the production s lefthand side if more than one cfg production matches the stack then use the production that is listed earliest in the grammar the new tree replaces the elements in the stack rtype production or none return if a reduction is performed then return the cfg production that the reduction is based on 
otherwise return false type stack liststring and tree param stack a list of strings and trees encoding the structure of the text that has been parsed so far type remainingtext liststr param remainingtext the portion of the text that is not yet covered by stack try each production in order check if the rhs of a production matches the top of the stack combine the tree to reflect the reduction we reduced something we didn t reduce anything set the level of tracing output that should be generated when parsing a text type trace int param trace the trace level a trace level of 0 will generate no tracing output and higher trace levels will produce more verbose tracing output rtype none 1 just show shifts 2 show shifts reduces 3 display which tokens productions are shifedreduced print trace output displaying the given stack and text rtype none param marker a character that is printed to the left of the stack this is used with trace level 2 to print s before shifted stacks and r before reduced stacks print trace output displaying that a token has been shifted rtype none print trace output displaying that production was used to reduce stack rtype none check to make sure that all of the cfg productions are potentially useful if any productions can never be used then print a warning rtype none any production whose rhs is an extension of another production s rhs will never be used stepping shiftreduce parser a shiftreduceparser that allows you to setp through the parsing process performing a single operation at a time it also allows you to change the parser s grammar midway through parsing a text the initialize method is used to start parsing a text shift performs a single shift operation and reduce performs a single reduce operation step will perform a single reduce operation if possible otherwise it will perform a single shift operation parses returns the set of parses that have been found by the parser ivar history a list of stack remainingtext pairs containing all of the previous states of the parser this history is used to implement the undo operation see nltk grammar return the parser s stack rtype liststr and tree return the portion of the text that is not yet covered by the stack rtype liststr start parsing a given text this sets the parser s stack to and sets its remaining text to tokens perform a single parsing operation if a reduction is possible then perform that reduction and return the production that it is based on otherwise if a shift is possible then perform it and return true otherwise return false return false if no operation was performed true if a shift was performed and the cfg production used to reduce if a reduction was performed rtype production or bool move a token from the beginning of the remaining text to the end of the stack if there are no more tokens in the remaining text then do nothing return true if the shift operation was successful rtype bool use production to combine the rightmost stack elements into a single tree if production does not match the rightmost stack elements then do nothing return the production used to reduce the stack if a reduction was performed if no reduction was performed return none rtype production or none return the parser to its state before the most recent shift or reduce operation calling undo repeatedly return the parser to successively earlier states if no shift or reduce operations have been performed undo will make no changes return true if an operation was successfully undone rtype bool return a list of the productions for which 
reductions are available for the current parser state rtype listproduction return an iterator of the parses that have been found by this parser so far rtype itertree copied from nltk parser change the grammar used to parse texts param grammar the new grammar type grammar cfg demonstration code a demonstration of the shiftreduce parser s np vp np det n det n pp vp v np v np pp pp p np np i n man park telescope dog det the a p in with v saw natural language toolkit shift reduce parser c 2001 2023 nltk project edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt shift reduce parser a simple bottom up cfg parser that uses two operations shift and reduce to find a single parse for a text shiftreduceparser maintains a stack which records the structure of a portion of the text this stack is a list of strings and trees that collectively cover a portion of the text for example while parsing the sentence the dog saw the man with a typical grammar shiftreduceparser will produce the following stack which covers the dog saw np det the n dog v saw shiftreduceparser attempts to extend the stack to cover the entire text and to combine the stack elements into a single tree producing a complete parse for the sentence initially the stack is empty it is extended to cover the text from left to right by repeatedly applying two operations shift moves a token from the beginning of the text to the end of the stack reduce uses a cfg production to combine the rightmost stack elements into a single tree often more than one operation can be performed on a given stack in this case shiftreduceparser uses the following heuristics to decide which operation to perform only shift if no reductions are available if multiple reductions are available then apply the reduction whose cfg production is listed earliest in the grammar note that these heuristics are not guaranteed to choose an operation that leads to a parse of the text also if multiple parses exists shiftreduceparser will return at most one of them see nltk grammar create a new shiftreduceparser that uses grammar to parse texts type grammar grammar param grammar the grammar used to parse texts type trace int param trace the level of tracing that should be used when parsing a text 0 will generate no tracing output and higher numbers will produce more verbose tracing output initialize the stack trace output iterate through the text pushing the token onto the stack then reducing the stack did we reduce everything did we end up with the right category move a token from the beginning of remaining_text to the end of stack type stack list str and tree param stack a list of strings and trees encoding the structure of the text that has been parsed so far type remaining_text list str param remaining_text the portion of the text that is not yet covered by stack rtype none rtype bool return true if the right hand side of a cfg production matches the rightmost elements of the stack rhs matches rightmost_stack if they are the same length and each element of rhs matches the corresponding element of rightmost_stack a nonterminal element of rhs matches any tree whose node value is equal to the nonterminal s symbol a terminal element of rhs matches any string whose type is equal to the terminal type rhs list terminal and nonterminal param rhs the right hand side of a cfg production type rightmost_stack list string and tree param rightmost_stack the rightmost elements of the parser s stack find a cfg production 
whose right hand side matches the rightmost stack elements and combine those stack elements into a single tree with the node specified by the production s left hand side if more than one cfg production matches the stack then use the production that is listed earliest in the grammar the new tree replaces the elements in the stack rtype production or none return if a reduction is performed then return the cfg production that the reduction is based on otherwise return false type stack list string and tree param stack a list of strings and trees encoding the structure of the text that has been parsed so far type remaining_text list str param remaining_text the portion of the text that is not yet covered by stack try each production in order check if the rhs of a production matches the top of the stack combine the tree to reflect the reduction we reduced something we didn t reduce anything set the level of tracing output that should be generated when parsing a text type trace int param trace the trace level a trace level of 0 will generate no tracing output and higher trace levels will produce more verbose tracing output rtype none 1 just show shifts 2 show shifts reduces 3 display which tokens productions are shifed reduced print trace output displaying the given stack and text rtype none param marker a character that is printed to the left of the stack this is used with trace level 2 to print s before shifted stacks and r before reduced stacks print trace output displaying that a token has been shifted rtype none print trace output displaying that production was used to reduce stack rtype none check to make sure that all of the cfg productions are potentially useful if any productions can never be used then print a warning rtype none any production whose rhs is an extension of another production s rhs will never be used stepping shift reduce parser a shiftreduceparser that allows you to setp through the parsing process performing a single operation at a time it also allows you to change the parser s grammar midway through parsing a text the initialize method is used to start parsing a text shift performs a single shift operation and reduce performs a single reduce operation step will perform a single reduce operation if possible otherwise it will perform a single shift operation parses returns the set of parses that have been found by the parser ivar _history a list of stack remaining_text pairs containing all of the previous states of the parser this history is used to implement the undo operation see nltk grammar return the parser s stack rtype list str and tree return the portion of the text that is not yet covered by the stack rtype list str start parsing a given text this sets the parser s stack to and sets its remaining text to tokens perform a single parsing operation if a reduction is possible then perform that reduction and return the production that it is based on otherwise if a shift is possible then perform it and return true otherwise return false return false if no operation was performed true if a shift was performed and the cfg production used to reduce if a reduction was performed rtype production or bool move a token from the beginning of the remaining text to the end of the stack if there are no more tokens in the remaining text then do nothing return true if the shift operation was successful rtype bool use production to combine the rightmost stack elements into a single tree if production does not match the rightmost stack elements then do nothing return the production 
used to reduce the stack if a reduction was performed if no reduction was performed return none rtype production or none return the parser to its state before the most recent shift or reduce operation calling undo repeatedly return the parser to successively earlier states if no shift or reduce operations have been performed undo will make no changes return true if an operation was successfully undone rtype bool return a list of the productions for which reductions are available for the current parser state rtype list production return an iterator of the parses that have been found by this parser so far rtype iter tree copied from nltk parser change the grammar used to parse texts param grammar the new grammar type grammar cfg demonstration code a demonstration of the shift reduce parser s np vp np det n det n pp vp v np v np pp pp p np np i n man park telescope dog det the a p in with v saw
from nltk.grammar import Nonterminal from nltk.parse.api import ParserI from nltk.tree import Tree class ShiftReduceParser(ParserI): def __init__(self, grammar, trace=0): self._grammar = grammar self._trace = trace self._check_grammar() def grammar(self): return self._grammar def parse(self, tokens): tokens = list(tokens) self._grammar.check_coverage(tokens) stack = [] remaining_text = tokens if self._trace: print("Parsing %r" % " ".join(tokens)) self._trace_stack(stack, remaining_text) while len(remaining_text) > 0: self._shift(stack, remaining_text) while self._reduce(stack, remaining_text): pass if len(stack) == 1: if stack[0].label() == self._grammar.start().symbol(): yield stack[0] def _shift(self, stack, remaining_text): stack.append(remaining_text[0]) remaining_text.remove(remaining_text[0]) if self._trace: self._trace_shift(stack, remaining_text) def _match_rhs(self, rhs, rightmost_stack): if len(rightmost_stack) != len(rhs): return False for i in range(len(rightmost_stack)): if isinstance(rightmost_stack[i], Tree): if not isinstance(rhs[i], Nonterminal): return False if rightmost_stack[i].label() != rhs[i].symbol(): return False else: if isinstance(rhs[i], Nonterminal): return False if rightmost_stack[i] != rhs[i]: return False return True def _reduce(self, stack, remaining_text, production=None): if production is None: productions = self._grammar.productions() else: productions = [production] for production in productions: rhslen = len(production.rhs()) if self._match_rhs(production.rhs(), stack[-rhslen:]): tree = Tree(production.lhs().symbol(), stack[-rhslen:]) stack[-rhslen:] = [tree] if self._trace: self._trace_reduce(stack, production, remaining_text) return production return None def trace(self, trace=2): self._trace = trace def _trace_stack(self, stack, remaining_text, marker=" "): s = " " + marker + " [ " for elt in stack: if isinstance(elt, Tree): s += repr(Nonterminal(elt.label())) + " " else: s += repr(elt) + " " s += "* " + " ".join(remaining_text) + "]" print(s) def _trace_shift(self, stack, remaining_text): if self._trace > 2: print("Shift %r:" % stack[-1]) if self._trace == 2: self._trace_stack(stack, remaining_text, "S") elif self._trace > 0: self._trace_stack(stack, remaining_text) def _trace_reduce(self, stack, production, remaining_text): if self._trace > 2: rhs = " ".join(production.rhs()) print(f"Reduce {production.lhs()!r} <- {rhs}") if self._trace == 2: self._trace_stack(stack, remaining_text, "R") elif self._trace > 1: self._trace_stack(stack, remaining_text) def _check_grammar(self): productions = self._grammar.productions() for i in range(len(productions)): for j in range(i + 1, len(productions)): rhs1 = productions[i].rhs() rhs2 = productions[j].rhs() if rhs1[: len(rhs2)] == rhs2: print("Warning: %r will never be used" % productions[i]) class SteppingShiftReduceParser(ShiftReduceParser): def __init__(self, grammar, trace=0): super().__init__(grammar, trace) self._stack = None self._remaining_text = None self._history = [] def parse(self, tokens): tokens = list(tokens) self.initialize(tokens) while self.step(): pass return self.parses() def stack(self): return self._stack def remaining_text(self): return self._remaining_text def initialize(self, tokens): self._stack = [] self._remaining_text = tokens self._history = [] def step(self): return self.reduce() or self.shift() def shift(self): if len(self._remaining_text) == 0: return False self._history.append((self._stack[:], self._remaining_text[:])) self._shift(self._stack, self._remaining_text) return True 
def reduce(self, production=None): self._history.append((self._stack[:], self._remaining_text[:])) return_val = self._reduce(self._stack, self._remaining_text, production) if not return_val: self._history.pop() return return_val def undo(self): if len(self._history) == 0: return False (self._stack, self._remaining_text) = self._history.pop() return True def reducible_productions(self): productions = [] for production in self._grammar.productions(): rhslen = len(production.rhs()) if self._match_rhs(production.rhs(), self._stack[-rhslen:]): productions.append(production) return productions def parses(self): if ( len(self._remaining_text) == 0 and len(self._stack) == 1 and self._stack[0].label() == self._grammar.start().symbol() ): yield self._stack[0] def set_grammar(self, grammar): self._grammar = grammar def demo(): from nltk import CFG, parse grammar = CFG.fromstring( ) sent = "I saw a man in the park".split() parser = parse.ShiftReduceParser(grammar, trace=2) for p in parser.parse(sent): print(p) if __name__ == "__main__": demo()
natural language toolkit interface to the stanford parser c 20012023 nltk project steven xu xxustudent unimelb edu au url https www nltk org for license information see license txt interface to the stanford parser modeljarpattern rstanfordparserd dmodels jar jar rstanfordparser jar mainclass edu stanford nlp parser lexparser lexicalizedparser usestdin false doublespacedoutput false def init self pathtojarnone pathtomodelsjarnone modelpathedustanfordnlpmodelslexparserenglishpcfg ser gz encodingutf8 verbosefalse javaoptionsmx4g corenlpoptions find the most recent code and model jar stanfordjar max findjariter self jar pathtojar envvarsstanfordparser stanfordcorenlp searchpath urlstanfordurl verboseverbose isregextrue keylambda modelpath os path dirnamemodelpath modeljar max findjariter self modeljarpattern pathtomodelsjar envvarsstanfordmodels stanfordcorenlp searchpath urlstanfordurl verboseverbose isregextrue keylambda modelpath os path dirnamemodelpath self classpath stanfordjar modeljar adding logging jar files to classpath stanforddir os path splitstanfordjar0 self classpath tuplemodeljar findjarswithinpathstanforddir self modelpath modelpath self encoding encoding self corenlpoptions corenlpoptions self javaoptions javaoptions def parsetreesoutputself output res curlines curtrees blank false for line in output splitlinesfalse if line if blank res appenditercurtrees curtrees blank false elif self doublespacedoutput curtrees appendself maketreen joincurlines curlines blank true else res appenditerself maketreen joincurlines curlines else curlines appendline blank false return iterres def parsesentsself sentences verbosefalse cmd self mainclass model self modelpath sentences newline outputformat self outputformat tokenized escaper edu stanford nlp process ptbescapingprocessor return self parsetreesoutput self execute cmd n join joinsentence for sentence in sentences verbose def rawparseself sentence verbosefalse return nextself rawparsesentssentence verbose def rawparsesentsself sentences verbosefalse cmd self mainclass model self modelpath sentences newline outputformat self outputformat return self parsetreesoutput self executecmd n joinsentences verbose def taggedparseself sentence verbosefalse return nextself taggedparsesentssentence verbose def taggedparsesentsself sentences verbosefalse tagseparator cmd self mainclass model self modelpath sentences newline outputformat self outputformat tokenized tagseparator tagseparator tokenizerfactory edu stanford nlp process whitespacetokenizer tokenizermethod newcorelabeltokenizerfactory we don t need to escape slashes as splitting is done on the last instance of the character in the token return self parsetreesoutput self execute cmd n join jointagseparator jointagged for tagged in sentence for sentence in sentences verbose def executeself cmd input verbosefalse encoding self encoding cmd extendencoding encoding if self corenlpoptions cmd extendself corenlpoptions split defaultoptions joinjavaoptions configure java configjavaoptionsself javaoptions verboseverbose windows is incompatible with namedtemporaryfile without passing in deletefalse with tempfile namedtemporaryfilemodewb deletefalse as inputfile write the actual sentences to the temporary input file if isinstanceinput str and encoding input input encodeencoding inputfile writeinput inputfile flush run the tagger and get the output if self usestdin inputfile seek0 stdout stderr java cmd classpathself classpath stdininputfile stdoutpipe stderrpipe else cmd appendinputfile name stdout 
stderr java cmd classpathself classpath stdoutpipe stderrpipe stdout stdout replacebxc2xa0 b stdout stdout replacebx00xa0 b stdout stdout decodeencoding os unlinkinputfile name return java configurations to their default values configjavaoptionsdefaultoptions verbosefalse return stdout class stanfordparsergenericstanfordparser outputformat penn def initself args kwargs warnings warn the stanfordparser will be deprecatedn please use 03391mnltk parse corenlp corenlpparser0330m instead deprecationwarning stacklevel2 super initargs kwargs def maketreeself result return tree fromstringresult class stanforddependencyparsergenericstanfordparser outputformat conll2007 def initself args kwargs warnings warn the stanforddependencyparser will be deprecatedn please use 03391mnltk parse corenlp corenlpdependencyparser0330m instead deprecationwarning stacklevel2 super initargs kwargs def maketreeself result return dependencygraphresult toprelationlabelroot class stanfordneuraldependencyparsergenericstanfordparser outputformat conll mainclass edu stanford nlp pipeline stanfordcorenlp jar rstanfordcorenlpd d jar modeljarpattern rstanfordcorenlpd dmodels jar usestdin true doublespacedoutput true def initself args kwargs warnings warn the stanfordneuraldependencyparser will be deprecatedn please use 03391mnltk parse corenlp corenlpdependencyparser0330m instead deprecationwarning stacklevel2 super initargs kwargs self corenlpoptions annotators tokenize ssplit pos depparse def taggedparsesentsself sentences verbosefalse raise notimplementederror taggedparsesents is not supported by stanfordneuraldependencyparser use parsesents or rawparsesents instead def maketreeself result return dependencygraphresult toprelationlabelroot natural language toolkit interface to the stanford parser c 2001 2023 nltk project steven xu xxu student unimelb edu au url https www nltk org for license information see license txt interface to the stanford parser find the most recent code and model jar self _classpath stanford_jar model_jar adding logging jar files to classpath use stanfordparser to parse multiple sentences takes multiple sentences as a list where each sentence is a list of words each sentence will be automatically tagged with this stanfordparser instance s tagger if whitespaces exists inside a token then the token will be treated as separate tokens param sentences input sentences to parse type sentences list list str rtype iter iter tree use stanfordparser to parse a sentence takes a sentence as a string before parsing it will be automatically tokenized and tagged by the stanford parser param sentence input sentence to parse type sentence str rtype iter tree use stanfordparser to parse multiple sentences takes multiple sentences as a list of strings each sentence will be automatically tokenized and tagged by the stanford parser param sentences input sentences to parse type sentences list str rtype iter iter tree use stanfordparser to parse a sentence takes a sentence as a list of word tag tuples the sentence must have already been tokenized and tagged param sentence input sentence to parse type sentence list tuple str str rtype iter tree use stanfordparser to parse multiple sentences takes multiple sentences where each sentence is a list of word tag tuples the sentences must have already been tokenized and tagged param sentences input sentences to parse type sentences list list tuple str str rtype iter iter tree we don t need to escape slashes as splitting is done on the last instance of the character in the token 
configure java windows is incompatible with namedtemporaryfile without passing in delete false write the actual sentences to the temporary input file run the tagger and get the output return java configurations to their default values parser stanfordparser model_path edu stanford nlp models lexparser englishpcfg ser gz doctest skip list parser raw_parse the quick brown fox jumps over the lazy dog doctest normalize_whitespace skip tree root tree np tree np tree dt the tree jj quick tree jj brown tree nn fox tree np tree np tree nns jumps tree pp tree in over tree np tree dt the tree jj lazy tree nn dog sum list dep_graphs for dep_graphs in parser raw_parse_sents the quick brown fox jumps over the lazy dog the quick grey wolf jumps over the lazy fox doctest normalize_whitespace skip tree root tree np tree np tree dt the tree jj quick tree jj brown tree nn fox tree np tree np tree nns jumps tree pp tree in over tree np tree dt the tree jj lazy tree nn dog tree root tree np tree np tree dt the tree jj quick tree jj grey tree nn wolf tree np tree np tree nns jumps tree pp tree in over tree np tree dt the tree jj lazy tree nn fox sum list dep_graphs for dep_graphs in parser parse_sents i m a dog split this is my friends cat the tabby split doctest normalize_whitespace skip tree root tree s tree np tree prp i tree vp tree vbp m tree np tree dt a tree nn dog tree root tree s tree np tree dt this tree vp tree vbz is tree np tree np tree np tree prp my tree nns friends tree pos tree nn cat tree prn tree lrb tree tree np tree dt the tree nn tabby tree rrb sum list dep_graphs for dep_graphs in parser tagged_parse_sents the dt quick jj brown jj fox nn jumped vbd over in the dt lazy jj dog nn doctest normalize_whitespace skip tree root tree s tree np tree dt the tree jj quick tree jj brown tree nn fox tree vp tree vbd jumped tree pp tree in over tree np tree dt the tree jj lazy tree nn dog tree dep_parser stanforddependencyparser model_path edu stanford nlp models lexparser englishpcfg ser gz doctest skip parse tree for parse in dep_parser raw_parse the quick brown fox jumps over the lazy dog doctest normalize_whitespace skip tree jumps tree fox the quick brown tree dog over the lazy list parse triples for parse in dep_parser raw_parse the quick brown fox jumps over the lazy dog doctest normalize_whitespace skip u jumps u vbz u nsubj u fox u nn u fox u nn u det u the u dt u fox u nn u amod u quick u jj u fox u nn u amod u brown u jj u jumps u vbz u nmod u dog u nn u dog u nn u case u over u in u dog u nn u det u the u dt u dog u nn u amod u lazy u jj sum parse tree for parse in dep_graphs for dep_graphs in dep_parser raw_parse_sents the quick brown fox jumps over the lazy dog the quick grey wolf jumps over the lazy fox doctest normalize_whitespace skip tree jumps tree fox the quick brown tree dog over the lazy tree jumps tree wolf the quick grey tree fox over the lazy sum parse tree for parse in dep_graphs for dep_graphs in dep_parser parse_sents i m a dog split this is my friends cat the tabby split doctest normalize_whitespace skip tree dog i m a tree cat this is tree friends my tree tabby the sum list parse triples for parse in dep_graphs for dep_graphs in dep_parser tagged_parse_sents the dt quick jj brown jj fox nn jumped vbd over in the dt lazy jj dog nn doctest normalize_whitespace skip u jumped u vbd u nsubj u fox u nn u fox u nn u det u the u dt u fox u nn u amod u quick u jj u fox u nn u amod u brown u jj u jumped u vbd u nmod u dog u nn u dog u nn u case u over u in u dog u nn u det u the u 
dt u dog u nn u amod u lazy u jj from nltk parse stanford import stanfordneuraldependencyparser doctest skip dep_parser stanfordneuraldependencyparser java_options mx4g doctest skip parse tree for parse in dep_parser raw_parse the quick brown fox jumps over the lazy dog doctest normalize_whitespace skip tree jumps tree fox the quick brown tree dog over the lazy list parse triples for parse in dep_parser raw_parse the quick brown fox jumps over the lazy dog doctest normalize_whitespace skip u jumps u vbz u nsubj u fox u nn u fox u nn u det u the u dt u fox u nn u amod u quick u jj u fox u nn u amod u brown u jj u jumps u vbz u nmod u dog u nn u dog u nn u case u over u in u dog u nn u det u the u dt u dog u nn u amod u lazy u jj u jumps u vbz u punct u u sum parse tree for parse in dep_graphs for dep_graphs in dep_parser raw_parse_sents the quick brown fox jumps over the lazy dog the quick grey wolf jumps over the lazy fox doctest normalize_whitespace skip tree jumps tree fox the quick brown tree dog over the lazy tree jumps tree wolf the quick grey tree fox over the lazy sum parse tree for parse in dep_graphs for dep_graphs in dep_parser parse_sents i m a dog split this is my friends cat the tabby split doctest normalize_whitespace skip tree dog i m a tree cat this is tree friends my tree tabby lrb the rrb currently unimplemented because the neural dependency parser and the stanfordcorenlp pipeline class doesn t support passing in pre tagged tokens
import os import tempfile import warnings from subprocess import PIPE from nltk.internals import ( _java_options, config_java, find_jar_iter, find_jars_within_path, java, ) from nltk.parse.api import ParserI from nltk.parse.dependencygraph import DependencyGraph from nltk.tree import Tree _stanford_url = "https://nlp.stanford.edu/software/lex-parser.shtml" class GenericStanfordParser(ParserI): _MODEL_JAR_PATTERN = r"stanford-parser-(\d+)(\.(\d+))+-models\.jar" _JAR = r"stanford-parser\.jar" _MAIN_CLASS = "edu.stanford.nlp.parser.lexparser.LexicalizedParser" _USE_STDIN = False _DOUBLE_SPACED_OUTPUT = False def __init__( self, path_to_jar=None, path_to_models_jar=None, model_path="edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz", encoding="utf8", verbose=False, java_options="-mx4g", corenlp_options="", ): stanford_jar = max( find_jar_iter( self._JAR, path_to_jar, env_vars=("STANFORD_PARSER", "STANFORD_CORENLP"), searchpath=(), url=_stanford_url, verbose=verbose, is_regex=True, ), key=lambda model_path: os.path.dirname(model_path), ) model_jar = max( find_jar_iter( self._MODEL_JAR_PATTERN, path_to_models_jar, env_vars=("STANFORD_MODELS", "STANFORD_CORENLP"), searchpath=(), url=_stanford_url, verbose=verbose, is_regex=True, ), key=lambda model_path: os.path.dirname(model_path), ) stanford_dir = os.path.split(stanford_jar)[0] self._classpath = tuple([model_jar] + find_jars_within_path(stanford_dir)) self.model_path = model_path self._encoding = encoding self.corenlp_options = corenlp_options self.java_options = java_options def _parse_trees_output(self, output_): res = [] cur_lines = [] cur_trees = [] blank = False for line in output_.splitlines(False): if line == "": if blank: res.append(iter(cur_trees)) cur_trees = [] blank = False elif self._DOUBLE_SPACED_OUTPUT: cur_trees.append(self._make_tree("\n".join(cur_lines))) cur_lines = [] blank = True else: res.append(iter([self._make_tree("\n".join(cur_lines))])) cur_lines = [] else: cur_lines.append(line) blank = False return iter(res) def parse_sents(self, sentences, verbose=False): cmd = [ self._MAIN_CLASS, "-model", self.model_path, "-sentences", "newline", "-outputFormat", self._OUTPUT_FORMAT, "-tokenized", "-escaper", "edu.stanford.nlp.process.PTBEscapingProcessor", ] return self._parse_trees_output( self._execute( cmd, "\n".join(" ".join(sentence) for sentence in sentences), verbose ) ) def raw_parse(self, sentence, verbose=False): return next(self.raw_parse_sents([sentence], verbose)) def raw_parse_sents(self, sentences, verbose=False): cmd = [ self._MAIN_CLASS, "-model", self.model_path, "-sentences", "newline", "-outputFormat", self._OUTPUT_FORMAT, ] return self._parse_trees_output( self._execute(cmd, "\n".join(sentences), verbose) ) def tagged_parse(self, sentence, verbose=False): return next(self.tagged_parse_sents([sentence], verbose)) def tagged_parse_sents(self, sentences, verbose=False): tag_separator = "/" cmd = [ self._MAIN_CLASS, "-model", self.model_path, "-sentences", "newline", "-outputFormat", self._OUTPUT_FORMAT, "-tokenized", "-tagSeparator", tag_separator, "-tokenizerFactory", "edu.stanford.nlp.process.WhitespaceTokenizer", "-tokenizerMethod", "newCoreLabelTokenizerFactory", ] return self._parse_trees_output( self._execute( cmd, "\n".join( " ".join(tag_separator.join(tagged) for tagged in sentence) for sentence in sentences ), verbose, ) ) def _execute(self, cmd, input_, verbose=False): encoding = self._encoding cmd.extend(["-encoding", encoding]) if self.corenlp_options: cmd.extend(self.corenlp_options.split()) 
default_options = " ".join(_java_options) config_java(options=self.java_options, verbose=verbose) with tempfile.NamedTemporaryFile(mode="wb", delete=False) as input_file: if isinstance(input_, str) and encoding: input_ = input_.encode(encoding) input_file.write(input_) input_file.flush() if self._USE_STDIN: input_file.seek(0) stdout, stderr = java( cmd, classpath=self._classpath, stdin=input_file, stdout=PIPE, stderr=PIPE, ) else: cmd.append(input_file.name) stdout, stderr = java( cmd, classpath=self._classpath, stdout=PIPE, stderr=PIPE ) stdout = stdout.replace(b"\xc2\xa0", b" ") stdout = stdout.replace(b"\x00\xa0", b" ") stdout = stdout.decode(encoding) os.unlink(input_file.name) config_java(options=default_options, verbose=False) return stdout class StanfordParser(GenericStanfordParser): _OUTPUT_FORMAT = "penn" def __init__(self, *args, **kwargs): warnings.warn( "The StanfordParser will be deprecated\n" "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead.", DeprecationWarning, stacklevel=2, ) super().__init__(*args, **kwargs) def _make_tree(self, result): return Tree.fromstring(result) class StanfordDependencyParser(GenericStanfordParser): _OUTPUT_FORMAT = "conll2007" def __init__(self, *args, **kwargs): warnings.warn( "The StanfordDependencyParser will be deprecated\n" "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", DeprecationWarning, stacklevel=2, ) super().__init__(*args, **kwargs) def _make_tree(self, result): return DependencyGraph(result, top_relation_label="root") class StanfordNeuralDependencyParser(GenericStanfordParser): _OUTPUT_FORMAT = "conll" _MAIN_CLASS = "edu.stanford.nlp.pipeline.StanfordCoreNLP" _JAR = r"stanford-corenlp-(\d+)(\.(\d+))+\.jar" _MODEL_JAR_PATTERN = r"stanford-corenlp-(\d+)(\.(\d+))+-models\.jar" _USE_STDIN = True _DOUBLE_SPACED_OUTPUT = True def __init__(self, *args, **kwargs): warnings.warn( "The StanfordNeuralDependencyParser will be deprecated\n" "Please use \033[91mnltk.parse.corenlp.CoreNLPDependencyParser\033[0m instead.", DeprecationWarning, stacklevel=2, ) super().__init__(*args, **kwargs) self.corenlp_options += "-annotators tokenize,ssplit,pos,depparse" def tagged_parse_sents(self, sentences, verbose=False): raise NotImplementedError( "tagged_parse[_sents] is not supported by " "StanfordNeuralDependencyParser; use " "parse[_sents] or raw_parse[_sents] instead." ) def _make_tree(self, result): return DependencyGraph(result, top_relation_label="ROOT")
Natural Language Toolkit: Semantic Interpretation
(C) 2001-2023 NLTK Project
Author: Ewan Klein <ewan@inf.ed.ac.uk>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

NLTK Semantic Interpretation Package

This package contains classes for representing semantic structure in formulas
of first-order logic and for evaluating such formulas in set-theoretic models.

    >>> from nltk.sem import logic
    >>> logic._counter._value = 0

The package has two main components:

 - ``logic`` provides support for analyzing expressions of First Order Logic (FOL).
 - ``evaluate`` allows users to recursively determine truth in a model for
   formulas of FOL.

A model consists of a domain of discourse and a Valuation function, which
assigns values to non-logical constants. We assume that entities in the domain
are represented as strings such as 'b1', 'g1', etc. A ``Valuation`` is
initialized with a list of (symbol, value) pairs, where values are entities,
sets of entities or sets of tuples of entities. The domain of discourse can be
inferred from the valuation, and the model is then created with domain and
valuation as parameters.

    >>> from nltk.sem import Valuation, Model
    >>> v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
    ...      ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
    ...      ('dog', set(['d1'])),
    ...      ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
    >>> val = Valuation(v)
    >>> dom = val.domain
    >>> m = Model(dom, val)

The module also notes, as comments: from nltk.sem.glue import Glue;
from nltk.sem.hole import HoleSemantics; from nltk.sem.cooper_storage import
CooperStore; and: don't import chat80, as its names are too generic.
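Given the model built in the doctest above, truth can then be checked with Model.evaluate under a variable assignment. The following is a short sketch using only the public names exported by the imports that follow (Valuation, Model, Assignment); the formulas are illustrative.

    # Evaluating formulas in the model from the docstring above.
    from nltk.sem import Valuation, Model, Assignment

    v = [('adam', 'b1'), ('betty', 'g1'), ('fido', 'd1'),
         ('girl', set(['g1', 'g2'])), ('boy', set(['b1', 'b2'])),
         ('dog', set(['d1'])),
         ('love', set([('b1', 'g1'), ('b2', 'g2'), ('g1', 'b1'), ('g2', 'b1')]))]
    val = Valuation(v)
    dom = val.domain
    m = Model(dom, val)
    g = Assignment(dom)   # an empty variable assignment over the domain

    print(m.evaluate('love(adam, betty)', g))                       # True
    print(m.evaluate('all x.(girl(x) -> exists y.love(y, x))', g))  # a quantified formula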
from nltk.sem.boxer import Boxer from nltk.sem.drt import DRS, DrtExpression from nltk.sem.evaluate import ( Assignment, Model, Undefined, Valuation, arity, is_rel, read_valuation, set2rel, ) from nltk.sem.lfg import FStructure from nltk.sem.logic import ( ApplicationExpression, Expression, LogicalExpressionException, Variable, binding_ops, boolean_ops, equality_preds, read_logic, ) from nltk.sem.relextract import clause, extract_rels, rtuple from nltk.sem.skolemize import skolemize from nltk.sem.util import evaluate_sents, interpret_sents, parse_sents, root_semrep
Natural Language Toolkit: Interface to Boxer
<http://svn.ask.it.usyd.edu.au/trac/candc/wiki/boxer>
Author: Dan Garrette <dhgarrette@gmail.com>
(C) 2001-2023 NLTK Project
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

An interface to Boxer.

This interface relies on the latest version of the development (subversion)
version of C&C and Boxer.

Usage: set the environment variable CANDC to the bin directory of your CandC
installation. The models directory should be in the CandC root directory, for
example:

    /path/to/candc/
        bin/candc
        bin/boxer
        models/boxer/

Boxer: This class is an interface to Johan Bos's program Boxer, a wide-coverage
semantic parser that produces Discourse Representation Structures (DRSs).

Boxer.__init__:
    :param boxer_drs_interpreter: A class that converts from the
        ``AbstractBoxerDrs`` object hierarchy to a different object. The
        default is ``NltkDrtBoxerDrsInterpreter``, which converts to the NLTK
        DRT hierarchy.
    :param elimeq: When set to true, Boxer removes all equalities from the
        DRSs; discourse referents standing in the equality relation are
        unified, but only if this can be done in a meaning-preserving manner.
    :param resolve: When set to true, Boxer will resolve all anaphoric DRSs
        and perform merge-reduction. Resolution follows Van der Sandt's theory
        of binding and accommodation.

Boxer.interpret: Use Boxer to give a first-order representation.
    :param input: str Input sentence to parse
    :param occur_index: bool Should predicates be occurrence indexed?
    :param discourse_id: str An identifier to be inserted to each
        occurrence-indexed predicate.
    :return: ``drt.DrtExpression``

Boxer.interpret_multi: Use Boxer to give a first-order representation.
    :param input: list of str Input sentences to parse as a single discourse
    :param occur_index: bool Should predicates be occurrence indexed?
    :param discourse_id: str An identifier to be inserted to each
        occurrence-indexed predicate.
    :return: ``drt.DrtExpression``

Boxer.interpret_sents: Use Boxer to give a first-order representation.
    :param inputs: list of str Input sentences to parse as individual discourses
    :param occur_index: bool Should predicates be occurrence indexed?
    :param discourse_ids: list of str Identifiers to be inserted to each
        occurrence-indexed predicate.
    :return: list of ``drt.DrtExpression``

Boxer.interpret_multi_sents: Use Boxer to give a first-order representation.
    :param inputs: list of list of str Input discourses to parse
    :param occur_index: bool Should predicates be occurrence indexed?
    :param discourse_ids: list of str Identifiers to be inserted to each
        occurrence-indexed predicate.
    :return: ``drt.DrtExpression``

A commented-out error check notes that if "Input file contains no ccg/2 terms."
appears in boxer_out, an UnparseableInputException('Could not parse with
candc: "%s"' % input_str) used to be raised.

Boxer._call_candc: Call the ``candc`` binary with the given input.
    :param inputs: list of list of str Input discourses to parse
    :param discourse_ids: list of str Identifiers to be inserted to each
        occurrence-indexed predicate.
    :param filename: str A filename for the output file
    :return: stdout

Boxer._call_boxer: Call the ``boxer`` binary with the given input.
    :param candc_out: str Output from the C&C parser
    :return: stdout
    (A "--flat false" option was removed from Boxer and remains only as a comment.)

Boxer._call: Call the binary with the given input, via a subprocess.
    :param input_str: A string whose contents are used as stdin
    :param binary: The location of the binary to call
    :param args: A list of command-line arguments
    :return: stdout

BoxerOutputDrsParser: This class is used to parse the Prolog DRS output from
Boxer into a hierarchy of Python objects.

BoxerOutputDrsParser.parse_condition: Parse a DRS condition.
    :return: list of ``DrtExpression``

BoxerOutputDrsParser.handle_condition: Handle a DRS condition.
    :param indices: list of int
    :return: list of ``DrtExpression``

The individual handlers are annotated with examples of the Boxer terms they
parse, e.g. pred(_G3943, dog, n, 0); duplex(whq, drs(...), var, drs(...));
named(x0, john, per, 0); rel(_G3993, _G3943, agent, 0); timex(_G18322,
date(..., 'XXXX', '04', 'XX')) and time(..., '18', 'XX', 'XX');
card(_G18535, 28, ge); prop(_G15949, drs(...)); index lists such as
[1001, 1002]; and drs([[1001]:_G3943], [[1002]:pred(_G3943, dog, n, 0)]).
_handle_duplex retains a commented-out block that used to parse the
answer-type list (des/num/cou) into ans_types, several handlers carry
"swallow the ','/']'" comments for tokens that are consumed without being
used, and a comment "as per boxer rev 2554" marks a change in how the sense
field of named() terms is read.

BoxerOutputDrsParser._sent_and_word_indices:
    :return: list of (sent_index, word_indices) tuples

BoxerDrsParser: Reparse the str form of subclasses of ``AbstractBoxerDrs``.
Its handle() method retains a commented-out branch for 'drs' tokens that read
a label, refs and conds and returned BoxerDrs(label, refs, conds).

AbstractBoxerDrs.variables and _variables:
    :return: (set<variables>, set<events>, set<propositions>)

NltkDrtBoxerDrsInterpreter.interpret:
    :param ex: ``AbstractBoxerDrs``
    :return: ``DrtExpression``
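As a rough end-to-end sketch of the interface described above: it is not runnable without a local C&C/Boxer installation reachable via CANDC (or bin_dir), and the exact DRS produced depends on the installed models, so the sentence and outputs here are only illustrative.

    # Hedged sketch: assumes C&C and Boxer are installed and CANDC points at
    # their bin/ directory.
    from nltk.sem.boxer import Boxer

    boxer = Boxer(elimeq=True)                  # default NltkDrtBoxerDrsInterpreter
    drs = boxer.interpret("John sees a dog.")   # an nltk.sem.drt DRS
    print(drs)
    print(drs.fol())                            # first-order translation of the DRS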
import operator import os import re import subprocess import tempfile from functools import reduce from optparse import OptionParser from nltk.internals import find_binary from nltk.sem.drt import ( DRS, DrtApplicationExpression, DrtEqualityExpression, DrtNegatedExpression, DrtOrExpression, DrtParser, DrtProposition, DrtTokens, DrtVariableExpression, ) from nltk.sem.logic import ( ExpectedMoreTokensException, LogicalExpressionException, UnexpectedTokenException, Variable, ) class Boxer: def __init__( self, boxer_drs_interpreter=None, elimeq=False, bin_dir=None, verbose=False, resolve=True, ): if boxer_drs_interpreter is None: boxer_drs_interpreter = NltkDrtBoxerDrsInterpreter() self._boxer_drs_interpreter = boxer_drs_interpreter self._resolve = resolve self._elimeq = elimeq self.set_bin_dir(bin_dir, verbose) def set_bin_dir(self, bin_dir, verbose=False): self._candc_bin = self._find_binary("candc", bin_dir, verbose) self._candc_models_path = os.path.normpath( os.path.join(self._candc_bin[:-5], "../models") ) self._boxer_bin = self._find_binary("boxer", bin_dir, verbose) def interpret(self, input, discourse_id=None, question=False, verbose=False): discourse_ids = [discourse_id] if discourse_id is not None else None (d,) = self.interpret_multi_sents([[input]], discourse_ids, question, verbose) if not d: raise Exception(f'Unable to interpret: "{input}"') return d def interpret_multi(self, input, discourse_id=None, question=False, verbose=False): discourse_ids = [discourse_id] if discourse_id is not None else None (d,) = self.interpret_multi_sents([input], discourse_ids, question, verbose) if not d: raise Exception(f'Unable to interpret: "{input}"') return d def interpret_sents( self, inputs, discourse_ids=None, question=False, verbose=False ): return self.interpret_multi_sents( [[input] for input in inputs], discourse_ids, question, verbose ) def interpret_multi_sents( self, inputs, discourse_ids=None, question=False, verbose=False ): if discourse_ids is not None: assert len(inputs) == len(discourse_ids) assert reduce(operator.and_, (id is not None for id in discourse_ids)) use_disc_id = True else: discourse_ids = list(map(str, range(len(inputs)))) use_disc_id = False candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose) boxer_out = self._call_boxer(candc_out, verbose=verbose) drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id) return [drs_dict.get(id, None) for id in discourse_ids] def _call_candc(self, inputs, discourse_ids, question, verbose=False): args = [ "--models", os.path.join(self._candc_models_path, ["boxer", "questions"][question]), "--candc-printer", "boxer", ] return self._call( "\n".join( sum( ([f"<META>'{id}'"] + d for d, id in zip(inputs, discourse_ids)), [], ) ), self._candc_bin, args, verbose, ) def _call_boxer(self, candc_out, verbose=False): f = None try: fd, temp_filename = tempfile.mkstemp( prefix="boxer-", suffix=".in", text=True ) f = os.fdopen(fd, "w") f.write(candc_out.decode("utf-8")) finally: if f: f.close() args = [ "--box", "false", "--semantics", "drs", "--resolve", ["false", "true"][self._resolve], "--elimeq", ["false", "true"][self._elimeq], "--format", "prolog", "--instantiate", "true", "--input", temp_filename, ] stdout = self._call(None, self._boxer_bin, args, verbose) os.remove(temp_filename) return stdout def _find_binary(self, name, bin_dir, verbose=False): return find_binary( name, path_to_bin=bin_dir, env_vars=["CANDC"], url="http://svn.ask.it.usyd.edu.au/trac/candc/", binary_names=[name, name + ".exe"], 
verbose=verbose, ) def _call(self, input_str, binary, args=[], verbose=False): if verbose: print("Calling:", binary) print("Args:", args) print("Input:", input_str) print("Command:", binary + " " + " ".join(args)) if input_str is None: cmd = [binary] + args p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: cmd = 'echo "{}" | {} {}'.format(input_str, binary, " ".join(args)) p = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True ) stdout, stderr = p.communicate() if verbose: print("Return code:", p.returncode) if stdout: print("stdout:\n", stdout, "\n") if stderr: print("stderr:\n", stderr, "\n") if p.returncode != 0: raise Exception( "ERROR CALLING: {} {}\nReturncode: {}\n{}".format( binary, " ".join(args), p.returncode, stderr ) ) return stdout def _parse_to_drs_dict(self, boxer_out, use_disc_id): lines = boxer_out.decode("utf-8").split("\n") drs_dict = {} i = 0 while i < len(lines): line = lines[i] if line.startswith("id("): comma_idx = line.index(",") discourse_id = line[3:comma_idx] if discourse_id[0] == "'" and discourse_id[-1] == "'": discourse_id = discourse_id[1:-1] drs_id = line[comma_idx + 1 : line.index(")")] i += 1 line = lines[i] assert line.startswith(f"sem({drs_id},") if line[-4:] == "').'": line = line[:-4] + ")." assert line.endswith(")."), f"can't parse line: {line}" search_start = len(f"sem({drs_id},[") brace_count = 1 drs_start = -1 for j, c in enumerate(line[search_start:]): if c == "[": brace_count += 1 if c == "]": brace_count -= 1 if brace_count == 0: drs_start = search_start + j + 1 if line[drs_start : drs_start + 3] == "','": drs_start = drs_start + 3 else: drs_start = drs_start + 1 break assert drs_start > -1 drs_input = line[drs_start:-2].strip() parsed = self._parse_drs(drs_input, discourse_id, use_disc_id) drs_dict[discourse_id] = self._boxer_drs_interpreter.interpret(parsed) i += 1 return drs_dict def _parse_drs(self, drs_string, discourse_id, use_disc_id): return BoxerOutputDrsParser([None, discourse_id][use_disc_id]).parse(drs_string) class BoxerOutputDrsParser(DrtParser): def __init__(self, discourse_id=None): DrtParser.__init__(self) self.discourse_id = discourse_id self.sentence_id_offset = None self.quote_chars = [("'", "'", "\\", False)] def parse(self, data, signature=None): return DrtParser.parse(self, data, signature) def get_all_symbols(self): return ["(", ")", ",", "[", "]", ":"] def handle(self, tok, context): return self.handle_drs(tok) def attempt_adjuncts(self, expression, context): return expression def parse_condition(self, indices): tok = self.token() accum = self.handle_condition(tok, indices) if accum is None: raise UnexpectedTokenException(tok) return accum def handle_drs(self, tok): if tok == "drs": return self.parse_drs() elif tok in ["merge", "smerge"]: return self._handle_binary_expression(self._make_merge_expression)(None, []) elif tok in ["alfa"]: return self._handle_alfa(self._make_merge_expression)(None, []) def handle_condition(self, tok, indices): if tok == "not": return [self._handle_not()] if tok == "or": conds = [self._handle_binary_expression(self._make_or_expression)] elif tok == "imp": conds = [self._handle_binary_expression(self._make_imp_expression)] elif tok == "eq": conds = [self._handle_eq()] elif tok == "prop": conds = [self._handle_prop()] elif tok == "pred": conds = [self._handle_pred()] elif tok == "named": conds = [self._handle_named()] elif tok == "rel": conds = [self._handle_rel()] elif tok == "timex": conds = self._handle_timex() elif tok == "card": 
conds = [self._handle_card()] elif tok == "whq": conds = [self._handle_whq()] elif tok == "duplex": conds = [self._handle_duplex()] else: conds = [] return sum( ( [cond(sent_index, word_indices) for cond in conds] for sent_index, word_indices in self._sent_and_word_indices(indices) ), [], ) def _handle_not(self): self.assertToken(self.token(), "(") drs = self.process_next_expression(None) self.assertToken(self.token(), ")") return BoxerNot(drs) def _handle_pred(self): self.assertToken(self.token(), "(") variable = self.parse_variable() self.assertToken(self.token(), ",") name = self.token() self.assertToken(self.token(), ",") pos = self.token() self.assertToken(self.token(), ",") sense = int(self.token()) self.assertToken(self.token(), ")") def _handle_pred_f(sent_index, word_indices): return BoxerPred( self.discourse_id, sent_index, word_indices, variable, name, pos, sense ) return _handle_pred_f def _handle_duplex(self): self.assertToken(self.token(), "(") ans_types = [] self.assertToken(self.token(), "whq") self.assertToken(self.token(), ",") d1 = self.process_next_expression(None) self.assertToken(self.token(), ",") ref = self.parse_variable() self.assertToken(self.token(), ",") d2 = self.process_next_expression(None) self.assertToken(self.token(), ")") return lambda sent_index, word_indices: BoxerWhq( self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 ) def _handle_named(self): self.assertToken(self.token(), "(") variable = self.parse_variable() self.assertToken(self.token(), ",") name = self.token() self.assertToken(self.token(), ",") type = self.token() self.assertToken(self.token(), ",") sense = self.token() self.assertToken(self.token(), ")") return lambda sent_index, word_indices: BoxerNamed( self.discourse_id, sent_index, word_indices, variable, name, type, sense ) def _handle_rel(self): self.assertToken(self.token(), "(") var1 = self.parse_variable() self.assertToken(self.token(), ",") var2 = self.parse_variable() self.assertToken(self.token(), ",") rel = self.token() self.assertToken(self.token(), ",") sense = int(self.token()) self.assertToken(self.token(), ")") return lambda sent_index, word_indices: BoxerRel( self.discourse_id, sent_index, word_indices, var1, var2, rel, sense ) def _handle_timex(self): self.assertToken(self.token(), "(") arg = self.parse_variable() self.assertToken(self.token(), ",") new_conds = self._handle_time_expression(arg) self.assertToken(self.token(), ")") return new_conds def _handle_time_expression(self, arg): tok = self.token() self.assertToken(self.token(), "(") if tok == "date": conds = self._handle_date(arg) elif tok == "time": conds = self._handle_time(arg) else: return None self.assertToken(self.token(), ")") return [ lambda sent_index, word_indices: BoxerPred( self.discourse_id, sent_index, word_indices, arg, tok, "n", 0 ) ] + [lambda sent_index, word_indices: cond for cond in conds] def _handle_date(self, arg): conds = [] ((sent_index, word_indices),) = self._sent_and_word_indices( self._parse_index_list() ) self.assertToken(self.token(), "(") pol = self.token() self.assertToken(self.token(), ")") conds.append( BoxerPred( self.discourse_id, sent_index, word_indices, arg, f"date_pol_{pol}", "a", 0, ) ) self.assertToken(self.token(), ",") ((sent_index, word_indices),) = self._sent_and_word_indices( self._parse_index_list() ) year = self.token() if year != "XXXX": year = year.replace(":", "_") conds.append( BoxerPred( self.discourse_id, sent_index, word_indices, arg, f"date_year_{year}", "a", 0, ) ) 
self.assertToken(self.token(), ",") ((sent_index, word_indices),) = self._sent_and_word_indices( self._parse_index_list() ) month = self.token() if month != "XX": conds.append( BoxerPred( self.discourse_id, sent_index, word_indices, arg, f"date_month_{month}", "a", 0, ) ) self.assertToken(self.token(), ",") ((sent_index, word_indices),) = self._sent_and_word_indices( self._parse_index_list() ) day = self.token() if day != "XX": conds.append( BoxerPred( self.discourse_id, sent_index, word_indices, arg, f"date_day_{day}", "a", 0, ) ) return conds def _handle_time(self, arg): conds = [] self._parse_index_list() hour = self.token() if hour != "XX": conds.append(self._make_atom("r_hour_2", arg, hour)) self.assertToken(self.token(), ",") self._parse_index_list() min = self.token() if min != "XX": conds.append(self._make_atom("r_min_2", arg, min)) self.assertToken(self.token(), ",") self._parse_index_list() sec = self.token() if sec != "XX": conds.append(self._make_atom("r_sec_2", arg, sec)) return conds def _handle_card(self): self.assertToken(self.token(), "(") variable = self.parse_variable() self.assertToken(self.token(), ",") value = self.token() self.assertToken(self.token(), ",") type = self.token() self.assertToken(self.token(), ")") return lambda sent_index, word_indices: BoxerCard( self.discourse_id, sent_index, word_indices, variable, value, type ) def _handle_prop(self): self.assertToken(self.token(), "(") variable = self.parse_variable() self.assertToken(self.token(), ",") drs = self.process_next_expression(None) self.assertToken(self.token(), ")") return lambda sent_index, word_indices: BoxerProp( self.discourse_id, sent_index, word_indices, variable, drs ) def _parse_index_list(self): indices = [] self.assertToken(self.token(), "[") while self.token(0) != "]": indices.append(self.parse_index()) if self.token(0) == ",": self.token() self.token() self.assertToken(self.token(), ":") return indices def parse_drs(self): self.assertToken(self.token(), "(") self.assertToken(self.token(), "[") refs = set() while self.token(0) != "]": indices = self._parse_index_list() refs.add(self.parse_variable()) if self.token(0) == ",": self.token() self.token() self.assertToken(self.token(), ",") self.assertToken(self.token(), "[") conds = [] while self.token(0) != "]": indices = self._parse_index_list() conds.extend(self.parse_condition(indices)) if self.token(0) == ",": self.token() self.token() self.assertToken(self.token(), ")") return BoxerDrs(list(refs), conds) def _handle_binary_expression(self, make_callback): self.assertToken(self.token(), "(") drs1 = self.process_next_expression(None) self.assertToken(self.token(), ",") drs2 = self.process_next_expression(None) self.assertToken(self.token(), ")") return lambda sent_index, word_indices: make_callback( sent_index, word_indices, drs1, drs2 ) def _handle_alfa(self, make_callback): self.assertToken(self.token(), "(") type = self.token() self.assertToken(self.token(), ",") drs1 = self.process_next_expression(None) self.assertToken(self.token(), ",") drs2 = self.process_next_expression(None) self.assertToken(self.token(), ")") return lambda sent_index, word_indices: make_callback( sent_index, word_indices, drs1, drs2 ) def _handle_eq(self): self.assertToken(self.token(), "(") var1 = self.parse_variable() self.assertToken(self.token(), ",") var2 = self.parse_variable() self.assertToken(self.token(), ")") return lambda sent_index, word_indices: BoxerEq( self.discourse_id, sent_index, word_indices, var1, var2 ) def _handle_whq(self): 
self.assertToken(self.token(), "(") self.assertToken(self.token(), "[") ans_types = [] while self.token(0) != "]": cat = self.token() self.assertToken(self.token(), ":") if cat == "des": ans_types.append(self.token()) elif cat == "num": ans_types.append("number") typ = self.token() if typ == "cou": ans_types.append("count") else: ans_types.append(typ) else: ans_types.append(self.token()) self.token() self.assertToken(self.token(), ",") d1 = self.process_next_expression(None) self.assertToken(self.token(), ",") ref = self.parse_variable() self.assertToken(self.token(), ",") d2 = self.process_next_expression(None) self.assertToken(self.token(), ")") return lambda sent_index, word_indices: BoxerWhq( self.discourse_id, sent_index, word_indices, ans_types, d1, ref, d2 ) def _make_merge_expression(self, sent_index, word_indices, drs1, drs2): return BoxerDrs(drs1.refs + drs2.refs, drs1.conds + drs2.conds) def _make_or_expression(self, sent_index, word_indices, drs1, drs2): return BoxerOr(self.discourse_id, sent_index, word_indices, drs1, drs2) def _make_imp_expression(self, sent_index, word_indices, drs1, drs2): return BoxerDrs(drs1.refs, drs1.conds, drs2) def parse_variable(self): var = self.token() assert re.match(r"^[exps]\d+$", var), var return var def parse_index(self): return int(self.token()) def _sent_and_word_indices(self, indices): sent_indices = {(i / 1000) - 1 for i in indices if i >= 0} if sent_indices: pairs = [] for sent_index in sent_indices: word_indices = [ (i % 1000) - 1 for i in indices if sent_index == (i / 1000) - 1 ] pairs.append((sent_index, word_indices)) return pairs else: word_indices = [(i % 1000) - 1 for i in indices] return [(None, word_indices)] class BoxerDrsParser(DrtParser): def __init__(self, discourse_id=None): DrtParser.__init__(self) self.discourse_id = discourse_id def get_all_symbols(self): return [ DrtTokens.OPEN, DrtTokens.CLOSE, DrtTokens.COMMA, DrtTokens.OPEN_BRACKET, DrtTokens.CLOSE_BRACKET, ] def attempt_adjuncts(self, expression, context): return expression def handle(self, tok, context): try: if tok == "pred": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = self.nullableIntToken() self.assertNextToken(DrtTokens.COMMA) word_ids = list(map(int, self.handle_refs())) self.assertNextToken(DrtTokens.COMMA) variable = int(self.token()) self.assertNextToken(DrtTokens.COMMA) name = self.token() self.assertNextToken(DrtTokens.COMMA) pos = self.token() self.assertNextToken(DrtTokens.COMMA) sense = int(self.token()) self.assertNextToken(DrtTokens.CLOSE) return BoxerPred(disc_id, sent_id, word_ids, variable, name, pos, sense) elif tok == "named": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = int(self.token()) self.assertNextToken(DrtTokens.COMMA) word_ids = map(int, self.handle_refs()) self.assertNextToken(DrtTokens.COMMA) variable = int(self.token()) self.assertNextToken(DrtTokens.COMMA) name = self.token() self.assertNextToken(DrtTokens.COMMA) type = self.token() self.assertNextToken(DrtTokens.COMMA) sense = int(self.token()) self.assertNextToken(DrtTokens.CLOSE) return BoxerNamed( disc_id, sent_id, word_ids, variable, name, type, sense ) elif tok == "rel": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = 
self.nullableIntToken() self.assertNextToken(DrtTokens.COMMA) word_ids = list(map(int, self.handle_refs())) self.assertNextToken(DrtTokens.COMMA) var1 = int(self.token()) self.assertNextToken(DrtTokens.COMMA) var2 = int(self.token()) self.assertNextToken(DrtTokens.COMMA) rel = self.token() self.assertNextToken(DrtTokens.COMMA) sense = int(self.token()) self.assertNextToken(DrtTokens.CLOSE) return BoxerRel(disc_id, sent_id, word_ids, var1, var2, rel, sense) elif tok == "prop": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = int(self.token()) self.assertNextToken(DrtTokens.COMMA) word_ids = list(map(int, self.handle_refs())) self.assertNextToken(DrtTokens.COMMA) variable = int(self.token()) self.assertNextToken(DrtTokens.COMMA) drs = self.process_next_expression(None) self.assertNextToken(DrtTokens.CLOSE) return BoxerProp(disc_id, sent_id, word_ids, variable, drs) elif tok == "not": self.assertNextToken(DrtTokens.OPEN) drs = self.process_next_expression(None) self.assertNextToken(DrtTokens.CLOSE) return BoxerNot(drs) elif tok == "imp": self.assertNextToken(DrtTokens.OPEN) drs1 = self.process_next_expression(None) self.assertNextToken(DrtTokens.COMMA) drs2 = self.process_next_expression(None) self.assertNextToken(DrtTokens.CLOSE) return BoxerDrs(drs1.refs, drs1.conds, drs2) elif tok == "or": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = self.nullableIntToken() self.assertNextToken(DrtTokens.COMMA) word_ids = map(int, self.handle_refs()) self.assertNextToken(DrtTokens.COMMA) drs1 = self.process_next_expression(None) self.assertNextToken(DrtTokens.COMMA) drs2 = self.process_next_expression(None) self.assertNextToken(DrtTokens.CLOSE) return BoxerOr(disc_id, sent_id, word_ids, drs1, drs2) elif tok == "eq": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = self.nullableIntToken() self.assertNextToken(DrtTokens.COMMA) word_ids = list(map(int, self.handle_refs())) self.assertNextToken(DrtTokens.COMMA) var1 = int(self.token()) self.assertNextToken(DrtTokens.COMMA) var2 = int(self.token()) self.assertNextToken(DrtTokens.CLOSE) return BoxerEq(disc_id, sent_id, word_ids, var1, var2) elif tok == "card": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = self.nullableIntToken() self.assertNextToken(DrtTokens.COMMA) word_ids = map(int, self.handle_refs()) self.assertNextToken(DrtTokens.COMMA) var = int(self.token()) self.assertNextToken(DrtTokens.COMMA) value = self.token() self.assertNextToken(DrtTokens.COMMA) type = self.token() self.assertNextToken(DrtTokens.CLOSE) return BoxerCard(disc_id, sent_id, word_ids, var, value, type) elif tok == "whq": self.assertNextToken(DrtTokens.OPEN) disc_id = ( self.discourse_id if self.discourse_id is not None else self.token() ) self.assertNextToken(DrtTokens.COMMA) sent_id = self.nullableIntToken() self.assertNextToken(DrtTokens.COMMA) word_ids = list(map(int, self.handle_refs())) self.assertNextToken(DrtTokens.COMMA) ans_types = self.handle_refs() self.assertNextToken(DrtTokens.COMMA) drs1 = self.process_next_expression(None) self.assertNextToken(DrtTokens.COMMA) var = int(self.token()) 
self.assertNextToken(DrtTokens.COMMA) drs2 = self.process_next_expression(None) self.assertNextToken(DrtTokens.CLOSE) return BoxerWhq(disc_id, sent_id, word_ids, ans_types, drs1, var, drs2) except Exception as e: raise LogicalExpressionException(self._currentIndex, str(e)) from e assert False, repr(tok) def nullableIntToken(self): t = self.token() return int(t) if t != "None" else None def get_next_token_variable(self, description): try: return self.token() except ExpectedMoreTokensException as e: raise ExpectedMoreTokensException(e.index, "Variable expected.") from e class AbstractBoxerDrs: def variables(self): variables, events, propositions = self._variables() return (variables - (events | propositions), events, propositions - events) def variable_types(self): vartypes = {} for t, vars in zip(("z", "e", "p"), self.variables()): for v in vars: vartypes[v] = t return vartypes def _variables(self): return (set(), set(), set()) def atoms(self): return set() def clean(self): return self def _clean_name(self, name): return name.replace("-", "_").replace("'", "_") def renumber_sentences(self, f): return self def __hash__(self): return hash(f"{self}") class BoxerDrs(AbstractBoxerDrs): def __init__(self, refs, conds, consequent=None): AbstractBoxerDrs.__init__(self) self.refs = refs self.conds = conds self.consequent = consequent def _variables(self): variables = (set(), set(), set()) for cond in self.conds: for s, v in zip(variables, cond._variables()): s.update(v) if self.consequent is not None: for s, v in zip(variables, self.consequent._variables()): s.update(v) return variables def atoms(self): atoms = reduce(operator.or_, (cond.atoms() for cond in self.conds), set()) if self.consequent is not None: atoms.update(self.consequent.atoms()) return atoms def clean(self): consequent = self.consequent.clean() if self.consequent else None return BoxerDrs(self.refs, [c.clean() for c in self.conds], consequent) def renumber_sentences(self, f): consequent = self.consequent.renumber_sentences(f) if self.consequent else None return BoxerDrs( self.refs, [c.renumber_sentences(f) for c in self.conds], consequent ) def __repr__(self): s = "drs([{}], [{}])".format( ", ".join("%s" % r for r in self.refs), ", ".join("%s" % c for c in self.conds), ) if self.consequent is not None: s = f"imp({s}, {self.consequent})" return s def __eq__(self, other): return ( self.__class__ == other.__class__ and self.refs == other.refs and len(self.conds) == len(other.conds) and reduce( operator.and_, (c1 == c2 for c1, c2 in zip(self.conds, other.conds)) ) and self.consequent == other.consequent ) def __ne__(self, other): return not self == other __hash__ = AbstractBoxerDrs.__hash__ class BoxerNot(AbstractBoxerDrs): def __init__(self, drs): AbstractBoxerDrs.__init__(self) self.drs = drs def _variables(self): return self.drs._variables() def atoms(self): return self.drs.atoms() def clean(self): return BoxerNot(self.drs.clean()) def renumber_sentences(self, f): return BoxerNot(self.drs.renumber_sentences(f)) def __repr__(self): return "not(%s)" % (self.drs) def __eq__(self, other): return self.__class__ == other.__class__ and self.drs == other.drs def __ne__(self, other): return not self == other __hash__ = AbstractBoxerDrs.__hash__ class BoxerIndexed(AbstractBoxerDrs): def __init__(self, discourse_id, sent_index, word_indices): AbstractBoxerDrs.__init__(self) self.discourse_id = discourse_id self.sent_index = sent_index self.word_indices = word_indices def atoms(self): return {self} def __eq__(self, other): return ( 
self.__class__ == other.__class__ and self.discourse_id == other.discourse_id and self.sent_index == other.sent_index and self.word_indices == other.word_indices and reduce(operator.and_, (s == o for s, o in zip(self, other))) ) def __ne__(self, other): return not self == other __hash__ = AbstractBoxerDrs.__hash__ def __repr__(self): s = "{}({}, {}, [{}]".format( self._pred(), self.discourse_id, self.sent_index, ", ".join("%s" % wi for wi in self.word_indices), ) for v in self: s += ", %s" % v return s + ")" class BoxerPred(BoxerIndexed): def __init__(self, discourse_id, sent_index, word_indices, var, name, pos, sense): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.var = var self.name = name self.pos = pos self.sense = sense def _variables(self): return ({self.var}, set(), set()) def change_var(self, var): return BoxerPred( self.discourse_id, self.sent_index, self.word_indices, var, self.name, self.pos, self.sense, ) def clean(self): return BoxerPred( self.discourse_id, self.sent_index, self.word_indices, self.var, self._clean_name(self.name), self.pos, self.sense, ) def renumber_sentences(self, f): new_sent_index = f(self.sent_index) return BoxerPred( self.discourse_id, new_sent_index, self.word_indices, self.var, self.name, self.pos, self.sense, ) def __iter__(self): return iter((self.var, self.name, self.pos, self.sense)) def _pred(self): return "pred" class BoxerNamed(BoxerIndexed): def __init__(self, discourse_id, sent_index, word_indices, var, name, type, sense): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.var = var self.name = name self.type = type self.sense = sense def _variables(self): return ({self.var}, set(), set()) def change_var(self, var): return BoxerNamed( self.discourse_id, self.sent_index, self.word_indices, var, self.name, self.type, self.sense, ) def clean(self): return BoxerNamed( self.discourse_id, self.sent_index, self.word_indices, self.var, self._clean_name(self.name), self.type, self.sense, ) def renumber_sentences(self, f): return BoxerNamed( self.discourse_id, f(self.sent_index), self.word_indices, self.var, self.name, self.type, self.sense, ) def __iter__(self): return iter((self.var, self.name, self.type, self.sense)) def _pred(self): return "named" class BoxerRel(BoxerIndexed): def __init__(self, discourse_id, sent_index, word_indices, var1, var2, rel, sense): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.var1 = var1 self.var2 = var2 self.rel = rel self.sense = sense def _variables(self): return ({self.var1, self.var2}, set(), set()) def clean(self): return BoxerRel( self.discourse_id, self.sent_index, self.word_indices, self.var1, self.var2, self._clean_name(self.rel), self.sense, ) def renumber_sentences(self, f): return BoxerRel( self.discourse_id, f(self.sent_index), self.word_indices, self.var1, self.var2, self.rel, self.sense, ) def __iter__(self): return iter((self.var1, self.var2, self.rel, self.sense)) def _pred(self): return "rel" class BoxerProp(BoxerIndexed): def __init__(self, discourse_id, sent_index, word_indices, var, drs): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.var = var self.drs = drs def _variables(self): return tuple( map(operator.or_, (set(), set(), {self.var}), self.drs._variables()) ) def referenced_labels(self): return {self.drs} def atoms(self): return self.drs.atoms() def clean(self): return BoxerProp( self.discourse_id, self.sent_index, self.word_indices, self.var, self.drs.clean(), ) def renumber_sentences(self, 
f): return BoxerProp( self.discourse_id, f(self.sent_index), self.word_indices, self.var, self.drs.renumber_sentences(f), ) def __iter__(self): return iter((self.var, self.drs)) def _pred(self): return "prop" class BoxerEq(BoxerIndexed): def __init__(self, discourse_id, sent_index, word_indices, var1, var2): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.var1 = var1 self.var2 = var2 def _variables(self): return ({self.var1, self.var2}, set(), set()) def atoms(self): return set() def renumber_sentences(self, f): return BoxerEq( self.discourse_id, f(self.sent_index), self.word_indices, self.var1, self.var2, ) def __iter__(self): return iter((self.var1, self.var2)) def _pred(self): return "eq" class BoxerCard(BoxerIndexed): def __init__(self, discourse_id, sent_index, word_indices, var, value, type): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.var = var self.value = value self.type = type def _variables(self): return ({self.var}, set(), set()) def renumber_sentences(self, f): return BoxerCard( self.discourse_id, f(self.sent_index), self.word_indices, self.var, self.value, self.type, ) def __iter__(self): return iter((self.var, self.value, self.type)) def _pred(self): return "card" class BoxerOr(BoxerIndexed): def __init__(self, discourse_id, sent_index, word_indices, drs1, drs2): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.drs1 = drs1 self.drs2 = drs2 def _variables(self): return tuple(map(operator.or_, self.drs1._variables(), self.drs2._variables())) def atoms(self): return self.drs1.atoms() | self.drs2.atoms() def clean(self): return BoxerOr( self.discourse_id, self.sent_index, self.word_indices, self.drs1.clean(), self.drs2.clean(), ) def renumber_sentences(self, f): return BoxerOr( self.discourse_id, f(self.sent_index), self.word_indices, self.drs1, self.drs2, ) def __iter__(self): return iter((self.drs1, self.drs2)) def _pred(self): return "or" class BoxerWhq(BoxerIndexed): def __init__( self, discourse_id, sent_index, word_indices, ans_types, drs1, variable, drs2 ): BoxerIndexed.__init__(self, discourse_id, sent_index, word_indices) self.ans_types = ans_types self.drs1 = drs1 self.variable = variable self.drs2 = drs2 def _variables(self): return tuple( map( operator.or_, ({self.variable}, set(), set()), self.drs1._variables(), self.drs2._variables(), ) ) def atoms(self): return self.drs1.atoms() | self.drs2.atoms() def clean(self): return BoxerWhq( self.discourse_id, self.sent_index, self.word_indices, self.ans_types, self.drs1.clean(), self.variable, self.drs2.clean(), ) def renumber_sentences(self, f): return BoxerWhq( self.discourse_id, f(self.sent_index), self.word_indices, self.ans_types, self.drs1, self.variable, self.drs2, ) def __iter__(self): return iter( ("[" + ",".join(self.ans_types) + "]", self.drs1, self.variable, self.drs2) ) def _pred(self): return "whq" class PassthroughBoxerDrsInterpreter: def interpret(self, ex): return ex class NltkDrtBoxerDrsInterpreter: def __init__(self, occur_index=False): self._occur_index = occur_index def interpret(self, ex): if isinstance(ex, BoxerDrs): drs = DRS( [Variable(r) for r in ex.refs], list(map(self.interpret, ex.conds)) ) if ex.consequent is not None: drs.consequent = self.interpret(ex.consequent) return drs elif isinstance(ex, BoxerNot): return DrtNegatedExpression(self.interpret(ex.drs)) elif isinstance(ex, BoxerPred): pred = self._add_occur_indexing(f"{ex.pos}_{ex.name}", ex) return self._make_atom(pred, ex.var) elif isinstance(ex, BoxerNamed): 
pred = self._add_occur_indexing(f"ne_{ex.type}_{ex.name}", ex) return self._make_atom(pred, ex.var) elif isinstance(ex, BoxerRel): pred = self._add_occur_indexing("%s" % (ex.rel), ex) return self._make_atom(pred, ex.var1, ex.var2) elif isinstance(ex, BoxerProp): return DrtProposition(Variable(ex.var), self.interpret(ex.drs)) elif isinstance(ex, BoxerEq): return DrtEqualityExpression( DrtVariableExpression(Variable(ex.var1)), DrtVariableExpression(Variable(ex.var2)), ) elif isinstance(ex, BoxerCard): pred = self._add_occur_indexing(f"card_{ex.type}_{ex.value}", ex) return self._make_atom(pred, ex.var) elif isinstance(ex, BoxerOr): return DrtOrExpression(self.interpret(ex.drs1), self.interpret(ex.drs2)) elif isinstance(ex, BoxerWhq): drs1 = self.interpret(ex.drs1) drs2 = self.interpret(ex.drs2) return DRS(drs1.refs + drs2.refs, drs1.conds + drs2.conds) assert False, f"{ex.__class__.__name__}: {ex}" def _make_atom(self, pred, *args): accum = DrtVariableExpression(Variable(pred)) for arg in args: accum = DrtApplicationExpression( accum, DrtVariableExpression(Variable(arg)) ) return accum def _add_occur_indexing(self, base, ex): if self._occur_index and ex.sent_index is not None: if ex.discourse_id: base += "_%s" % ex.discourse_id base += "_s%s" % ex.sent_index base += "_w%s" % sorted(ex.word_indices)[0] return base class UnparseableInputException(Exception): pass if __name__ == "__main__": opts = OptionParser("usage: %prog TEXT [options]") opts.add_option( "--verbose", "-v", help="display verbose logs", action="store_true", default=False, dest="verbose", ) opts.add_option( "--fol", "-f", help="output FOL", action="store_true", default=False, dest="fol" ) opts.add_option( "--question", "-q", help="input is a question", action="store_true", default=False, dest="question", ) opts.add_option( "--occur", "-o", help="occurrence index", action="store_true", default=False, dest="occur_index", ) (options, args) = opts.parse_args() if len(args) != 1: opts.error("incorrect number of arguments") interpreter = NltkDrtBoxerDrsInterpreter(occur_index=options.occur_index) drs = Boxer(interpreter).interpret_multi( args[0].split(r"\n"), question=options.question, verbose=options.verbose ) if drs is None: print(None) else: drs = drs.simplify().eliminate_equality() if options.fol: print(drs.fol().normalize()) else: drs.pretty_print()
Natural Language Toolkit: Cooper storage for Quantifier Ambiguity
(C) 2001-2023 NLTK Project
Author: Ewan Klein <ewan@inf.ed.ac.uk>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

CooperStore: A container for handling quantifier ambiguity via Cooper storage.

CooperStore.__init__:
    :param featstruct: The value of the ``sem`` node in a tree from
        ``parse_with_bindops()``
    :type featstruct: FeatStruct (with features ``CORE`` and ``STORE``)

CooperStore._permute:
    :return: An iterator over the permutations of the input list
    :type lst: list
    :rtype: iter

CooperStore.s_retrieve: Carry out S-Retrieval of binding operators in store.
If hack=True, serialize the bindop and core as strings and reparse. Ugh.
Each permutation of the store (i.e. list of binding operators) is taken to be
a possible scoping of quantifiers. We iterate through the binding operators in
each permutation and successively apply them to the current term, starting
with the core semantic representation, working from the inside out. Binding
operators are of the form bo(\P.all x.(man(x) -> P(x)), z1); inside the loop
we just want the arguments that are wrapped by the bo predicate, and we use
the variable to make an abstraction over the current term and then apply the
quantifier to it.

parse_with_bindops: Use a grammar with Binding Operators to parse a sentence.
The default grammar is grammars/book_grammars/storage.fcfg. The demo analyses
"every girl chases a dog"; an alternative sentence, "a man gives a bone to
every dog", is left as a comment.
from nltk.parse import load_parser from nltk.parse.featurechart import InstantiateVarsChart from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable class CooperStore: def __init__(self, featstruct): self.featstruct = featstruct self.readings = [] try: self.core = featstruct["CORE"] self.store = featstruct["STORE"] except KeyError: print("%s is not a Cooper storage structure" % featstruct) def _permute(self, lst): remove = lambda lst0, index: lst0[:index] + lst0[index + 1 :] if lst: for index, x in enumerate(lst): for y in self._permute(remove(lst, index)): yield (x,) + y else: yield () def s_retrieve(self, trace=False): r for perm, store_perm in enumerate(self._permute(self.store)): if trace: print("Permutation %s" % (perm + 1)) term = self.core for bindop in store_perm: quant, varex = tuple(bindop.args) term = ApplicationExpression( quant, LambdaExpression(varex.variable, term) ) if trace: print(" ", term) term = term.simplify() self.readings.append(term) def parse_with_bindops(sentence, grammar=None, trace=0): if not grammar: grammar = "grammars/book_grammars/storage.fcfg" parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart) tokens = sentence.split() return list(parser.parse(tokens)) def demo(): from nltk.sem import cooper_storage as cs sentence = "every girl chases a dog" print() print("Analysis of sentence '%s'" % sentence) print("=" * 50) trees = cs.parse_with_bindops(sentence, trace=0) for tree in trees: semrep = cs.CooperStore(tree.label()["SEM"]) print() print("Binding operators:") print("-" * 15) for s in semrep.store: print(s) print() print("Core:") print("-" * 15) print(semrep.core) print() print("S-Retrieval:") print("-" * 15) semrep.s_retrieve(trace=True) print("Readings:") print("-" * 15) for i, reading in enumerate(semrep.readings): print(f"{i + 1}: {reading}") if __name__ == "__main__": demo()
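The retrieval loop in s_retrieve above can also be illustrated without the feature grammar. The sketch below hand-builds a core term and two stored (quantifier, variable) pairs with nltk.sem.logic and applies them in both orders, mirroring what s_retrieve does for each permutation of the store; the particular lambda terms are illustrative, not taken from storage.fcfg.

    # Illustrating S-retrieval by hand: apply stored quantifiers to a core term
    # in different orders to obtain the different scopings.
    from nltk.sem.logic import ApplicationExpression, Expression, LambdaExpression

    read = Expression.fromstring
    core = read(r'chase(x,z)')                                      # core representation
    every_girl = (read(r'\P.all x.(girl(x) -> P(x))'), read('x'))   # stored binding operator
    a_dog = (read(r'\P.exists z.(dog(z) & P(z))'), read('z'))       # stored binding operator

    def retrieve(core, ops):
        term = core
        for quant, varex in ops:   # work from the inside out
            term = ApplicationExpression(quant, LambdaExpression(varex.variable, term))
        return term.simplify()

    print(retrieve(core, [a_dog, every_girl]))   # all x.(girl(x) -> exists z.(dog(z) & chase(x,z)))
    print(retrieve(core, [every_girl, a_dog]))   # exists z.(dog(z) & all x.(girl(x) -> chase(x,z)))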
natural language toolkit discourse representation theory drt dan garrette dhgarrettegmail com c 20012023 nltk project url https www nltk org for license information see license txt import tkinterbased modules if they are available no need to print a warning here nltk draw has already printed one a lambda calculus expression parser def initself logicparser initself self operatorprecedence dict x 1 for x in drttokens lambdalist x 2 for x in drttokens notlist app 3 x 4 for x in drttokens eqlist tokens neqlist drttokens colon 5 drttokens drsconc 6 x 7 for x in drttokens orlist x 8 for x in drttokens implist none 9 def getallsymbolsself this method is intended to be overridden for logics that use different operators or expressions if tok in drttokens notlist return self handlenegationtok context elif tok in drttokens lambdalist return self handlelambdatok context elif tok drttokens open if self inrange0 and self token0 drttokens openbracket return self handledrstok context else return self handleopentok context elif tok upper drttokens drs self assertnexttokendrttokens open return self handledrstok context elif self isvariabletok if self inrange0 and self token0 drttokens colon return self handleproptok context else return self handlevariabletok context def makenegatedexpressionself expression return drtnegatedexpressionexpression def handledrsself tok context a drs refs self handlerefs if self inrange0 and self token0 drttokens comma if there is a comma it s optional self token swallow the comma conds self handlecondscontext self assertnexttokendrttokens close return drsrefs conds none def handlerefsself self assertnexttokendrttokens openbracket refs while self inrange0 and self token0 drttokens closebracket support expressions like drsx y c drsx y c if refs and self token0 drttokens comma self token swallow the comma refs appendself getnexttokenvariablequantified self assertnexttokendrttokens closebracket return refs def handlecondsself context self assertnexttokendrttokens openbracket conds while self inrange0 and self token0 drttokens closebracket support expressions like drsx y c drsx y c if conds and self token0 drttokens comma self token swallow the comma conds appendself processnextexpressioncontext self assertnexttokendrttokens closebracket return conds def handlepropself tok context variable self makevariableexpressiontok self assertnexttoken drs self processnextexpressiondrttokens colon return drtpropositionvariable drs def makeequalityexpressionself first second this method serves as a hook for other logic parsers that have different boolean operators if tok drttokens drsconc return lambda first second drtconcatenationfirst second none elif tok in drttokens orlist return drtorexpression elif tok in drttokens implist def makeimpexpressionfirst second if isinstancefirst drs return drsfirst refs first conds second if isinstancefirst drtconcatenation return drtconcatenationfirst first first second second raise exceptionantecedent of implication must be a drs return makeimpexpression else return none def makebooleanexpressionself factory first second return factoryfirst second def makeapplicationexpressionself function argument return drtapplicationexpressionfunction argument def makevariableexpressionself name return drtvariableexpressionvariablename def makelambdaexpressionself variables term return drtlambdaexpressionvariables term class drtexpression drtparser drtparser classmethod def fromstringcls s return cls drtparser parses def applytoself other return 
drtapplicationexpressionself other def negself return drtnegatedexpressionself def andself other return notimplemented def orself other assert isinstanceother drtexpression return drtorexpressionself other def gtself other assert isinstanceother drtexpression if isinstanceself drs return drsself refs self conds other if isinstanceself drtconcatenation return drtconcatenationself first self second other raise exceptionantecedent of implication must be a drs def equivself other provernone assert isinstanceother drtexpression f1 self simplify fol f2 other simplify fol return f1 equivf2 prover property def typeself raise attributeerror s object has no attribute type self class name def typecheckself signaturenone raise notimplementederror def addself other return drtconcatenationself other none def getrefsself recursivefalse raise notimplementederror def ispronounfunctionself draw the drs return the pretty print string a discourse representation structure def initself refs conds consequentnone self refs refs self conds conds self consequent consequent def replaceself variable expression replaceboundfalse alphaconverttrue if a bound variable is the thing being replaced any bound variable that appears in the expression must be alpha converted to avoid a conflict replace in the conditions see expression free condsfree reduceoperator or c free for c in self conds set if self consequent condsfree updateself consequent free return condsfree setself refs def getrefsself recursivefalse see expression visit parts listmapfunction self conds if self consequent parts appendfunctionself consequent return combinatorparts def visitstructuredself function combinator this is a factory method that instantiates and returns a subtype of drtabstractvariableexpression appropriate for the given variable see abstractexpression getrefs return def prettyself s s self blank lens return blank blank s blank def eliminateequalityself return self class drtindividualvariableexpression drtabstractvariableexpression individualvariableexpression pass class drtfunctionvariableexpression drtabstractvariableexpression functionvariableexpression pass class drteventvariableexpression drtindividualvariableexpression eventvariableexpression pass class drtconstantexpressiondrtabstractvariableexpression constantexpression pass class drtpropositiondrtexpression expression def initself variable drs self variable variable self drs drs def replaceself variable expression replaceboundfalse alphaconverttrue if self variable variable assert isinstance expression drtabstractvariableexpression can only replace a proposition label with a variable return drtproposition expression variable self drs replacevariable expression replacebound alphaconvert else return drtproposition self variable self drs replacevariable expression replacebound alphaconvert def eliminateequalityself return drtpropositionself variable self drs eliminateequality def getrefsself recursivefalse return self drs getrefstrue if recursive else def eqself other return self class other class and self variable other variable and self drs other drs def neself other return not self other hash expression hash def folself return self drs fol def prettyself drss self drs pretty blank lens self variable return blank line for line in drss 1 s self variable line for line in drss1 2 blank line for line in drss2 def visitself function combinator see expression visitstructured return combinatorself variable functionself drs def strself return fpropself variable self drs class 
drtnegatedexpressiondrtexpression negatedexpression def folself return negatedexpressionself term fol def getrefsself recursivefalse rename all occurrences of the variable introduced by this variable binder in the expression to newvar param newvar variable for the new variable see abstractexpression getrefs return self variable self term getrefstrue if recursive else self variable class drtbinaryexpressiondrtexpression binaryexpression def getrefsself recursivefalse drs of the form drs drs def initself first second consequentnone drtbooleanexpression initself first second self consequent consequent def replaceself variable expression replaceboundfalse alphaconverttrue if variable is bound alpha convert every ref that is free in expression todo at some point for now simplify for any ref that is in both first and second alpha convert the ref in second to prevent collision see abstractexpression getrefs refs self first getrefsrecursive self second getrefsrecursive if self consequent and recursive refs extendself consequent getrefstrue return refs def getopself return drttokens drsconc def eqself other rdefines equality modulo alphabetic variance if we are comparing x m and y n then check equality of m and nxy if isinstanceother drtconcatenation selfrefs self getrefs otherrefs other getrefs if lenselfrefs lenotherrefs convertedother other for r1 r2 in zipselfrefs otherrefs varex self makevariableexpressionr1 convertedother convertedother replacer2 varex true return self first convertedother first and self second convertedother second and self consequent convertedother consequent return false def neself other return not self other hash drtbooleanexpression hash def folself e andexpressionself first fol self second fol if self consequent e impexpressione self consequent fol return e def prettyself drs drtbinaryexpression assemblepretty self prettysubexself first self getop self prettysubexself second if self consequent drs drtbinaryexpression assemblepretty drs drttokens imp self consequent pretty return drs def prettysubexself subex if isinstancesubex drtconcatenation return line1 1 for line in subex pretty return drtbooleanexpression prettysubexself subex def visitself function combinator see abstractexpression getrefs return self function getrefstrue self argument getrefstrue if recursive else def prettyself function args self uncurry functionlines function pretty argslines arg pretty for arg in args maxlines maxmaplen functionlines argslines functionlines padverticallyfunctionlines maxlines argslines padverticallyarglines maxlines for arglines in argslines funcargslines listzipfunctionlines listzipargslines return funcline joinargsline for funcline argsline in funcargslines 2 funcline joinargsline for funcline argsline in funcargslines2 3 funcline joinargsline for funcline argsline in funcargslines3 def padverticallylines maxlines padline lenlines0 return lines padline maxlines lenlines class possibleantecedentslist drtexpression expression def freeself replace all instances of variable v with expression e in self where v is free in self result possibleantecedents for item in self if item variable self appendexpression else self appenditem return result def prettyself s s self blank lens return blank blank s def strself return joins it for it in self class anaphoraresolutionexceptionexception pass def resolveanaphoraexpression trail if isinstanceexpression applicationexpression if expression ispronounfunction possibleantecedents possibleantecedents for ancestor in trail for ref in ancestor 
getrefs refex expression makevariableexpressionref don t allow resolution to itself or other types if refex class expression argument class and not refex expression argument possibleantecedents appendrefex if lenpossibleantecedents 1 resolution possibleantecedents0 else resolution possibleantecedents return expression makeequalityexpressionexpression argument resolution else rfunction resolveanaphoraexpression function trail expression rargument resolveanaphoraexpression argument trail expression return expression classrfunction rargument elif isinstanceexpression drs rconds for cond in expression conds rcond resolveanaphoracond trail expression if the condition is of the form x then raise exception if isinstancercond equalityexpression if isinstancercond first possibleantecedents reverse the order so that the variable is on the left temp rcond first rcond first rcond second rcond second temp if isinstancercond second possibleantecedents if not rcond second raise anaphoraresolutionexception variable s does not resolve to anything rcond first rconds appendrcond if expression consequent consequent resolveanaphoraexpression consequent trail expression else consequent none return expression classexpression refs rconds consequent elif isinstanceexpression abstractvariableexpression return expression elif isinstanceexpression negatedexpression return expression class resolveanaphoraexpression term trail expression elif isinstanceexpression drtconcatenation if expression consequent consequent resolveanaphoraexpression consequent trail expression else consequent none return expression class resolveanaphoraexpression first trail expression resolveanaphoraexpression second trail expression consequent elif isinstanceexpression binaryexpression return expression class resolveanaphoraexpression first trail expression resolveanaphoraexpression second trail expression elif isinstanceexpression lambdaexpression return expression class expression variable resolveanaphoraexpression term trail expression class drsdrawer buffer 3 space between elements topspace 10 space above whole drs outerspace 6 space to the left right and bottom of the while drs def initself drs sizecanvastrue canvasnone master none if not canvas master tk master titledrt font fontfamilyhelvetica size12 if sizecanvas canvas canvasmaster width0 height0 canvas font font self canvas canvas right bottom self visitdrs self outerspace self topspace width maxright self outerspace 100 height bottom self outerspace canvas canvasmaster widthwidth heightheight bg white else canvas canvasmaster width300 height300 canvas pack canvas font font self canvas canvas self drs drs self master master def gettextheightself draw the drs self handleself drs self drawcommand x y if self master and not inidle self master mainloop else return self visitself drs x y def visitself expression x y return self handleexpression self visitcommand x y def drawcommandself item x y if isinstanceitem str self canvas createtextx y anchornw fontself canvas font textitem elif isinstanceitem tuple item is the lowerright of a box right bottom item self canvas createrectanglex y right bottom horizliney y self gettextheight self buffer 2 the line separating refs from conds self canvas createlinex horizliney right horizliney return self visitcommanditem x y def visitcommandself item x y if isinstanceitem str return x self canvas font measureitem y self gettextheight elif isinstanceitem tuple return item def handleself expression command x0 y0 if command self visitcommand if we don t 
need to draw the item then we can use the cached values try attempt to retrieve cached values right expression drawingwidth x bottom expression drawingheight y return right bottom except attributeerror the values have not been cached yet so compute them pass if isinstanceexpression drtabstractvariableexpression factory self handlevariableexpression elif isinstanceexpression drs factory self handledrs elif isinstanceexpression drtnegatedexpression factory self handlenegatedexpression elif isinstanceexpression drtlambdaexpression factory self handlelambdaexpression elif isinstanceexpression binaryexpression factory self handlebinaryexpression elif isinstanceexpression drtapplicationexpression factory self handleapplicationexpression elif isinstanceexpression possibleantecedents factory self handlevariableexpression elif isinstanceexpression drtproposition factory self handledrtproposition else raise exceptionexpression class name right bottom factoryexpression command x y cache the values expression drawingwidth right x expression drawingheight bottom y return right bottom def handlevariableexpressionself expression command x y return commands expression x y def handlenegatedexpressionself expression command x y find the width of the negation symbol right self visitcommanddrttokens not x y0 handle term right bottom self handleexpression term command right y handle variables now that we know the ycoordinate command drttokens not x self getcenteredtopy bottom y self gettextheight return right bottom def handledrsself expression command x y left x self buffer indent the left side bottom y self buffer indent the top handle discourse referents if expression refs refs joins r for r in expression refs else refs maxright bottom commandrefs left bottom bottom self buffer 2 handle conditions if expression conds for cond in expression conds right bottom self handlecond command left bottom maxright maxmaxright right bottom self buffer else bottom self gettextheight self buffer handle box maxright self buffer return commandmaxright bottom x y def handleapplicationexpressionself expression command x y function args expression uncurry if not isinstancefunction drtabstractvariableexpression it s not a predicate expression px y so leave arguments curried function expression function args expression argument get the max bottom of any element on the line functionbottom self visitfunction x y1 maxbottom max functionbottom self visitarg x y1 for arg in args lineheight maxbottom y handle function functiondrawingtop self getcenteredtop y lineheight function drawingheight right self handlefunction command x functiondrawingtop0 handle open paren centredstringtop self getcenteredtop y lineheight self gettextheight right commanddrttokens open right centredstringtop0 handle each arg for i arg in enumerateargs argdrawingtop self getcenteredtop y lineheight arg drawingheight right self handlearg command right argdrawingtop0 if i 1 lenargs since it s not the last arg add a comma right commanddrttokens comma right centredstringtop0 handle close paren right commanddrttokens close right centredstringtop0 return right maxbottom def handlelambdaexpressionself expression command x y find the width of the lambda symbol and abstracted variables variables drttokens lambda s expression variable drttokens dot right self visitcommandvariables x y0 handle term right bottom self handleexpression term command right y handle variables now that we know the ycoordinate command variables x self getcenteredtopy bottom y self gettextheight 
return right bottom def handlebinaryexpressionself expression command x y get the full height of the line based on the operands firstheight self visitexpression first 0 01 secondheight self visitexpression second 0 01 lineheight maxfirstheight secondheight handle open paren centredstringtop self getcenteredtop y lineheight self gettextheight right commanddrttokens open x centredstringtop0 handle the first operand firstheight expression first drawingheight right firstbottom self handle expression first command right self getcenteredtopy lineheight firstheight handle the operator right command s expression getop right centredstringtop0 handle the second operand secondheight expression second drawingheight right secondbottom self handle expression second command right self getcenteredtopy lineheight secondheight handle close paren right commanddrttokens close right centredstringtop0 return right maxfirstbottom secondbottom def handledrtpropositionself expression command x y find the width of the negation symbol right commandexpression variable x y0 handle term right bottom self handleexpression term command right y return right bottom def getcenteredtopself top fullheight itemheight natural language toolkit discourse representation theory drt dan garrette dhgarrette gmail com c 2001 2023 nltk project url https www nltk org for license information see license txt import tkinter based modules if they are available no need to print a warning here nltk draw has already printed one a lambda calculus expression parser this method exists to be overridden this method is intended to be overridden for logics that use different operators or expressions a drs if there is a comma it s optional swallow the comma support expressions like drs x y c drs x y c swallow the comma support expressions like drs x y c drs x y c swallow the comma this method serves as a hook for other logic parsers that have different equality expression classes this method serves as a hook for other logic parsers that have different boolean operators this is the base abstract drt expression from which every drt expression extends check for logical equivalence pass the expression self other to the theorem prover if the prover says it is valid then the self and other are equal param other an drtexpression to check equality against param prover a nltk inference api prover return the set of discourse referents in this drs param recursive bool also find discourse referents in subterms return list of variable objects is self of the form pro x draw the drs return the pretty print string a discourse representation structure param refs list of drtindividualvariableexpression for the discourse referents param conds list of expression for the conditions replace all instances of variable v with expression e in self where v is free in self if a bound variable is the thing being replaced any bound variable that appears in the expression must be alpha converted to avoid a conflict replace in the conditions see expression free see abstractexpression get_refs see expression visit see expression visit_structured defines equality modulo alphabetic variance if we are comparing x m and y n then check equality of m and n x y map str self conds this is a factory method that instantiates and returns a subtype of drtabstractvariableexpression appropriate for the given variable see abstractexpression get_refs see expression visit see expression visit_structured see abstractexpression get_refs rename all occurrences of the variable introduced by this variable 
binder in the expression to newvar param newvar variable for the new variable see abstractexpression get_refs see abstractexpression get_refs drs of the form drs drs replace all instances of variable v with expression e in self where v is free in self if variable is bound alpha convert every ref that is free in expression todo at some point for now simplify for any ref that is in both first and second alpha convert the ref in second to prevent collision see abstractexpression get_refs defines equality modulo alphabetic variance if we are comparing x m and y n then check equality of m and n x y see expression visit see abstractexpression get_refs set of free variables replace all instances of variable v with expression e in self where v is free in self don t allow resolution to itself or other types if the condition is of the form x then raise exception reverse the order so that the variable is on the left space between elements space above whole drs space to the left right and bottom of the while drs param drs drtexpression the drs to be drawn param size_canvas bool true if the canvas size should be the exact size of the drs param canvas canvas the canvas on which to draw the drs if none is given create a new canvas bg white get the height of a line of text draw the drs return the bottom rightmost point without actually drawing the item param expression the item to visit param x the top of the current drawing area param y the left side of the current drawing area return the bottom rightmost point draw the given item at the given location param item the item to draw param x the top of the current drawing area param y the left side of the current drawing area return the bottom rightmost point item is the lower right of a box the line separating refs from conds return the bottom rightmost point without actually drawing the item param item the item to visit param x the top of the current drawing area param y the left side of the current drawing area return the bottom rightmost point param expression the expression to handle param command the function to apply either _draw_command or _visit_command param x the top of the current drawing area param y the left side of the current drawing area return the bottom rightmost point if we don t need to draw the item then we can use the cached values attempt to retrieve cached values the values have not been cached yet so compute them cache the values find the width of the negation symbol handle term handle variables now that we know the y coordinate indent the left side indent the top handle discourse referents handle conditions handle box it s not a predicate expression p x y so leave arguments curried get the max bottom of any element on the line handle function handle open paren handle each arg since it s not the last arg add a comma handle close paren find the width of the lambda symbol and abstracted variables handle term handle variables now that we know the y coordinate get the full height of the line based on the operands handle open paren handle the first operand handle the operator handle the second operand handle close paren find the width of the negation symbol handle term get the y coordinate of the point that a figure should start at if its height is item_height and it needs to be centered in an area that starts at top and is full_height tall
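The description above covers the public entry points of the module: a parser for the box notation (DrtExpression.fromstring), anaphora resolution (resolve_anaphora), translation to first-order logic (fol), and pretty printing/drawing; the implementation follows below. A minimal usage sketch of that API, assuming only a standard NLTK installation (the predicates and referent names are illustrative):

from nltk.sem.drt import DrtExpression, resolve_anaphora

# Parse a DRS in box notation: two referents and a pronoun condition PRO(y).
drs = DrtExpression.fromstring("([x,y],[dog(x), walks(y), PRO(y)])")

# resolve_anaphora() replaces each PRO(...) condition with an equation
# linking the pronoun to an accessible referent of the same variable type.
resolved = resolve_anaphora(drs)
print(resolved)        # ([x,y],[dog(x), walks(y), (y = x)])

# fol() translates the resolved DRS into a first-order logic formula.
print(resolved.fol())  # exists x y.(dog(x) & walks(y) & (y = x))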
import operator from functools import reduce from itertools import chain from nltk.sem.logic import ( APP, AbstractVariableExpression, AllExpression, AndExpression, ApplicationExpression, BinaryExpression, BooleanExpression, ConstantExpression, EqualityExpression, EventVariableExpression, ExistsExpression, Expression, FunctionVariableExpression, ImpExpression, IndividualVariableExpression, LambdaExpression, LogicParser, NegatedExpression, OrExpression, Tokens, Variable, is_eventvar, is_funcvar, is_indvar, unique_variable, ) try: from tkinter import Canvas, Tk from tkinter.font import Font from nltk.util import in_idle except ImportError: pass class DrtTokens(Tokens): DRS = "DRS" DRS_CONC = "+" PRONOUN = "PRO" OPEN_BRACKET = "[" CLOSE_BRACKET = "]" COLON = ":" PUNCT = [DRS_CONC, OPEN_BRACKET, CLOSE_BRACKET, COLON] SYMBOLS = Tokens.SYMBOLS + PUNCT TOKENS = Tokens.TOKENS + [DRS] + PUNCT class DrtParser(LogicParser): def __init__(self): LogicParser.__init__(self) self.operator_precedence = dict( [(x, 1) for x in DrtTokens.LAMBDA_LIST] + [(x, 2) for x in DrtTokens.NOT_LIST] + [(APP, 3)] + [(x, 4) for x in DrtTokens.EQ_LIST + Tokens.NEQ_LIST] + [(DrtTokens.COLON, 5)] + [(DrtTokens.DRS_CONC, 6)] + [(x, 7) for x in DrtTokens.OR_LIST] + [(x, 8) for x in DrtTokens.IMP_LIST] + [(None, 9)] ) def get_all_symbols(self): return DrtTokens.SYMBOLS def isvariable(self, tok): return tok not in DrtTokens.TOKENS def handle(self, tok, context): if tok in DrtTokens.NOT_LIST: return self.handle_negation(tok, context) elif tok in DrtTokens.LAMBDA_LIST: return self.handle_lambda(tok, context) elif tok == DrtTokens.OPEN: if self.inRange(0) and self.token(0) == DrtTokens.OPEN_BRACKET: return self.handle_DRS(tok, context) else: return self.handle_open(tok, context) elif tok.upper() == DrtTokens.DRS: self.assertNextToken(DrtTokens.OPEN) return self.handle_DRS(tok, context) elif self.isvariable(tok): if self.inRange(0) and self.token(0) == DrtTokens.COLON: return self.handle_prop(tok, context) else: return self.handle_variable(tok, context) def make_NegatedExpression(self, expression): return DrtNegatedExpression(expression) def handle_DRS(self, tok, context): refs = self.handle_refs() if ( self.inRange(0) and self.token(0) == DrtTokens.COMMA ): self.token() conds = self.handle_conds(context) self.assertNextToken(DrtTokens.CLOSE) return DRS(refs, conds, None) def handle_refs(self): self.assertNextToken(DrtTokens.OPEN_BRACKET) refs = [] while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: if refs and self.token(0) == DrtTokens.COMMA: self.token() refs.append(self.get_next_token_variable("quantified")) self.assertNextToken(DrtTokens.CLOSE_BRACKET) return refs def handle_conds(self, context): self.assertNextToken(DrtTokens.OPEN_BRACKET) conds = [] while self.inRange(0) and self.token(0) != DrtTokens.CLOSE_BRACKET: if conds and self.token(0) == DrtTokens.COMMA: self.token() conds.append(self.process_next_expression(context)) self.assertNextToken(DrtTokens.CLOSE_BRACKET) return conds def handle_prop(self, tok, context): variable = self.make_VariableExpression(tok) self.assertNextToken(":") drs = self.process_next_expression(DrtTokens.COLON) return DrtProposition(variable, drs) def make_EqualityExpression(self, first, second): return DrtEqualityExpression(first, second) def get_BooleanExpression_factory(self, tok): if tok == DrtTokens.DRS_CONC: return lambda first, second: DrtConcatenation(first, second, None) elif tok in DrtTokens.OR_LIST: return DrtOrExpression elif tok in DrtTokens.IMP_LIST: def 
make_imp_expression(first, second): if isinstance(first, DRS): return DRS(first.refs, first.conds, second) if isinstance(first, DrtConcatenation): return DrtConcatenation(first.first, first.second, second) raise Exception("Antecedent of implication must be a DRS") return make_imp_expression else: return None def make_BooleanExpression(self, factory, first, second): return factory(first, second) def make_ApplicationExpression(self, function, argument): return DrtApplicationExpression(function, argument) def make_VariableExpression(self, name): return DrtVariableExpression(Variable(name)) def make_LambdaExpression(self, variables, term): return DrtLambdaExpression(variables, term) class DrtExpression: _drt_parser = DrtParser() @classmethod def fromstring(cls, s): return cls._drt_parser.parse(s) def applyto(self, other): return DrtApplicationExpression(self, other) def __neg__(self): return DrtNegatedExpression(self) def __and__(self, other): return NotImplemented def __or__(self, other): assert isinstance(other, DrtExpression) return DrtOrExpression(self, other) def __gt__(self, other): assert isinstance(other, DrtExpression) if isinstance(self, DRS): return DRS(self.refs, self.conds, other) if isinstance(self, DrtConcatenation): return DrtConcatenation(self.first, self.second, other) raise Exception("Antecedent of implication must be a DRS") def equiv(self, other, prover=None): assert isinstance(other, DrtExpression) f1 = self.simplify().fol() f2 = other.simplify().fol() return f1.equiv(f2, prover) @property def type(self): raise AttributeError( "'%s' object has no attribute 'type'" % self.__class__.__name__ ) def typecheck(self, signature=None): raise NotImplementedError() def __add__(self, other): return DrtConcatenation(self, other, None) def get_refs(self, recursive=False): raise NotImplementedError() def is_pronoun_function(self): return ( isinstance(self, DrtApplicationExpression) and isinstance(self.function, DrtAbstractVariableExpression) and self.function.variable.name == DrtTokens.PRONOUN and isinstance(self.argument, DrtIndividualVariableExpression) ) def make_EqualityExpression(self, first, second): return DrtEqualityExpression(first, second) def make_VariableExpression(self, variable): return DrtVariableExpression(variable) def resolve_anaphora(self): return resolve_anaphora(self) def eliminate_equality(self): return self.visit_structured(lambda e: e.eliminate_equality(), self.__class__) def pretty_format(self): return "\n".join(self._pretty()) def pretty_print(self): print(self.pretty_format()) def draw(self): DrsDrawer(self).draw() class DRS(DrtExpression, Expression): def __init__(self, refs, conds, consequent=None): self.refs = refs self.conds = conds self.consequent = consequent def replace(self, variable, expression, replace_bound=False, alpha_convert=True): if variable in self.refs: if not replace_bound: return self else: i = self.refs.index(variable) if self.consequent: consequent = self.consequent.replace( variable, expression, True, alpha_convert ) else: consequent = None return DRS( self.refs[:i] + [expression.variable] + self.refs[i + 1 :], [ cond.replace(variable, expression, True, alpha_convert) for cond in self.conds ], consequent, ) else: if alpha_convert: for ref in set(self.refs) & expression.free(): newvar = unique_variable(ref) newvarex = DrtVariableExpression(newvar) i = self.refs.index(ref) if self.consequent: consequent = self.consequent.replace( ref, newvarex, True, alpha_convert ) else: consequent = None self = DRS( self.refs[:i] + [newvar] + 
self.refs[i + 1 :], [ cond.replace(ref, newvarex, True, alpha_convert) for cond in self.conds ], consequent, ) if self.consequent: consequent = self.consequent.replace( variable, expression, replace_bound, alpha_convert ) else: consequent = None return DRS( self.refs, [ cond.replace(variable, expression, replace_bound, alpha_convert) for cond in self.conds ], consequent, ) def free(self): conds_free = reduce(operator.or_, [c.free() for c in self.conds], set()) if self.consequent: conds_free.update(self.consequent.free()) return conds_free - set(self.refs) def get_refs(self, recursive=False): if recursive: conds_refs = self.refs + list( chain.from_iterable(c.get_refs(True) for c in self.conds) ) if self.consequent: conds_refs.extend(self.consequent.get_refs(True)) return conds_refs else: return self.refs def visit(self, function, combinator): parts = list(map(function, self.conds)) if self.consequent: parts.append(function(self.consequent)) return combinator(parts) def visit_structured(self, function, combinator): consequent = function(self.consequent) if self.consequent else None return combinator(self.refs, list(map(function, self.conds)), consequent) def eliminate_equality(self): drs = self i = 0 while i < len(drs.conds): cond = drs.conds[i] if ( isinstance(cond, EqualityExpression) and isinstance(cond.first, AbstractVariableExpression) and isinstance(cond.second, AbstractVariableExpression) ): drs = DRS( list(set(drs.refs) - {cond.second.variable}), drs.conds[:i] + drs.conds[i + 1 :], drs.consequent, ) if cond.second.variable != cond.first.variable: drs = drs.replace(cond.second.variable, cond.first, False, False) i = 0 i -= 1 i += 1 conds = [] for cond in drs.conds: new_cond = cond.eliminate_equality() new_cond_simp = new_cond.simplify() if ( not isinstance(new_cond_simp, DRS) or new_cond_simp.refs or new_cond_simp.conds or new_cond_simp.consequent ): conds.append(new_cond) consequent = drs.consequent.eliminate_equality() if drs.consequent else None return DRS(drs.refs, conds, consequent) def fol(self): if self.consequent: accum = None if self.conds: accum = reduce(AndExpression, [c.fol() for c in self.conds]) if accum: accum = ImpExpression(accum, self.consequent.fol()) else: accum = self.consequent.fol() for ref in self.refs[::-1]: accum = AllExpression(ref, accum) return accum else: if not self.conds: raise Exception("Cannot convert DRS with no conditions to FOL.") accum = reduce(AndExpression, [c.fol() for c in self.conds]) for ref in map(Variable, self._order_ref_strings(self.refs)[::-1]): accum = ExistsExpression(ref, accum) return accum def _pretty(self): refs_line = " ".join(self._order_ref_strings(self.refs)) cond_lines = [ cond for cond_line in [ filter(lambda s: s.strip(), cond._pretty()) for cond in self.conds ] for cond in cond_line ] length = max([len(refs_line)] + list(map(len, cond_lines))) drs = ( [ " _" + "_" * length + "_ ", "| " + refs_line.ljust(length) + " |", "|-" + "-" * length + "-|", ] + ["| " + line.ljust(length) + " |" for line in cond_lines] + ["|_" + "_" * length + "_|"] ) if self.consequent: return DrtBinaryExpression._assemble_pretty( drs, DrtTokens.IMP, self.consequent._pretty() ) return drs def _order_ref_strings(self, refs): strings = ["%s" % ref for ref in refs] ind_vars = [] func_vars = [] event_vars = [] other_vars = [] for s in strings: if is_indvar(s): ind_vars.append(s) elif is_funcvar(s): func_vars.append(s) elif is_eventvar(s): event_vars.append(s) else: other_vars.append(s) return ( sorted(other_vars) + sorted(event_vars, key=lambda v: 
int([v[2:], -1][len(v[2:]) == 0])) + sorted(func_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) + sorted(ind_vars, key=lambda v: (v[0], int([v[1:], -1][len(v[1:]) == 0]))) ) def __eq__(self, other): r if isinstance(other, DRS): if len(self.refs) == len(other.refs): converted_other = other for (r1, r2) in zip(self.refs, converted_other.refs): varex = self.make_VariableExpression(r1) converted_other = converted_other.replace(r2, varex, True) if self.consequent == converted_other.consequent and len( self.conds ) == len(converted_other.conds): for c1, c2 in zip(self.conds, converted_other.conds): if not (c1 == c2): return False return True return False def __ne__(self, other): return not self == other __hash__ = Expression.__hash__ def __str__(self): drs = "([{}],[{}])".format( ",".join(self._order_ref_strings(self.refs)), ", ".join("%s" % cond for cond in self.conds), ) if self.consequent: return ( DrtTokens.OPEN + drs + " " + DrtTokens.IMP + " " + "%s" % self.consequent + DrtTokens.CLOSE ) return drs def DrtVariableExpression(variable): if is_indvar(variable.name): return DrtIndividualVariableExpression(variable) elif is_funcvar(variable.name): return DrtFunctionVariableExpression(variable) elif is_eventvar(variable.name): return DrtEventVariableExpression(variable) else: return DrtConstantExpression(variable) class DrtAbstractVariableExpression(DrtExpression, AbstractVariableExpression): def fol(self): return self def get_refs(self, recursive=False): return [] def _pretty(self): s = "%s" % self blank = " " * len(s) return [blank, blank, s, blank] def eliminate_equality(self): return self class DrtIndividualVariableExpression( DrtAbstractVariableExpression, IndividualVariableExpression ): pass class DrtFunctionVariableExpression( DrtAbstractVariableExpression, FunctionVariableExpression ): pass class DrtEventVariableExpression( DrtIndividualVariableExpression, EventVariableExpression ): pass class DrtConstantExpression(DrtAbstractVariableExpression, ConstantExpression): pass class DrtProposition(DrtExpression, Expression): def __init__(self, variable, drs): self.variable = variable self.drs = drs def replace(self, variable, expression, replace_bound=False, alpha_convert=True): if self.variable == variable: assert isinstance( expression, DrtAbstractVariableExpression ), "Can only replace a proposition label with a variable" return DrtProposition( expression.variable, self.drs.replace(variable, expression, replace_bound, alpha_convert), ) else: return DrtProposition( self.variable, self.drs.replace(variable, expression, replace_bound, alpha_convert), ) def eliminate_equality(self): return DrtProposition(self.variable, self.drs.eliminate_equality()) def get_refs(self, recursive=False): return self.drs.get_refs(True) if recursive else [] def __eq__(self, other): return ( self.__class__ == other.__class__ and self.variable == other.variable and self.drs == other.drs ) def __ne__(self, other): return not self == other __hash__ = Expression.__hash__ def fol(self): return self.drs.fol() def _pretty(self): drs_s = self.drs._pretty() blank = " " * len("%s" % self.variable) return ( [blank + " " + line for line in drs_s[:1]] + ["%s" % self.variable + ":" + line for line in drs_s[1:2]] + [blank + " " + line for line in drs_s[2:]] ) def visit(self, function, combinator): return combinator([function(self.drs)]) def visit_structured(self, function, combinator): return combinator(self.variable, function(self.drs)) def __str__(self): return f"prop({self.variable}, {self.drs})" class 
DrtNegatedExpression(DrtExpression, NegatedExpression): def fol(self): return NegatedExpression(self.term.fol()) def get_refs(self, recursive=False): return self.term.get_refs(recursive) def _pretty(self): term_lines = self.term._pretty() return ( [" " + line for line in term_lines[:2]] + ["__ " + line for line in term_lines[2:3]] + [" | " + line for line in term_lines[3:4]] + [" " + line for line in term_lines[4:]] ) class DrtLambdaExpression(DrtExpression, LambdaExpression): def alpha_convert(self, newvar): return self.__class__( newvar, self.term.replace(self.variable, DrtVariableExpression(newvar), True), ) def fol(self): return LambdaExpression(self.variable, self.term.fol()) def _pretty(self): variables = [self.variable] term = self.term while term.__class__ == self.__class__: variables.append(term.variable) term = term.term var_string = " ".join("%s" % v for v in variables) + DrtTokens.DOT term_lines = term._pretty() blank = " " * len(var_string) return ( [" " + blank + line for line in term_lines[:1]] + [r" \ " + blank + line for line in term_lines[1:2]] + [r" /\ " + var_string + line for line in term_lines[2:3]] + [" " + blank + line for line in term_lines[3:]] ) def get_refs(self, recursive=False): return ( [self.variable] + self.term.get_refs(True) if recursive else [self.variable] ) class DrtBinaryExpression(DrtExpression, BinaryExpression): def get_refs(self, recursive=False): return ( self.first.get_refs(True) + self.second.get_refs(True) if recursive else [] ) def _pretty(self): return DrtBinaryExpression._assemble_pretty( self._pretty_subex(self.first), self.getOp(), self._pretty_subex(self.second), ) @staticmethod def _assemble_pretty(first_lines, op, second_lines): max_lines = max(len(first_lines), len(second_lines)) first_lines = _pad_vertically(first_lines, max_lines) second_lines = _pad_vertically(second_lines, max_lines) blank = " " * len(op) first_second_lines = list(zip(first_lines, second_lines)) return ( [ " " + first_line + " " + blank + " " + second_line + " " for first_line, second_line in first_second_lines[:2] ] + [ "(" + first_line + " " + op + " " + second_line + ")" for first_line, second_line in first_second_lines[2:3] ] + [ " " + first_line + " " + blank + " " + second_line + " " for first_line, second_line in first_second_lines[3:] ] ) def _pretty_subex(self, subex): return subex._pretty() class DrtBooleanExpression(DrtBinaryExpression, BooleanExpression): pass class DrtOrExpression(DrtBooleanExpression, OrExpression): def fol(self): return OrExpression(self.first.fol(), self.second.fol()) def _pretty_subex(self, subex): if isinstance(subex, DrtOrExpression): return [line[1:-1] for line in subex._pretty()] return DrtBooleanExpression._pretty_subex(self, subex) class DrtEqualityExpression(DrtBinaryExpression, EqualityExpression): def fol(self): return EqualityExpression(self.first.fol(), self.second.fol()) class DrtConcatenation(DrtBooleanExpression): def __init__(self, first, second, consequent=None): DrtBooleanExpression.__init__(self, first, second) self.consequent = consequent def replace(self, variable, expression, replace_bound=False, alpha_convert=True): first = self.first second = self.second consequent = self.consequent if variable in self.get_refs(): if replace_bound: first = first.replace( variable, expression, replace_bound, alpha_convert ) second = second.replace( variable, expression, replace_bound, alpha_convert ) if consequent: consequent = consequent.replace( variable, expression, replace_bound, alpha_convert ) else: if alpha_convert: 
for ref in set(self.get_refs(True)) & expression.free(): v = DrtVariableExpression(unique_variable(ref)) first = first.replace(ref, v, True, alpha_convert) second = second.replace(ref, v, True, alpha_convert) if consequent: consequent = consequent.replace(ref, v, True, alpha_convert) first = first.replace(variable, expression, replace_bound, alpha_convert) second = second.replace(variable, expression, replace_bound, alpha_convert) if consequent: consequent = consequent.replace( variable, expression, replace_bound, alpha_convert ) return self.__class__(first, second, consequent) def eliminate_equality(self): drs = self.simplify() assert not isinstance(drs, DrtConcatenation) return drs.eliminate_equality() def simplify(self): first = self.first.simplify() second = self.second.simplify() consequent = self.consequent.simplify() if self.consequent else None if isinstance(first, DRS) and isinstance(second, DRS): for ref in set(first.get_refs(True)) & set(second.get_refs(True)): newvar = DrtVariableExpression(unique_variable(ref)) second = second.replace(ref, newvar, True) return DRS(first.refs + second.refs, first.conds + second.conds, consequent) else: return self.__class__(first, second, consequent) def get_refs(self, recursive=False): refs = self.first.get_refs(recursive) + self.second.get_refs(recursive) if self.consequent and recursive: refs.extend(self.consequent.get_refs(True)) return refs def getOp(self): return DrtTokens.DRS_CONC def __eq__(self, other): r if isinstance(other, DrtConcatenation): self_refs = self.get_refs() other_refs = other.get_refs() if len(self_refs) == len(other_refs): converted_other = other for (r1, r2) in zip(self_refs, other_refs): varex = self.make_VariableExpression(r1) converted_other = converted_other.replace(r2, varex, True) return ( self.first == converted_other.first and self.second == converted_other.second and self.consequent == converted_other.consequent ) return False def __ne__(self, other): return not self == other __hash__ = DrtBooleanExpression.__hash__ def fol(self): e = AndExpression(self.first.fol(), self.second.fol()) if self.consequent: e = ImpExpression(e, self.consequent.fol()) return e def _pretty(self): drs = DrtBinaryExpression._assemble_pretty( self._pretty_subex(self.first), self.getOp(), self._pretty_subex(self.second), ) if self.consequent: drs = DrtBinaryExpression._assemble_pretty( drs, DrtTokens.IMP, self.consequent._pretty() ) return drs def _pretty_subex(self, subex): if isinstance(subex, DrtConcatenation): return [line[1:-1] for line in subex._pretty()] return DrtBooleanExpression._pretty_subex(self, subex) def visit(self, function, combinator): if self.consequent: return combinator( [function(self.first), function(self.second), function(self.consequent)] ) else: return combinator([function(self.first), function(self.second)]) def __str__(self): first = self._str_subex(self.first) second = self._str_subex(self.second) drs = Tokens.OPEN + first + " " + self.getOp() + " " + second + Tokens.CLOSE if self.consequent: return ( DrtTokens.OPEN + drs + " " + DrtTokens.IMP + " " + "%s" % self.consequent + DrtTokens.CLOSE ) return drs def _str_subex(self, subex): s = "%s" % subex if isinstance(subex, DrtConcatenation) and subex.consequent is None: return s[1:-1] return s class DrtApplicationExpression(DrtExpression, ApplicationExpression): def fol(self): return ApplicationExpression(self.function.fol(), self.argument.fol()) def get_refs(self, recursive=False): return ( self.function.get_refs(True) + self.argument.get_refs(True) if 
recursive else [] ) def _pretty(self): function, args = self.uncurry() function_lines = function._pretty() args_lines = [arg._pretty() for arg in args] max_lines = max(map(len, [function_lines] + args_lines)) function_lines = _pad_vertically(function_lines, max_lines) args_lines = [_pad_vertically(arg_lines, max_lines) for arg_lines in args_lines] func_args_lines = list(zip(function_lines, list(zip(*args_lines)))) return ( [ func_line + " " + " ".join(args_line) + " " for func_line, args_line in func_args_lines[:2] ] + [ func_line + "(" + ",".join(args_line) + ")" for func_line, args_line in func_args_lines[2:3] ] + [ func_line + " " + " ".join(args_line) + " " for func_line, args_line in func_args_lines[3:] ] ) def _pad_vertically(lines, max_lines): pad_line = [" " * len(lines[0])] return lines + pad_line * (max_lines - len(lines)) class PossibleAntecedents(list, DrtExpression, Expression): def free(self): return set(self) def replace(self, variable, expression, replace_bound=False, alpha_convert=True): result = PossibleAntecedents() for item in self: if item == variable: result.append(expression) else: result.append(item) return result def _pretty(self): s = "%s" % self blank = " " * len(s) return [blank, blank, s] def __str__(self): return "[" + ",".join("%s" % it for it in self) + "]" class AnaphoraResolutionException(Exception): pass def resolve_anaphora(expression, trail=[]): if isinstance(expression, ApplicationExpression): if expression.is_pronoun_function(): possible_antecedents = PossibleAntecedents() for ancestor in trail: for ref in ancestor.get_refs(): refex = expression.make_VariableExpression(ref) if refex.__class__ == expression.argument.__class__ and not ( refex == expression.argument ): possible_antecedents.append(refex) if len(possible_antecedents) == 1: resolution = possible_antecedents[0] else: resolution = possible_antecedents return expression.make_EqualityExpression(expression.argument, resolution) else: r_function = resolve_anaphora(expression.function, trail + [expression]) r_argument = resolve_anaphora(expression.argument, trail + [expression]) return expression.__class__(r_function, r_argument) elif isinstance(expression, DRS): r_conds = [] for cond in expression.conds: r_cond = resolve_anaphora(cond, trail + [expression]) if isinstance(r_cond, EqualityExpression): if isinstance(r_cond.first, PossibleAntecedents): temp = r_cond.first r_cond.first = r_cond.second r_cond.second = temp if isinstance(r_cond.second, PossibleAntecedents): if not r_cond.second: raise AnaphoraResolutionException( "Variable '%s' does not " "resolve to anything."
% r_cond.first ) r_conds.append(r_cond) if expression.consequent: consequent = resolve_anaphora(expression.consequent, trail + [expression]) else: consequent = None return expression.__class__(expression.refs, r_conds, consequent) elif isinstance(expression, AbstractVariableExpression): return expression elif isinstance(expression, NegatedExpression): return expression.__class__( resolve_anaphora(expression.term, trail + [expression]) ) elif isinstance(expression, DrtConcatenation): if expression.consequent: consequent = resolve_anaphora(expression.consequent, trail + [expression]) else: consequent = None return expression.__class__( resolve_anaphora(expression.first, trail + [expression]), resolve_anaphora(expression.second, trail + [expression]), consequent, ) elif isinstance(expression, BinaryExpression): return expression.__class__( resolve_anaphora(expression.first, trail + [expression]), resolve_anaphora(expression.second, trail + [expression]), ) elif isinstance(expression, LambdaExpression): return expression.__class__( expression.variable, resolve_anaphora(expression.term, trail + [expression]) ) class DrsDrawer: BUFFER = 3 TOPSPACE = 10 OUTERSPACE = 6 def __init__(self, drs, size_canvas=True, canvas=None): master = None if not canvas: master = Tk() master.title("DRT") font = Font(family="helvetica", size=12) if size_canvas: canvas = Canvas(master, width=0, height=0) canvas.font = font self.canvas = canvas (right, bottom) = self._visit(drs, self.OUTERSPACE, self.TOPSPACE) width = max(right + self.OUTERSPACE, 100) height = bottom + self.OUTERSPACE canvas = Canvas(master, width=width, height=height) else: canvas = Canvas(master, width=300, height=300) canvas.pack() canvas.font = font self.canvas = canvas self.drs = drs self.master = master def _get_text_height(self): return self.canvas.font.metrics("linespace") def draw(self, x=OUTERSPACE, y=TOPSPACE): self._handle(self.drs, self._draw_command, x, y) if self.master and not in_idle(): self.master.mainloop() else: return self._visit(self.drs, x, y) def _visit(self, expression, x, y): return self._handle(expression, self._visit_command, x, y) def _draw_command(self, item, x, y): if isinstance(item, str): self.canvas.create_text(x, y, anchor="nw", font=self.canvas.font, text=item) elif isinstance(item, tuple): (right, bottom) = item self.canvas.create_rectangle(x, y, right, bottom) horiz_line_y = ( y + self._get_text_height() + (self.BUFFER * 2) ) self.canvas.create_line(x, horiz_line_y, right, horiz_line_y) return self._visit_command(item, x, y) def _visit_command(self, item, x, y): if isinstance(item, str): return (x + self.canvas.font.measure(item), y + self._get_text_height()) elif isinstance(item, tuple): return item def _handle(self, expression, command, x=0, y=0): if command == self._visit_command: try: right = expression._drawing_width + x bottom = expression._drawing_height + y return (right, bottom) except AttributeError: pass if isinstance(expression, DrtAbstractVariableExpression): factory = self._handle_VariableExpression elif isinstance(expression, DRS): factory = self._handle_DRS elif isinstance(expression, DrtNegatedExpression): factory = self._handle_NegatedExpression elif isinstance(expression, DrtLambdaExpression): factory = self._handle_LambdaExpression elif isinstance(expression, BinaryExpression): factory = self._handle_BinaryExpression elif isinstance(expression, DrtApplicationExpression): factory = self._handle_ApplicationExpression elif isinstance(expression, PossibleAntecedents): factory = 
self._handle_VariableExpression elif isinstance(expression, DrtProposition): factory = self._handle_DrtProposition else: raise Exception(expression.__class__.__name__) (right, bottom) = factory(expression, command, x, y) expression._drawing_width = right - x expression._drawing_height = bottom - y return (right, bottom) def _handle_VariableExpression(self, expression, command, x, y): return command("%s" % expression, x, y) def _handle_NegatedExpression(self, expression, command, x, y): right = self._visit_command(DrtTokens.NOT, x, y)[0] (right, bottom) = self._handle(expression.term, command, right, y) command( DrtTokens.NOT, x, self._get_centered_top(y, bottom - y, self._get_text_height()), ) return (right, bottom) def _handle_DRS(self, expression, command, x, y): left = x + self.BUFFER bottom = y + self.BUFFER if expression.refs: refs = " ".join("%s" % r for r in expression.refs) else: refs = " " (max_right, bottom) = command(refs, left, bottom) bottom += self.BUFFER * 2 if expression.conds: for cond in expression.conds: (right, bottom) = self._handle(cond, command, left, bottom) max_right = max(max_right, right) bottom += self.BUFFER else: bottom += self._get_text_height() + self.BUFFER max_right += self.BUFFER return command((max_right, bottom), x, y) def _handle_ApplicationExpression(self, expression, command, x, y): function, args = expression.uncurry() if not isinstance(function, DrtAbstractVariableExpression): function = expression.function args = [expression.argument] function_bottom = self._visit(function, x, y)[1] max_bottom = max( [function_bottom] + [self._visit(arg, x, y)[1] for arg in args] ) line_height = max_bottom - y function_drawing_top = self._get_centered_top( y, line_height, function._drawing_height ) right = self._handle(function, command, x, function_drawing_top)[0] centred_string_top = self._get_centered_top( y, line_height, self._get_text_height() ) right = command(DrtTokens.OPEN, right, centred_string_top)[0] for (i, arg) in enumerate(args): arg_drawing_top = self._get_centered_top( y, line_height, arg._drawing_height ) right = self._handle(arg, command, right, arg_drawing_top)[0] if i + 1 < len(args): right = command(DrtTokens.COMMA + " ", right, centred_string_top)[0] right = command(DrtTokens.CLOSE, right, centred_string_top)[0] return (right, max_bottom) def _handle_LambdaExpression(self, expression, command, x, y): variables = DrtTokens.LAMBDA + "%s" % expression.variable + DrtTokens.DOT right = self._visit_command(variables, x, y)[0] (right, bottom) = self._handle(expression.term, command, right, y) command( variables, x, self._get_centered_top(y, bottom - y, self._get_text_height()) ) return (right, bottom) def _handle_BinaryExpression(self, expression, command, x, y): first_height = self._visit(expression.first, 0, 0)[1] second_height = self._visit(expression.second, 0, 0)[1] line_height = max(first_height, second_height) centred_string_top = self._get_centered_top( y, line_height, self._get_text_height() ) right = command(DrtTokens.OPEN, x, centred_string_top)[0] first_height = expression.first._drawing_height (right, first_bottom) = self._handle( expression.first, command, right, self._get_centered_top(y, line_height, first_height), ) right = command(" %s " % expression.getOp(), right, centred_string_top)[0] second_height = expression.second._drawing_height (right, second_bottom) = self._handle( expression.second, command, right, self._get_centered_top(y, line_height, second_height), ) right = command(DrtTokens.CLOSE, right, centred_string_top)[0] 
return (right, max(first_bottom, second_bottom)) def _handle_DrtProposition(self, expression, command, x, y): right = command(expression.variable, x, y)[0] (right, bottom) = self._handle(expression.term, command, right, y) return (right, bottom) def _get_centered_top(self, top, full_height, item_height): return top + (full_height - item_height) / 2 def demo(): print("=" * 20 + "TEST PARSE" + "=" * 20) dexpr = DrtExpression.fromstring print(dexpr(r"([x,y],[sees(x,y)])")) print(dexpr(r"([x],[man(x), walks(x)])")) print(dexpr(r"\x.\y.([],[sees(x,y)])")) print(dexpr(r"\x.([],[walks(x)])(john)")) print(dexpr(r"(([x],[walks(x)]) + ([y],[runs(y)]))")) print(dexpr(r"(([],[walks(x)]) -> ([],[runs(x)]))")) print(dexpr(r"([x],[PRO(x), sees(John,x)])")) print(dexpr(r"([x],[man(x), -([],[walks(x)])])")) print(dexpr(r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])")) print("=" * 20 + "Test fol()" + "=" * 20) print(dexpr(r"([x,y],[sees(x,y)])").fol()) print("=" * 20 + "Test alpha conversion and lambda expression equality" + "=" * 20) e1 = dexpr(r"\x.([],[P(x)])") print(e1) e2 = e1.alpha_convert(Variable("z")) print(e2) print(e1 == e2) print("=" * 20 + "Test resolve_anaphora()" + "=" * 20) print(resolve_anaphora(dexpr(r"([x,y,z],[dog(x), cat(y), walks(z), PRO(z)])"))) print( resolve_anaphora(dexpr(r"([],[(([x],[dog(x)]) -> ([y],[walks(y), PRO(y)]))])")) ) print(resolve_anaphora(dexpr(r"(([x,y],[]) + ([],[PRO(x)]))"))) print("=" * 20 + "Test pretty_print()" + "=" * 20) dexpr(r"([],[])").pretty_print() dexpr( r"([],[([x],[big(x), dog(x)]) -> ([],[bark(x)]) -([x],[walk(x)])])" ).pretty_print() dexpr(r"([x,y],[x=y]) + ([z],[dog(z), walk(z)])").pretty_print() dexpr(r"([],[([x],[]) | ([y],[]) | ([z],[dog(z), walk(z)])])").pretty_print() dexpr(r"\P.\Q.(([x],[]) + P(x) + Q(x))(\x.([],[dog(x)]))").pretty_print() def test_draw(): try: from tkinter import Tk except ImportError as e: raise ValueError("tkinter is required, but it's not available.") expressions = [ r"x", r"([],[])", r"([x],[])", r"([x],[man(x)])", r"([x,y],[sees(x,y)])", r"([x],[man(x), walks(x)])", r"\x.([],[man(x), walks(x)])", r"\x y.([],[sees(x,y)])", r"([],[(([],[walks(x)]) + ([],[runs(x)]))])", r"([x],[man(x), -([],[walks(x)])])", r"([],[(([x],[man(x)]) -> ([],[walks(x)]))])", ] for e in expressions: d = DrtExpression.fromstring(e) d.draw() if __name__ == "__main__": demo()
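As a follow-up to the operator overloads defined above (__add__ builds a DrtConcatenation, __gt__ attaches a consequent DRS), here is a short sketch of DRS composition; the expected outputs in the comments follow from the simplify(), __str__ and fol() definitions above and use the same box notation as the demo() function:

from nltk.sem.drt import DrtExpression

dexpr = DrtExpression.fromstring

# "+" builds a DrtConcatenation; simplify() merges the two boxes,
# alpha-converting any discourse referents they share.
merged = (dexpr("([x],[dog(x)])") + dexpr("([y],[walks(y)])")).simplify()
print(merged)             # ([x,y],[dog(x), walks(y)])

# ">" wraps a DRS around a consequent, i.e. a DRS-level implication.
conditional = dexpr("([x],[dog(x)])") > dexpr("([],[barks(x)])")
print(conditional.fol())  # all x.(dog(x) -> barks(x))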
natural language toolkit logic peter wang updated by dan garrette dhgarrettegmail com c 20012023 nltk project url https www nltk org for license information see license txt an implementation of the hole semantics model following blackburn and bos representation and inference for natural language csli 2005 the semantic representations are built by the grammar hole fcfg this module contains driver code to read in sentences and parse them according to a hole semantics grammar after parsing the semantic representation is in the form of an underspecified representation that is not easy to read we use a plugging algorithm to convert that representation into firstorder logic formulas note that in this code there may be multiple types of trees being referred to 1 parse trees 2 the underspecified representation 3 firstorder logic formula trees 4 the search space when plugging search tree this class holds the brokendown components of a hole semantics i e it extracts the holes labels logic formula fragments and constraints out of a big conjunction of such as produced by the hole semantics grammar it then provides some operations on the semantics dealing with holes labels and finding legal ways to plug holes with labels constructor usr is a sem expression representing an underspecified representation structure usr a usr has the following special predicates alll v n existsl v n andl n n orl n n impl n n iffl n n predl v n v v where the brackets and star indicate zero or more repetitions leqn n holen labeln where l is the label of the node described by the predicate n is either a label or a hole and v is a variable return true if x is a node label or hole in this semantic representation extract holes labels formula fragments and constraints from the hole semantics underspecified representation usr the label is the first argument of the predicate return the set of labels which are not referenced directly as part of another formula fragment these will be the topmost labels for the subtree that they are part of return the hole that will be the top of the formula tree calculate and return all the legal pluggings mappings of labels to holes of this semantics given the constraints plug the nodes in queue with the labels in potentiallabels each element of queue is a tuple of the node to plug and the list of ancestor holes from the root of the graph to that node potentiallabels is a set of the labels which are still available for plugging plugacc is the incomplete mapping of holes to labels made on the current branch of the search tree so far record is a list of all the complete pluggings that we have found in total so far it is the only parameter that is destructively updated the node is a hole try to plug it the node is a label replace it in the queue by the holes and labels in the formula fragment named by that label try all possible ways of plugging a single hole see plugnodes for the meanings of the parameters add the current hole we re trying to plug into the list of ancestors try each potential label in this hole in turn is the label valid in this hole no more potential labels that must mean all the holes have been filled so we have found a legal plugging so remember it note that the queue might not be empty because there might be labels on there that point to formula fragments with no holes in them sanitycheckplugging will make sure all holes are filled recursively try to fill in the rest of the holes in the queue the label we just plugged into the hole could have holes of its own so at the end of the 
queue putting it on the end of the queue gives us a breadthfirst search so that all the holes at level i of the formula tree are filled before filling level i1 a depthfirst search would work as well since the trees must be finite but the bookkeeping would be harder return true if the label cannot be placed underneath the holes given by the set ancestors because it would violate the constraints imposed on it make sure that a given plugging is legal we recursively go through each node and make sure that no constraints are violated we also check that all holes have been filled return the firstorder logic formula tree for this underspecified representation using the plugging given this class represents a constraint of the form l n where l is a label and n is a node a label or a hole parse the sentence get the semantic feature from the top of the parse tree print the raw semantic representation skolemize away all quantifiers all variables become unique break the hole semantics representation down into its components i e holes labels formula fragments and constraints maybe show the details of the semantic representation find all the possible ways to plug the formulas together build fol formula trees using the pluggings print out the formulas in a textual format natural language toolkit logic peter wang updated by dan garrette dhgarrette gmail com c 2001 2023 nltk project url https www nltk org for license information see license txt an implementation of the hole semantics model following blackburn and bos representation and inference for natural language csli 2005 the semantic representations are built by the grammar hole fcfg this module contains driver code to read in sentences and parse them according to a hole semantics grammar after parsing the semantic representation is in the form of an underspecified representation that is not easy to read we use a plugging algorithm to convert that representation into first order logic formulas note that in this code there may be multiple types of trees being referred to 1 parse trees 2 the underspecified representation 3 first order logic formula trees 4 the search space when plugging search tree this class holds the broken down components of a hole semantics i e it extracts the holes labels logic formula fragments and constraints out of a big conjunction of such as produced by the hole semantics grammar it then provides some operations on the semantics dealing with holes labels and finding legal ways to plug holes with labels constructor usr is a sem expression representing an underspecified representation structure usr a usr has the following special predicates all l v n exists l v n and l n n or l n n imp l n n iff l n n pred l v n v v where the brackets and star indicate zero or more repetitions leq n n hole n label n where l is the label of the node described by the predicate n is either a label or a hole and v is a variable mapping of label formula fragment set of constraints return true if x is a node label or hole in this semantic representation extract holes labels formula fragments and constraints from the hole semantics underspecified representation usr the label is the first argument of the predicate return the set of labels which are not referenced directly as part of another formula fragment these will be the top most labels for the subtree that they are part of return the hole that will be the top of the formula tree it must be unique calculate and return all the legal pluggings mappings of labels to holes of this semantics given the 
constraints plug the nodes in queue with the labels in potential_labels each element of queue is a tuple of the node to plug and the list of ancestor holes from the root of the graph to that node potential_labels is a set of the labels which are still available for plugging plug_acc is the incomplete mapping of holes to labels made on the current branch of the search tree so far record is a list of all the complete pluggings that we have found in total so far it is the only parameter that is destructively updated the node is a hole try to plug it the node is a label replace it in the queue by the holes and labels in the formula fragment named by that label try all possible ways of plugging a single hole see _plug_nodes for the meanings of the parameters add the current hole we re trying to plug into the list of ancestors try each potential label in this hole in turn is the label valid in this hole no more potential labels that must mean all the holes have been filled so we have found a legal plugging so remember it note that the queue might not be empty because there might be labels on there that point to formula fragments with no holes in them _sanity_check_plugging will make sure all holes are filled recursively try to fill in the rest of the holes in the queue the label we just plugged into the hole could have holes of its own so at the end of the queue putting it on the end of the queue gives us a breadth first search so that all the holes at level i of the formula tree are filled before filling level i 1 a depth first search would work as well since the trees must be finite but the bookkeeping would be harder return true if the label cannot be placed underneath the holes given by the set ancestors because it would violate the constraints imposed on it make sure that a given plugging is legal we recursively go through each node and make sure that no constraints are violated we also check that all holes have been filled return the first order logic formula tree for this underspecified representation using the plugging given this class represents a constraint of the form l n where l is a label and n is a node a label or a hole parse the sentence get the semantic feature from the top of the parse tree print the raw semantic representation skolemize away all quantifiers all variables become unique break the hole semantics representation down into its components i e holes labels formula fragments and constraints maybe show the details of the semantic representation find all the possible ways to plug the formulas together build fol formula trees using the pluggings print out the formulas in a textual format
from functools import reduce from nltk.parse import load_parser from nltk.sem.logic import ( AllExpression, AndExpression, ApplicationExpression, ExistsExpression, IffExpression, ImpExpression, LambdaExpression, NegatedExpression, OrExpression, ) from nltk.sem.skolemize import skolemize class Constants: ALL = "ALL" EXISTS = "EXISTS" NOT = "NOT" AND = "AND" OR = "OR" IMP = "IMP" IFF = "IFF" PRED = "PRED" LEQ = "LEQ" HOLE = "HOLE" LABEL = "LABEL" MAP = { ALL: lambda v, e: AllExpression(v.variable, e), EXISTS: lambda v, e: ExistsExpression(v.variable, e), NOT: NegatedExpression, AND: AndExpression, OR: OrExpression, IMP: ImpExpression, IFF: IffExpression, PRED: ApplicationExpression, } class HoleSemantics: def __init__(self, usr): self.holes = set() self.labels = set() self.fragments = {} self.constraints = set() self._break_down(usr) self.top_most_labels = self._find_top_most_labels() self.top_hole = self._find_top_hole() def is_node(self, x): return x in (self.labels | self.holes) def _break_down(self, usr): if isinstance(usr, AndExpression): self._break_down(usr.first) self._break_down(usr.second) elif isinstance(usr, ApplicationExpression): func, args = usr.uncurry() if func.variable.name == Constants.LEQ: self.constraints.add(Constraint(args[0], args[1])) elif func.variable.name == Constants.HOLE: self.holes.add(args[0]) elif func.variable.name == Constants.LABEL: self.labels.add(args[0]) else: label = args[0] assert label not in self.fragments self.fragments[label] = (func, args[1:]) else: raise ValueError(usr.label()) def _find_top_nodes(self, node_list): top_nodes = node_list.copy() for f in self.fragments.values(): args = f[1] for arg in args: if arg in node_list: top_nodes.discard(arg) return top_nodes def _find_top_most_labels(self): return self._find_top_nodes(self.labels) def _find_top_hole(self): top_holes = self._find_top_nodes(self.holes) assert len(top_holes) == 1 return top_holes.pop() def pluggings(self): record = [] self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record) return record def _plug_nodes(self, queue, potential_labels, plug_acc, record): if queue != []: (node, ancestors) = queue[0] if node in self.holes: self._plug_hole( node, ancestors, queue[1:], potential_labels, plug_acc, record ) else: assert node in self.labels args = self.fragments[node][1] head = [(a, ancestors) for a in args if self.is_node(a)] self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record) else: raise Exception("queue empty") def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record): assert hole not in ancestors0 ancestors = [hole] + ancestors0 for l in potential_labels0: if self._violates_constraints(l, ancestors): continue plug_acc = plug_acc0.copy() plug_acc[hole] = l potential_labels = potential_labels0.copy() potential_labels.remove(l) if len(potential_labels) == 0: self._sanity_check_plugging(plug_acc, self.top_hole, []) record.append(plug_acc) else: self._plug_nodes( queue + [(l, ancestors)], potential_labels, plug_acc, record ) def _violates_constraints(self, label, ancestors): for c in self.constraints: if c.lhs == label: if c.rhs not in ancestors: return True return False def _sanity_check_plugging(self, plugging, node, ancestors): if node in self.holes: ancestors = [node] + ancestors label = plugging[node] else: label = node assert label in self.labels for c in self.constraints: if c.lhs == label: assert c.rhs in ancestors args = self.fragments[label][1] for arg in args: if self.is_node(arg): 
self._sanity_check_plugging(plugging, arg, [label] + ancestors) def formula_tree(self, plugging): return self._formula_tree(plugging, self.top_hole) def _formula_tree(self, plugging, node): if node in plugging: return self._formula_tree(plugging, plugging[node]) elif node in self.fragments: pred, args = self.fragments[node] children = [self._formula_tree(plugging, arg) for arg in args] return reduce(Constants.MAP[pred.variable.name], children) else: return node class Constraint: def __init__(self, lhs, rhs): self.lhs = lhs self.rhs = rhs def __eq__(self, other): if self.__class__ == other.__class__: return self.lhs == other.lhs and self.rhs == other.rhs else: return False def __ne__(self, other): return not (self == other) def __hash__(self): return hash(repr(self)) def __repr__(self): return f"({self.lhs} < {self.rhs})" def hole_readings(sentence, grammar_filename=None, verbose=False): if not grammar_filename: grammar_filename = "grammars/sample_grammars/hole.fcfg" if verbose: print("Reading grammar file", grammar_filename) parser = load_parser(grammar_filename) tokens = sentence.split() trees = list(parser.parse(tokens)) if verbose: print("Got %d different parses" % len(trees)) all_readings = [] for tree in trees: sem = tree.label()["SEM"].simplify() if verbose: print("Raw: ", sem) while isinstance(sem, LambdaExpression): sem = sem.term skolemized = skolemize(sem) if verbose: print("Skolemized:", skolemized) hole_sem = HoleSemantics(skolemized) if verbose: print("Holes: ", hole_sem.holes) print("Labels: ", hole_sem.labels) print("Constraints: ", hole_sem.constraints) print("Top hole: ", hole_sem.top_hole) print("Top labels: ", hole_sem.top_most_labels) print("Fragments:") for l, f in hole_sem.fragments.items(): print(f"\t{l}: {f}") pluggings = hole_sem.pluggings() readings = list(map(hole_sem.formula_tree, pluggings)) if verbose: for i, r in enumerate(readings): print() print("%d. %s" % (i, r)) print() all_readings.extend(readings) return all_readings if __name__ == "__main__": for r in hole_readings("a dog barks"): print(r) print() for r in hole_readings("every girl chases a dog"): print(r)
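A minimal usage sketch of the driver defined above, assuming nltk and its bundled sample grammars (which include grammars/sample_grammars/hole.fcfg, e.g. fetched via nltk.download('sample_grammars')) are available; since "every girl chases a dog" is scope-ambiguous, two pluggings and hence two first-order readings are expected:

# Hedged sketch: requires the hole.fcfg sample grammar from nltk_data.
from nltk.sem.hole import hole_readings

# Two quantifiers -> two legal pluggings -> two first-order readings.
for reading in hole_readings("every girl chases a dog"):
    print(reading)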
natural language toolkit lexical functional grammar dan garrette dhgarrettegmail com c 20012023 nltk project url https www nltk org for license information see license txt append item to the list at key if no list exists for key then construct one add all the dependencies for all the nodes the value of a spec entry is a word not an fstructure pick an alphabetic character as identifier for an entity in the model param value where to index into the list of characters type value int esso nnp 2 sub said vbd 0 root the dt 5 nmod whiting nnp 5 nmod field nn 6 sub started vbd 2 vmod production nn 6 obj tuesday nnp 6 vmod john nnp 2 sub sees vbp 0 root mary nnp 2 obj a dt 2 spec man nn 3 subj walks vb 0 root every dt 2 spec girl nn 3 subj chases vb 0 root a dt 5 spec dog nn 3 obj natural language toolkit lexical functional grammar dan garrette dhgarrette gmail com c 2001 2023 nltk project url https www nltk org for license information see license txt append item to the list at key if no list exists for key then construct one add all the dependencies for all the nodes the value of a spec entry is a word not an fstructure pick an alphabetic character as identifier for an entity in the model param value where to index into the list of characters type value int error esso nnp 2 sub said vbd 0 root the dt 5 nmod whiting nnp 5 nmod field nn 6 sub started vbd 2 vmod production nn 6 obj tuesday nnp 6 vmod john nnp 2 sub sees vbp 0 root mary nnp 2 obj a dt 2 spec man nn 3 subj walks vb 0 root every dt 2 spec girl nn 3 subj chases vb 0 root a dt 5 spec dog nn 3 obj
from itertools import chain from nltk.internals import Counter class FStructure(dict): def safeappend(self, key, item): if key not in self: self[key] = [] self[key].append(item) def __setitem__(self, key, value): dict.__setitem__(self, key.lower(), value) def __getitem__(self, key): return dict.__getitem__(self, key.lower()) def __contains__(self, key): return dict.__contains__(self, key.lower()) def to_glueformula_list(self, glue_dict): depgraph = self.to_depgraph() return glue_dict.to_glueformula_list(depgraph) def to_depgraph(self, rel=None): from nltk.parse.dependencygraph import DependencyGraph depgraph = DependencyGraph() nodes = depgraph.nodes self._to_depgraph(nodes, 0, "ROOT") for address, node in nodes.items(): for n2 in (n for n in nodes.values() if n["rel"] != "TOP"): if n2["head"] == address: relation = n2["rel"] node["deps"].setdefault(relation, []) node["deps"][relation].append(n2["address"]) depgraph.root = nodes[1] return depgraph def _to_depgraph(self, nodes, head, rel): index = len(nodes) nodes[index].update( { "address": index, "word": self.pred[0], "tag": self.pred[1], "head": head, "rel": rel, } ) for feature in sorted(self): for item in sorted(self[feature]): if isinstance(item, FStructure): item._to_depgraph(nodes, index, feature) elif isinstance(item, tuple): new_index = len(nodes) nodes[new_index].update( { "address": new_index, "word": item[0], "tag": item[1], "head": index, "rel": feature, } ) elif isinstance(item, list): for n in item: n._to_depgraph(nodes, index, feature) else: raise Exception( "feature %s is not an FStruct, a list, or a tuple" % feature ) @staticmethod def read_depgraph(depgraph): return FStructure._read_depgraph(depgraph.root, depgraph) @staticmethod def _read_depgraph(node, depgraph, label_counter=None, parent=None): if not label_counter: label_counter = Counter() if node["rel"].lower() in ["spec", "punct"]: return (node["word"], node["tag"]) else: fstruct = FStructure() fstruct.pred = None fstruct.label = FStructure._make_label(label_counter.get()) fstruct.parent = parent word, tag = node["word"], node["tag"] if tag[:2] == "VB": if tag[2:3] == "D": fstruct.safeappend("tense", ("PAST", "tense")) fstruct.pred = (word, tag[:2]) if not fstruct.pred: fstruct.pred = (word, tag) children = [ depgraph.nodes[idx] for idx in chain.from_iterable(node["deps"].values()) ] for child in children: fstruct.safeappend( child["rel"], FStructure._read_depgraph(child, depgraph, label_counter, fstruct), ) return fstruct @staticmethod def _make_label(value): letter = [ "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "a", "b", "c", "d", "e", ][value - 1] num = int(value) // 26 if num > 0: return letter + str(num) else: return letter def __repr__(self): return self.__str__().replace("\n", "") def __str__(self): return self.pretty_format() def pretty_format(self, indent=3): try: accum = "%s:[" % self.label except NameError: accum = "[" try: accum += "pred '%s'" % (self.pred[0]) except NameError: pass for feature in sorted(self): for item in self[feature]: if isinstance(item, FStructure): next_indent = indent + len(feature) + 3 + len(self.label) accum += "\n{}{} {}".format( " " * (indent), feature, item.pretty_format(next_indent), ) elif isinstance(item, tuple): accum += "\n{}{} '{}'".format(" " * (indent), feature, item[0]) elif isinstance(item, list): accum += "\n{}{} {{{}}}".format( " " * (indent), feature, ("\n%s" % (" " * (indent + len(feature) + 2))).join(item), ) else: raise Exception( "feature %s is 
not an FStruct, a list, or a tuple" % feature ) return accum + "]" def demo_read_depgraph(): from nltk.parse.dependencygraph import DependencyGraph dg1 = DependencyGraph( ) dg2 = DependencyGraph( ) dg3 = DependencyGraph( ) dg4 = DependencyGraph( ) depgraphs = [dg1, dg2, dg3, dg4] for dg in depgraphs: print(FStructure.read_depgraph(dg)) if __name__ == "__main__": demo_read_depgraph()
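The demo_read_depgraph strings are empty in this dump, so as a hedged sketch the snippet below rebuilds one of the demo graphs from the cells listed in the description ("a dt 2 spec / man nn 3 subj / walks vb 0 root"); the upper-case tags and relation names are assumptions, since the dump is lower-cased:

from nltk.parse.dependencygraph import DependencyGraph
from nltk.sem.lfg import FStructure

# Four whitespace-separated cells per token: word, tag, head index, relation.
# The casing of the tags/relations is assumed here.
dg = DependencyGraph(
    "a      DT  2   SPEC\n"
    "man    NN  3   SUBJ\n"
    "walks  VB  0   ROOT\n"
)
# Convert the dependency graph into an LFG f-structure and pretty-print it.
print(FStructure.read_depgraph(dg))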
natural language toolkit linear logic dan garrette dhgarrettegmail com c 20012023 nltk project url https www nltk org for license information see license txt punctuation operations a linear logic expression parser def initself logicparser initself self operatorprecedence app 1 tokens imp 2 none 3 self rightassociatedoperations tokens imp def getallsymbolsself return tokens tokens def handleself tok context if tok not in tokens tokens return self handlevariabletok context elif tok tokens open return self handleopentok context def getbooleanexpressionfactoryself tok if tok tokens imp return impexpression else return none def makebooleanexpressionself factory first second return factoryfirst second def attemptapplicationexpressionself expression context param name str for the constant name param dependencies list of int for the indices on which this atom is dependent if self is bound by bindings return the atomic to which it is bound otherwise return self param bindings bindingdict a dictionary of bindings used to simplify return atomicexpression from iddo lev s phd dissertation p108109 param indexcounter counter for unique indices param glueformulafactory glueformula for creating new glue formulas return expression set for the compiled linear logic and any newly created glue formulas from iddo lev s phd dissertation p108109 param indexcounter counter for unique indices param glueformulafactory glueformula for creating new glue formulas return expression set for the compiled linear logic and any newly created glue formulas if other is a constant then it must be equal to self if other is a variable then it must not be bound to anything other than self param other expression param bindings bindingdict a dictionary of all current bindings return bindingdict a new combined dictionary of of bindings and any new binding raise unificationexception if self and other cannot be unified in the context of bindings self must not be bound to anything other than other param other expression param bindings bindingdict a dictionary of all current bindings return bindingdict a new combined dictionary of of bindings and the new binding raise unificationexception if self and other cannot be unified in the context of bindings param antecedent expression for the antecedent param consequent expression for the consequent both the antecedent and consequent of self and other must unify param other impexpression param bindings bindingdict a dictionary of all current bindings return bindingdict a new combined dictionary of of bindings and any new bindings raise unificationexception if self and other cannot be unified in the context of bindings from iddo lev s phd dissertation p108109 param indexcounter counter for unique indices param glueformulafactory glueformula for creating new glue formulas return expression set for the compiled linear logic and any newly created glue formulas from iddo lev s phd dissertation p108109 param indexcounter counter for unique indices param glueformulafactory glueformula for creating new glue formulas return expression list of glueformula for the compiled linear logic and any newly created glue formulas param function expression for the function param argument expression for the argument param argumentindices set for the indices of the glue formula from which the argument came raise linearlogicapplicationexception if function cannot be applied to argument given argumentindices if you are running it on complied premises more conditions apply a dependencies of a o b o c must be a proper 
subset of argumentindices since function is an implication return its consequent there should be no need to check that the application is valid since the checking is done by the constructor param bindings bindingdict a dictionary of bindings used to simplify return expression param bindings list variableexpression atomicexpression to initialize the dictionary dict variableexpression atomicexpression to initialize the dictionary a binding is consistent with the dict if its variable is not already bound or if its variable is already bound to its argument param variable variableexpression the variable bind param binding expression the expression to which variable should be bound raise variablebindingexception if the variable cannot be bound in this dictionary return the expression to which variable is bound param other bindingdict the dict with which to combine self return bindingdict a new dict containing all the elements of both parameters raise variablebindingexception if the parameter dictionaries are not consistent with each other natural language toolkit linear logic dan garrette dhgarrette gmail com c 2001 2023 nltk project url https www nltk org for license information see license txt punctuation operations a linear logic expression parser attempt to make an application expression if the next tokens are an argument in parens then the argument expression is a function being applied to the arguments otherwise return the argument expression swallow then open paren param name str for the constant name param dependencies list of int for the indices on which this atom is dependent if self is bound by bindings return the atomic to which it is bound otherwise return self param bindings bindingdict a dictionary of bindings used to simplify return atomicexpression from iddo lev s phd dissertation p108 109 param index_counter counter for unique indices param glueformulafactory glueformula for creating new glue formulas return expression set for the compiled linear logic and any newly created glue formulas from iddo lev s phd dissertation p108 109 param index_counter counter for unique indices param glueformulafactory glueformula for creating new glue formulas return expression set for the compiled linear logic and any newly created glue formulas if other is a constant then it must be equal to self if other is a variable then it must not be bound to anything other than self param other expression param bindings bindingdict a dictionary of all current bindings return bindingdict a new combined dictionary of of bindings and any new binding raise unificationexception if self and other cannot be unified in the context of bindings self must not be bound to anything other than other param other expression param bindings bindingdict a dictionary of all current bindings return bindingdict a new combined dictionary of of bindings and the new binding raise unificationexception if self and other cannot be unified in the context of bindings param antecedent expression for the antecedent param consequent expression for the consequent both the antecedent and consequent of self and other must unify param other impexpression param bindings bindingdict a dictionary of all current bindings return bindingdict a new combined dictionary of of bindings and any new bindings raise unificationexception if self and other cannot be unified in the context of bindings from iddo lev s phd dissertation p108 109 param index_counter counter for unique indices param glueformulafactory glueformula for creating new glue formulas 
return expression set for the compiled linear logic and any newly created glue formulas from iddo lev s phd dissertation p108 109 param index_counter counter for unique indices param glueformulafactory glueformula for creating new glue formulas return expression list of glueformula for the compiled linear logic and any newly created glue formulas param function expression for the function param argument expression for the argument param argument_indices set for the indices of the glue formula from which the argument came raise linearlogicapplicationexception if function cannot be applied to argument given argument_indices if you are running it on complied premises more conditions apply a dependencies of a o b o c must be a proper subset of argument_indices since function is an implication return its consequent there should be no need to check that the application is valid since the checking is done by the constructor param bindings bindingdict a dictionary of bindings used to simplify return expression param bindings list variableexpression atomicexpression to initialize the dictionary dict variableexpression atomicexpression to initialize the dictionary a binding is consistent with the dict if its variable is not already bound or if its variable is already bound to its argument param variable variableexpression the variable bind param binding expression the expression to which variable should be bound raise variablebindingexception if the variable cannot be bound in this dictionary return the expression to which variable is bound param other bindingdict the dict with which to combine self return bindingdict a new dict containing all the elements of both parameters raise variablebindingexception if the parameter dictionaries are not consistent with each other
from nltk.internals import Counter from nltk.sem.logic import APP, LogicParser _counter = Counter() class Tokens: OPEN = "(" CLOSE = ")" IMP = "-o" PUNCT = [OPEN, CLOSE] TOKENS = PUNCT + [IMP] class LinearLogicParser(LogicParser): def __init__(self): LogicParser.__init__(self) self.operator_precedence = {APP: 1, Tokens.IMP: 2, None: 3} self.right_associated_operations += [Tokens.IMP] def get_all_symbols(self): return Tokens.TOKENS def handle(self, tok, context): if tok not in Tokens.TOKENS: return self.handle_variable(tok, context) elif tok == Tokens.OPEN: return self.handle_open(tok, context) def get_BooleanExpression_factory(self, tok): if tok == Tokens.IMP: return ImpExpression else: return None def make_BooleanExpression(self, factory, first, second): return factory(first, second) def attempt_ApplicationExpression(self, expression, context): if self.has_priority(APP, context): if self.inRange(0) and self.token(0) == Tokens.OPEN: self.token() argument = self.process_next_expression(APP) self.assertNextToken(Tokens.CLOSE) expression = ApplicationExpression(expression, argument, None) return expression def make_VariableExpression(self, name): if name[0].isupper(): return VariableExpression(name) else: return ConstantExpression(name) class Expression: _linear_logic_parser = LinearLogicParser() @classmethod def fromstring(cls, s): return cls._linear_logic_parser.parse(s) def applyto(self, other, other_indices=None): return ApplicationExpression(self, other, other_indices) def __call__(self, other): return self.applyto(other) def __repr__(self): return f"<{self.__class__.__name__} {self}>" class AtomicExpression(Expression): def __init__(self, name, dependencies=None): assert isinstance(name, str) self.name = name if not dependencies: dependencies = [] self.dependencies = dependencies def simplify(self, bindings=None): if bindings and self in bindings: return bindings[self] else: return self def compile_pos(self, index_counter, glueFormulaFactory): self.dependencies = [] return (self, []) def compile_neg(self, index_counter, glueFormulaFactory): self.dependencies = [] return (self, []) def initialize_labels(self, fstruct): self.name = fstruct.initialize_label(self.name.lower()) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __ne__(self, other): return not self == other def __str__(self): accum = self.name if self.dependencies: accum += "%s" % self.dependencies return accum def __hash__(self): return hash(self.name) class ConstantExpression(AtomicExpression): def unify(self, other, bindings): assert isinstance(other, Expression) if isinstance(other, VariableExpression): try: return bindings + BindingDict([(other, self)]) except VariableBindingException: pass elif self == other: return bindings raise UnificationException(self, other, bindings) class VariableExpression(AtomicExpression): def unify(self, other, bindings): assert isinstance(other, Expression) try: if self == other: return bindings else: return bindings + BindingDict([(self, other)]) except VariableBindingException as e: raise UnificationException(self, other, bindings) from e class ImpExpression(Expression): def __init__(self, antecedent, consequent): assert isinstance(antecedent, Expression) assert isinstance(consequent, Expression) self.antecedent = antecedent self.consequent = consequent def simplify(self, bindings=None): return self.__class__( self.antecedent.simplify(bindings), self.consequent.simplify(bindings) ) def unify(self, other, bindings): assert isinstance(other, 
ImpExpression) try: return ( bindings + self.antecedent.unify(other.antecedent, bindings) + self.consequent.unify(other.consequent, bindings) ) except VariableBindingException as e: raise UnificationException(self, other, bindings) from e def compile_pos(self, index_counter, glueFormulaFactory): (a, a_new) = self.antecedent.compile_neg(index_counter, glueFormulaFactory) (c, c_new) = self.consequent.compile_pos(index_counter, glueFormulaFactory) return (ImpExpression(a, c), a_new + c_new) def compile_neg(self, index_counter, glueFormulaFactory): (a, a_new) = self.antecedent.compile_pos(index_counter, glueFormulaFactory) (c, c_new) = self.consequent.compile_neg(index_counter, glueFormulaFactory) fresh_index = index_counter.get() c.dependencies.append(fresh_index) new_v = glueFormulaFactory("v%s" % fresh_index, a, {fresh_index}) return (c, a_new + c_new + [new_v]) def initialize_labels(self, fstruct): self.antecedent.initialize_labels(fstruct) self.consequent.initialize_labels(fstruct) def __eq__(self, other): return ( self.__class__ == other.__class__ and self.antecedent == other.antecedent and self.consequent == other.consequent ) def __ne__(self, other): return not self == other def __str__(self): return "{}{} {} {}{}".format( Tokens.OPEN, self.antecedent, Tokens.IMP, self.consequent, Tokens.CLOSE, ) def __hash__(self): return hash(f"{hash(self.antecedent)}{Tokens.IMP}{hash(self.consequent)}") class ApplicationExpression(Expression): def __init__(self, function, argument, argument_indices=None): function_simp = function.simplify() argument_simp = argument.simplify() assert isinstance(function_simp, ImpExpression) assert isinstance(argument_simp, Expression) bindings = BindingDict() try: if isinstance(function, ApplicationExpression): bindings += function.bindings if isinstance(argument, ApplicationExpression): bindings += argument.bindings bindings += function_simp.antecedent.unify(argument_simp, bindings) except UnificationException as e: raise LinearLogicApplicationException( f"Cannot apply {function_simp} to {argument_simp}. 
{e}" ) from e if argument_indices: if not set(function_simp.antecedent.dependencies) < argument_indices: raise LinearLogicApplicationException( "Dependencies unfulfilled when attempting to apply Linear Logic formula %s to %s" % (function_simp, argument_simp) ) if set(function_simp.antecedent.dependencies) == argument_indices: raise LinearLogicApplicationException( "Dependencies not a proper subset of indices when attempting to apply Linear Logic formula %s to %s" % (function_simp, argument_simp) ) self.function = function self.argument = argument self.bindings = bindings def simplify(self, bindings=None): if not bindings: bindings = self.bindings return self.function.simplify(bindings).consequent def __eq__(self, other): return ( self.__class__ == other.__class__ and self.function == other.function and self.argument == other.argument ) def __ne__(self, other): return not self == other def __str__(self): return "%s" % self.function + Tokens.OPEN + "%s" % self.argument + Tokens.CLOSE def __hash__(self): return hash(f"{hash(self.antecedent)}{Tokens.OPEN}{hash(self.consequent)}") class BindingDict: def __init__(self, bindings=None): self.d = {} if isinstance(bindings, dict): bindings = bindings.items() if bindings: for (v, b) in bindings: self[v] = b def __setitem__(self, variable, binding): assert isinstance(variable, VariableExpression) assert isinstance(binding, Expression) assert variable != binding existing = self.d.get(variable, None) if not existing or binding == existing: self.d[variable] = binding else: raise VariableBindingException( "Variable %s already bound to another value" % (variable) ) def __getitem__(self, variable): assert isinstance(variable, VariableExpression) intermediate = self.d[variable] while intermediate: try: intermediate = self.d[intermediate] except KeyError: return intermediate def __contains__(self, item): return item in self.d def __add__(self, other): try: combined = BindingDict() for v in self.d: combined[v] = self.d[v] for v in other.d: combined[v] = other.d[v] return combined except VariableBindingException as e: raise VariableBindingException( "Attempting to add two contradicting" " VariableBindingsLists: %s, %s" % (self, other) ) from e def __ne__(self, other): return not self == other def __eq__(self, other): if not isinstance(other, BindingDict): raise TypeError return self.d == other.d def __str__(self): return "{" + ", ".join(f"{v}: {self.d[v]}" for v in sorted(self.d.keys())) + "}" def __repr__(self): return "BindingDict: %s" % self class VariableBindingException(Exception): pass class UnificationException(Exception): def __init__(self, a, b, bindings): Exception.__init__(self, f"Cannot unify {a} with {b} given {bindings}") class LinearLogicApplicationException(Exception): pass def demo(): lexpr = Expression.fromstring print(lexpr(r"f")) print(lexpr(r"(g -o f)")) print(lexpr(r"((g -o G) -o G)")) print(lexpr(r"g -o h -o f")) print(lexpr(r"(g -o f)(g)").simplify()) print(lexpr(r"(H -o f)(g)").simplify()) print(lexpr(r"((g -o G) -o G)((g -o f))").simplify()) print(lexpr(r"(H -o H)((g -o f))").simplify()) if __name__ == "__main__": demo()
natural language toolkit semantic interpretation ewan klein ewaninf ed ac uk c 20012023 nltk project url https www nltk org for license information see license txt skolemize the expression and convert to conjunctive normal form cnf convert this split disjunction to conjunctive normal form cnf natural language toolkit semantic interpretation ewan klein ewan inf ed ac uk c 2001 2023 nltk project url https www nltk org for license information see license txt skolemize the expression and convert to conjunctive normal form cnf convert this split disjunction to conjunctive normal form cnf
from nltk.sem.logic import ( AllExpression, AndExpression, ApplicationExpression, EqualityExpression, ExistsExpression, IffExpression, ImpExpression, NegatedExpression, OrExpression, VariableExpression, skolem_function, unique_variable, ) def skolemize(expression, univ_scope=None, used_variables=None): if univ_scope is None: univ_scope = set() if used_variables is None: used_variables = set() if isinstance(expression, AllExpression): term = skolemize( expression.term, univ_scope | {expression.variable}, used_variables | {expression.variable}, ) return term.replace( expression.variable, VariableExpression(unique_variable(ignore=used_variables)), ) elif isinstance(expression, AndExpression): return skolemize(expression.first, univ_scope, used_variables) & skolemize( expression.second, univ_scope, used_variables ) elif isinstance(expression, OrExpression): return to_cnf( skolemize(expression.first, univ_scope, used_variables), skolemize(expression.second, univ_scope, used_variables), ) elif isinstance(expression, ImpExpression): return to_cnf( skolemize(-expression.first, univ_scope, used_variables), skolemize(expression.second, univ_scope, used_variables), ) elif isinstance(expression, IffExpression): return to_cnf( skolemize(-expression.first, univ_scope, used_variables), skolemize(expression.second, univ_scope, used_variables), ) & to_cnf( skolemize(expression.first, univ_scope, used_variables), skolemize(-expression.second, univ_scope, used_variables), ) elif isinstance(expression, EqualityExpression): return expression elif isinstance(expression, NegatedExpression): negated = expression.term if isinstance(negated, AllExpression): term = skolemize( -negated.term, univ_scope, used_variables | {negated.variable} ) if univ_scope: return term.replace(negated.variable, skolem_function(univ_scope)) else: skolem_constant = VariableExpression( unique_variable(ignore=used_variables) ) return term.replace(negated.variable, skolem_constant) elif isinstance(negated, AndExpression): return to_cnf( skolemize(-negated.first, univ_scope, used_variables), skolemize(-negated.second, univ_scope, used_variables), ) elif isinstance(negated, OrExpression): return skolemize(-negated.first, univ_scope, used_variables) & skolemize( -negated.second, univ_scope, used_variables ) elif isinstance(negated, ImpExpression): return skolemize(negated.first, univ_scope, used_variables) & skolemize( -negated.second, univ_scope, used_variables ) elif isinstance(negated, IffExpression): return to_cnf( skolemize(-negated.first, univ_scope, used_variables), skolemize(-negated.second, univ_scope, used_variables), ) & to_cnf( skolemize(negated.first, univ_scope, used_variables), skolemize(negated.second, univ_scope, used_variables), ) elif isinstance(negated, EqualityExpression): return expression elif isinstance(negated, NegatedExpression): return skolemize(negated.term, univ_scope, used_variables) elif isinstance(negated, ExistsExpression): term = skolemize( -negated.term, univ_scope | {negated.variable}, used_variables | {negated.variable}, ) return term.replace( negated.variable, VariableExpression(unique_variable(ignore=used_variables)), ) elif isinstance(negated, ApplicationExpression): return expression else: raise Exception("'%s' cannot be skolemized" % expression) elif isinstance(expression, ExistsExpression): term = skolemize( expression.term, univ_scope, used_variables | {expression.variable} ) if univ_scope: return term.replace(expression.variable, skolem_function(univ_scope)) else: skolem_constant = 
VariableExpression(unique_variable(ignore=used_variables)) return term.replace(expression.variable, skolem_constant) elif isinstance(expression, ApplicationExpression): return expression else: raise Exception("'%s' cannot be skolemized" % expression) def to_cnf(first, second): if isinstance(first, AndExpression): r_first = to_cnf(first.first, second) r_second = to_cnf(first.second, second) return r_first & r_second elif isinstance(second, AndExpression): r_first = to_cnf(first, second.first) r_second = to_cnf(first, second.second) return r_first & r_second else: return first | second
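A small hedged sketch of skolemize in action (nltk installed); the exact fresh-variable and Skolem-function names depend on internal counters, so the expected outputs in the comments are only indicative:

from nltk.sem.logic import Expression
from nltk.sem.skolemize import skolemize

fol = Expression.fromstring
# The existential is replaced by a fresh constant: roughly dog(z1) & bark(z1)
print(skolemize(fol(r"exists x.(dog(x) & bark(x))")))
# The inner existential becomes a Skolem function of the universal variable,
# and the implication is split into a CNF clause:
# roughly (-dog(z2) | chase(z2,F1(z2)))
print(skolemize(fol(r"all x.(dog(x) -> exists y.chase(x,y))")))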
natural language toolkit sentiment analysis c 20012023 nltk project ewan klein ewaninf ed ac uk url https www nltk org for license information see license txt nltk sentiment analysis package natural language toolkit sentiment analysis c 2001 2023 nltk project ewan klein ewan inf ed ac uk url https www nltk org for license information see license txt nltk sentiment analysis package
from nltk.sentiment.sentiment_analyzer import SentimentAnalyzer from nltk.sentiment.vader import SentimentIntensityAnalyzer
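Both re-exported entry points can be used directly; for instance, a minimal VADER sketch (assumes the vader_lexicon resource has been fetched, e.g. with nltk.download('vader_lexicon')):

from nltk.sentiment import SentimentIntensityAnalyzer

sia = SentimentIntensityAnalyzer()
# Returns a dict with 'neg', 'neu', 'pos' and a normalized 'compound' score.
print(sia.polarity_scores("NLTK makes sentiment analysis surprisingly easy!"))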
natural language toolkit sentiment analyzer c 20012023 nltk project pierpaolo pantone 24alsecondogmail com url https www nltk org for license information see license txt a sentimentanalyzer is a tool to implement and facilitate sentiment analysis tasks using nltk features and classifiers especially for teaching and demonstrative purposes a sentiment analysis tool based on machine learning approaches return all wordstokens from the documents with duplicates param documents a list of words label tuples param labeled if true assume that each document is represented by a words label tuple liststr str if false each document is considered as being a simple list of strings liststr rtype liststr return a list of all wordstokens in documents apply all feature extractor functions to the documents this is a wrapper around nltk classify util applyfeatures if labeledfalse return featuresets as featurefuncdoc for doc in documents if labeledtrue return featuresets as featurefunctok label for tok label in toks param documents a list of documents if labeledtrue the method expects a list of words label tuples rtype lazymap return most common topn word features param words a list of wordstokens param topn number of best wordstokens to use sorted by frequency rtype liststr return a list of topn wordstokens with no duplicates sorted by frequency stopwords are not removed return topn bigram features using assocmeasure note that this method is based on bigram collocations measures and not on simple bigram frequency param documents a list or iterable of tokens param topn number of best wordstokens to use sorted by association measure param assocmeasure bigram association measure to use as score function param minfreq the minimum number of occurrencies of bigrams to take into consideration return topn ngrams scored by the given association measure classify a single instance applying the features that have already been stored in the sentimentanalyzer param instance a list or iterable of tokens return the classification result given by applying the classifier add a new function to extract features from a document this function will be used in extractfeatures important in this step our kwargs are only representing additional parameters and not the document we have to parse the document will always be the first parameter in the parameter list and it will be added in the extractfeatures function param function the extractor function to add to the list of feature extractors param kwargs additional parameters required by the function function apply extractor functions and their parameters to the present document we pass document as the first parameter of the extractor functions if we want to use the same extractor function multiple times we have to add it to the extractors with addfeatextractor using multiple sets of parameters one for each call of the extractor function param document the document that will be passed as argument to the feature extractor functions return a dictionary of populated features extracted from the document rtype dict train classifier on the training set optionally saving the output in the file specified by saveclassifier additional arguments depend on the specific trainer used for example a maxentclassifier can use maxiter parameter to specify the number of iterations while a naivebayesclassifier cannot param trainer train method of a classifier e g naivebayesclassifier train param trainingset the training set to be passed as argument to the classifier train method param saveclassifier the 
filename of the file where the classifier will be stored optional param kwargs additional parameters that will be passed as arguments to the classifier train function return a classifier instance trained on the training set rtype store content in filename can be used to store a sentimentanalyzer the protocol2 parameter is for python2 compatibility evaluate and print classifier performance on the test set param testset a list of tokens label tuples to use as gold set param classifier a classifier instance previously trained param accuracy if true evaluate classifier accuracy param fmeasure if true evaluate classifier fmeasure param precision if true evaluate classifier precision param recall if true evaluate classifier recall return evaluation results rtype dictstr float print evaluation results in alphabetical order natural language toolkit sentiment analyzer c 2001 2023 nltk project pierpaolo pantone 24alsecondo gmail com url https www nltk org for license information see license txt a sentimentanalyzer is a tool to implement and facilitate sentiment analysis tasks using nltk features and classifiers especially for teaching and demonstrative purposes a sentiment analysis tool based on machine learning approaches return all words tokens from the documents with duplicates param documents a list of words label tuples param labeled if true assume that each document is represented by a words label tuple list str str if false each document is considered as being a simple list of strings list str rtype list str return a list of all words tokens in documents apply all feature extractor functions to the documents this is a wrapper around nltk classify util apply_features if labeled false return featuresets as feature_func doc for doc in documents if labeled true return featuresets as feature_func tok label for tok label in toks param documents a list of documents if labeled true the method expects a list of words label tuples rtype lazymap return most common top_n word features param words a list of words tokens param top_n number of best words tokens to use sorted by frequency rtype list str return a list of top_n words tokens with no duplicates sorted by frequency stopwords are not removed return top_n bigram features using assoc_measure note that this method is based on bigram collocations measures and not on simple bigram frequency param documents a list or iterable of tokens param top_n number of best words tokens to use sorted by association measure param assoc_measure bigram association measure to use as score function param min_freq the minimum number of occurrencies of bigrams to take into consideration return top_n ngrams scored by the given association measure classify a single instance applying the features that have already been stored in the sentimentanalyzer param instance a list or iterable of tokens return the classification result given by applying the classifier add a new function to extract features from a document this function will be used in extract_features important in this step our kwargs are only representing additional parameters and not the document we have to parse the document will always be the first parameter in the parameter list and it will be added in the extract_features function param function the extractor function to add to the list of feature extractors param kwargs additional parameters required by the function function apply extractor functions and their parameters to the present document we pass document as the first parameter of the extractor functions 
if we want to use the same extractor function multiple times we have to add it to the extractors with add_feat_extractor using multiple sets of parameters one for each call of the extractor function param document the document that will be passed as argument to the feature extractor functions return a dictionary of populated features extracted from the document rtype dict train classifier on the training set optionally saving the output in the file specified by save_classifier additional arguments depend on the specific trainer used for example a maxentclassifier can use max_iter parameter to specify the number of iterations while a naivebayesclassifier cannot param trainer train method of a classifier e g naivebayesclassifier train param training_set the training set to be passed as argument to the classifier train method param save_classifier the filename of the file where the classifier will be stored optional param kwargs additional parameters that will be passed as arguments to the classifier train function return a classifier instance trained on the training set rtype store content in filename can be used to store a sentimentanalyzer the protocol 2 parameter is for python2 compatibility evaluate and print classifier performance on the test set param test_set a list of tokens label tuples to use as gold set param classifier a classifier instance previously trained param accuracy if true evaluate classifier accuracy param f_measure if true evaluate classifier f_measure param precision if true evaluate classifier precision param recall if true evaluate classifier recall return evaluation results rtype dict str float print evaluation results in alphabetical order
import sys from collections import defaultdict from nltk.classify.util import accuracy as eval_accuracy from nltk.classify.util import apply_features from nltk.collocations import BigramCollocationFinder from nltk.metrics import BigramAssocMeasures from nltk.metrics import f_measure as eval_f_measure from nltk.metrics import precision as eval_precision from nltk.metrics import recall as eval_recall from nltk.probability import FreqDist class SentimentAnalyzer: def __init__(self, classifier=None): self.feat_extractors = defaultdict(list) self.classifier = classifier def all_words(self, documents, labeled=None): all_words = [] if labeled is None: labeled = documents and isinstance(documents[0], tuple) if labeled: for words, _sentiment in documents: all_words.extend(words) elif not labeled: for words in documents: all_words.extend(words) return all_words def apply_features(self, documents, labeled=None): return apply_features(self.extract_features, documents, labeled) def unigram_word_feats(self, words, top_n=None, min_freq=0): unigram_feats_freqs = FreqDist(word for word in words) return [ w for w, f in unigram_feats_freqs.most_common(top_n) if unigram_feats_freqs[w] > min_freq ] def bigram_collocation_feats( self, documents, top_n=None, min_freq=3, assoc_measure=BigramAssocMeasures.pmi ): finder = BigramCollocationFinder.from_documents(documents) finder.apply_freq_filter(min_freq) return finder.nbest(assoc_measure, top_n) def classify(self, instance): instance_feats = self.apply_features([instance], labeled=False) return self.classifier.classify(instance_feats[0]) def add_feat_extractor(self, function, **kwargs): self.feat_extractors[function].append(kwargs) def extract_features(self, document): all_features = {} for extractor in self.feat_extractors: for param_set in self.feat_extractors[extractor]: feats = extractor(document, **param_set) all_features.update(feats) return all_features def train(self, trainer, training_set, save_classifier=None, **kwargs): print("Training classifier") self.classifier = trainer(training_set, **kwargs) if save_classifier: self.save_file(self.classifier, save_classifier) return self.classifier def save_file(self, content, filename): print("Saving", filename, file=sys.stderr) with open(filename, "wb") as storage_file: import pickle pickle.dump(content, storage_file, protocol=2) def evaluate( self, test_set, classifier=None, accuracy=True, f_measure=True, precision=True, recall=True, verbose=False, ): if classifier is None: classifier = self.classifier print(f"Evaluating {type(classifier).__name__} results...") metrics_results = {} if accuracy: accuracy_score = eval_accuracy(classifier, test_set) metrics_results["Accuracy"] = accuracy_score gold_results = defaultdict(set) test_results = defaultdict(set) labels = set() for i, (feats, label) in enumerate(test_set): labels.add(label) gold_results[label].add(i) observed = classifier.classify(feats) test_results[observed].add(i) for label in labels: if precision: precision_score = eval_precision( gold_results[label], test_results[label] ) metrics_results[f"Precision [{label}]"] = precision_score if recall: recall_score = eval_recall(gold_results[label], test_results[label]) metrics_results[f"Recall [{label}]"] = recall_score if f_measure: f_measure_score = eval_f_measure( gold_results[label], test_results[label] ) metrics_results[f"F-measure [{label}]"] = f_measure_score if verbose: for result in sorted(metrics_results): print(f"{result}: {metrics_results[result]}") return metrics_results
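A hedged end-to-end sketch of the workflow implemented above, along the lines of the NLTK sentiment HOWTO; it assumes the subjectivity corpus has been downloaded, and the 100/80 split sizes are illustrative only:

from nltk.classify import NaiveBayesClassifier
from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import extract_unigram_feats

# Label a small, balanced sample of subjective and objective sentences.
subj_docs = [(sent, "subj") for sent in subjectivity.sents(categories="subj")[:100]]
obj_docs = [(sent, "obj") for sent in subjectivity.sents(categories="obj")[:100]]
train_docs = subj_docs[:80] + obj_docs[:80]
test_docs = subj_docs[80:] + obj_docs[80:]

analyzer = SentimentAnalyzer()
# Use frequent unigrams as features and register the extractor function.
unigrams = analyzer.unigram_word_feats(analyzer.all_words(train_docs), min_freq=4)
analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigrams)

# Convert documents to feature-value representations, then train and evaluate.
training_set = analyzer.apply_features(train_docs)
test_set = analyzer.apply_features(test_docs)
analyzer.train(NaiveBayesClassifier.train, training_set)
print(analyzer.evaluate(test_set))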
natural language toolkit sentiment analyzer c 20012023 nltk project pierpaolo pantone 24alsecondogmail com url https www nltk org for license information see license txt utility methods for sentiment analysis regular expressions regular expression for negation by christopher potts happy and sad emoticons a timer decorator to measure execution performance of methods in python 2 x round will return a float so we convert it to int feature extractor functions feature extractor functions are declared outside the sentimentanalyzer class users should have the possibility to create their own feature extractors without modifying sentimentanalyzer populate a dictionary of unigram features reflecting the presenceabsence in the document of each of the tokens in unigrams param document a list of wordstokens param unigrams a list of wordstokens whose presenceabsence has to be checked in document param handlenegation if handlenegation true apply marknegation method to document before checking for unigram presenceabsence return a dictionary of unigram features unigram boolean words ice police riot document ice is melting due to global warming split sortedextractunigramfeatsdocument words items containsice true containspolice false containsriot false populate a dictionary of bigram features reflecting the presenceabsence in the document of each of the tokens in bigrams this extractor function only considers contiguous bigrams obtained by nltk bigrams param document a list of wordstokens param unigrams a list of bigrams whose presenceabsence has to be checked in document return a dictionary of bigram features bigram boolean bigrams global warming police prevented love you document ice is melting due to global warming split sortedextractbigramfeatsdocument bigrams items doctest normalizewhitespace containsglobal warming true containslove you false containspolice prevented false helper functions append neg suffix to words that appear in the scope between a negation and a punctuation mark param document a list of wordstokens or a tuple words label param shallow if true the method will modify the original document in place param doublenegflip if true double negation is considered affirmation we activatedeactivate negation scope every time we find a negation return if shallow true the method will modify the original document and return it if shallow false the method will return a modified document leaving the original unmodified sent i didn t like this movie it was bad split marknegationsent i didn t likeneg thisneg movieneg it was bad check if the document is labeled if so do not consider the label write the output of an analysis to a file randomly split n instances of the dataset into train and test sets param allinstances a list of instances e g documents that will be split param n the number of instances to consider in case we want to use only a subset return two lists of instances train set is 810 of the total and test set is 210 of the total pad margins so that markers are not clipped by the axes parsing and conversion functions convert json file to csv file preprocessing each row to obtain a suitable dataset for tweets semantic analysis param jsonfile the original json file containing tweets param outfile the output csv filename param fields a list of fields that will be extracted from the json file and kept in the output csv file param encoding the encoding of the files param errors the error handling strategy for the output writer param gzipcompress if true create a compressed gzip file param 
skipretweets if true remove retweets param skiptonguetweets if true remove tweets containing p and p emoticons param skipambiguoustweets if true remove tweets containing both happy and sad emoticons param stripoffemoticons if true strip off emoticons from all tweets param removeduplicates if true remove tweets appearing more than once param limit an integer to set the number of tweets to convert after the limit is reached the conversion will stop it can be useful to create subsets of the original tweets json data write the list of fields as header remove retweets remove tweets containing p and p emoticons remove tweets containing both happy and sad emoticons strip off emoticons from all tweets remove duplicate tweets parse csv file containing tweets and output data a list of text label tuples param filename the input csv filename param label the label to be appended to each tweet contained in the csv file param wordtokenizer the tokenizer instance that will be used to tokenize each sentence into tokens e g wordpuncttokenizer or blanklinetokenizer if no wordtokenizer is specified tweets will not be tokenized param senttokenizer the tokenizer that will be used to split each tweet into sentences param skipheader if true skip the first line of the csv file which usually contains headers return a list of text label tuples text text1 apply sentence and word tokenizer to text demos train and test naive bayes classifier on 10000 tweets tokenized using tweettokenizer features are composed of 1000 most frequent unigrams 100 top bigrams using bigramassocmeasures pmi param trainer train method of a classifier param ninstances the number of total tweets that have to be used for training and testing tweets will be equally split between positive and negative param output the output file where results have to be reported different customizations for the tweettokenizer tokenizer tweettokenizerpreservecasetrue striphandlestrue tokenizer tweettokenizerreducelentrue striphandlestrue we separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets stopwords stopwords words english allwords word for word in sentimanalyzer allwordstrainingtweets if word lower not in stopwords add simple unigram word features add bigram collocation features classifier sentimanalyzer traintrainer trainingset maxiter4 train classifier on all instances of the movie reviews dataset the corpus has been preprocessed using the default sentence tokenizer and wordpuncttokenizer features are composed of most frequent unigrams param trainer train method of a classifier param ninstances the number of total reviews that have to be used for training and testing reviews will be equally split between positive and negative param output the output file where results have to be reported we separately split positive and negative instances to keep a balanced uniform class distribution in both train and test sets add simple unigram word features apply features to obtain a featurevalue representation of our datasets train and test a classifier on instances of the subjective dataset by pang and lee the dataset is made of 5000 subjective and 5000 objective sentences all tokens words and punctuation marks are separated by a whitespace so we use the basic whitespacetokenizer to parse the data param trainer train method of a classifier param saveanalyzer if true store the sentimentanalyzer in a pickle file param ninstances the number of total sentences that have to be used for training and 
testing sentences will be equally split between positive and negative param output the output file where results have to be reported we separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets add simple unigram word features handling negation apply features to obtain a featurevalue representation of our datasets classify a single sentence as subjective or objective using a stored sentimentanalyzer param text a sentence whose subjectivity has to be classified tokenize and convert to lower case basic example of sentiment classification using liu and hu opinion lexicon this function simply counts the number of positive negative and neutral words in the sentence and classifies it depending on which polarity is more represented words that do not appear in the lexicon are considered as neutral param sentence a sentence whose polarity has to be classified param plot if true plot a visual representation of the sentence polarity output polarity scores for a text using vader approach param text a text whose polarity has to be evaluated classify 10000 positive and negative tweets using vader approach param ninstances the number of total tweets that have to be classified param output the output file where results have to be reported we separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets demomoviereviewssvm demosubjectivitysvm demosentsubjectivityshe s an artist but hasn t picked up a brush in a year demoliuhulexiconthis movie was actually neither that funny nor super witty plottrue demovaderinstancethis movie was actually neither that funny nor super witty demovadertweets natural language toolkit sentiment analyzer c 2001 2023 nltk project pierpaolo pantone 24alsecondo gmail com url https www nltk org for license information see license txt utility methods for sentiment analysis regular expressions regular expression for negation by christopher potts never no nothing nowhere noone none not havent hasnt hadnt cant couldnt shouldnt wont wouldnt dont doesnt didnt isnt arent aint n t happy and sad emoticons a timer decorator to measure execution performance of methods in python 2 x round will return a float so we convert it to int feature extractor functions feature extractor functions are declared outside the sentimentanalyzer class users should have the possibility to create their own feature extractors without modifying sentimentanalyzer populate a dictionary of unigram features reflecting the presence absence in the document of each of the tokens in unigrams param document a list of words tokens param unigrams a list of words tokens whose presence absence has to be checked in document param handle_negation if handle_negation true apply mark_negation method to document before checking for unigram presence absence return a dictionary of unigram features unigram boolean words ice police riot document ice is melting due to global warming split sorted extract_unigram_feats document words items contains ice true contains police false contains riot false populate a dictionary of bigram features reflecting the presence absence in the document of each of the tokens in bigrams this extractor function only considers contiguous bigrams obtained by nltk bigrams param document a list of words tokens param unigrams a list of bigrams whose presence absence has to be checked in document return a dictionary of bigram features bigram boolean bigrams global warming police 
prevented love you document ice is melting due to global warming split sorted extract_bigram_feats document bigrams items doctest normalize_whitespace contains global warming true contains love you false contains police prevented false helper functions append _neg suffix to words that appear in the scope between a negation and a punctuation mark param document a list of words tokens or a tuple words label param shallow if true the method will modify the original document in place param double_neg_flip if true double negation is considered affirmation we activate deactivate negation scope every time we find a negation return if shallow true the method will modify the original document and return it if shallow false the method will return a modified document leaving the original unmodified sent i didn t like this movie it was bad split mark_negation sent i didn t like_neg this_neg movie_neg it was bad check if the document is labeled if so do not consider the label write the output of an analysis to a file randomly split n instances of the dataset into train and test sets param all_instances a list of instances e g documents that will be split param n the number of instances to consider in case we want to use only a subset return two lists of instances train set is 8 10 of the total and test set is 2 10 of the total pad margins so that markers are not clipped by the axes parsing and conversion functions convert json file to csv file preprocessing each row to obtain a suitable dataset for tweets semantic analysis param json_file the original json file containing tweets param outfile the output csv filename param fields a list of fields that will be extracted from the json file and kept in the output csv file param encoding the encoding of the files param errors the error handling strategy for the output writer param gzip_compress if true create a compressed gzip file param skip_retweets if true remove retweets param skip_tongue_tweets if true remove tweets containing p and p emoticons param skip_ambiguous_tweets if true remove tweets containing both happy and sad emoticons param strip_off_emoticons if true strip off emoticons from all tweets param remove_duplicates if true remove tweets appearing more than once param limit an integer to set the number of tweets to convert after the limit is reached the conversion will stop it can be useful to create subsets of the original tweets json data write the list of fields as header remove retweets remove tweets containing p and p emoticons remove tweets containing both happy and sad emoticons strip off emoticons from all tweets remove duplicate tweets parse csv file containing tweets and output data a list of text label tuples param filename the input csv filename param label the label to be appended to each tweet contained in the csv file param word_tokenizer the tokenizer instance that will be used to tokenize each sentence into tokens e g wordpuncttokenizer or blanklinetokenizer if no word_tokenizer is specified tweets will not be tokenized param sent_tokenizer the tokenizer that will be used to split each tweet into sentences param skip_header if true skip the first line of the csv file which usually contains headers return a list of text label tuples skip the header text text 1 apply sentence and word tokenizer to text demos train and test naive bayes classifier on 10000 tweets tokenized using tweettokenizer features are composed of 1000 most frequent unigrams 100 top bigrams using bigramassocmeasures pmi param trainer train method of a 
classifier param n_instances the number of total tweets that have to be used for training and testing tweets will be equally split between positive and negative param output the output file where results have to be reported different customizations for the tweettokenizer tokenizer tweettokenizer preserve_case true strip_handles true tokenizer tweettokenizer reduce_len true strip_handles true we separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets stopwords stopwords words english all_words word for word in sentim_analyzer all_words training_tweets if word lower not in stopwords add simple unigram word features add bigram collocation features classifier sentim_analyzer train trainer training_set max_iter 4 train classifier on all instances of the movie reviews dataset the corpus has been preprocessed using the default sentence tokenizer and wordpuncttokenizer features are composed of most frequent unigrams param trainer train method of a classifier param n_instances the number of total reviews that have to be used for training and testing reviews will be equally split between positive and negative param output the output file where results have to be reported we separately split positive and negative instances to keep a balanced uniform class distribution in both train and test sets add simple unigram word features apply features to obtain a feature value representation of our datasets train and test a classifier on instances of the subjective dataset by pang and lee the dataset is made of 5000 subjective and 5000 objective sentences all tokens words and punctuation marks are separated by a whitespace so we use the basic whitespacetokenizer to parse the data param trainer train method of a classifier param save_analyzer if true store the sentimentanalyzer in a pickle file param n_instances the number of total sentences that have to be used for training and testing sentences will be equally split between positive and negative param output the output file where results have to be reported we separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets add simple unigram word features handling negation apply features to obtain a feature value representation of our datasets classify a single sentence as subjective or objective using a stored sentimentanalyzer param text a sentence whose subjectivity has to be classified tokenize and convert to lower case basic example of sentiment classification using liu and hu opinion lexicon this function simply counts the number of positive negative and neutral words in the sentence and classifies it depending on which polarity is more represented words that do not appear in the lexicon are considered as neutral param sentence a sentence whose polarity has to be classified param plot if true plot a visual representation of the sentence polarity x axis for the plot positive negative neutral output polarity scores for a text using vader approach param text a text whose polarity has to be evaluated classify 10000 positive and negative tweets using vader approach param n_instances the number of total tweets that have to be classified param output the output file where results have to be reported we separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets demo_movie_reviews svm demo_subjectivity svm demo_sent_subjectivity she s an artist but hasn t picked up 
a brush in a year demo_liu_hu_lexicon this movie was actually neither that funny nor super witty plot true demo_vader_instance this movie was actually neither that funny nor super witty demo_vader_tweets
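The negation marking and unigram features described above are easiest to see in a small runnable form; the sketch below simply restates the doctests quoted in this description using mark_negation and extract_unigram_feats from nltk.sentiment.util, with no names beyond the example sentences.

from nltk.sentiment.util import extract_unigram_feats, mark_negation

# Words between a negation trigger and the next clause punctuation get "_NEG".
sent = "I didn't like this movie . It was bad .".split()
print(mark_negation(sent))
# as in the doctest above: ['I', "didn't", 'like_NEG', 'this_NEG', 'movie_NEG', '.', 'It', 'was', 'bad', '.']

# Unigram features record presence/absence of each listed token in the document.
words = ["ice", "police", "riot"]
document = "ice is melting due to global warming".split()
print(sorted(extract_unigram_feats(document, words).items()))
# [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)]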
import codecs import csv import json import pickle import random import re import sys import time from copy import deepcopy import nltk from nltk.corpus import CategorizedPlaintextCorpusReader from nltk.data import load from nltk.tokenize.casual import EMOTICON_RE NEGATION = r""" (?: ^(?:never|no|nothing|nowhere|noone|none|not| havent|hasnt|hadnt|cant|couldnt|shouldnt| wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint )$ ) | n't """ NEGATION_RE = re.compile(NEGATION, re.VERBOSE) CLAUSE_PUNCT = r"^[.:;!?]$" CLAUSE_PUNCT_RE = re.compile(CLAUSE_PUNCT) HAPPY = { ":-)", ":)", ";)", ":o)", ":]", ":3", ":c)", ":>", "=]", "8)", "=)", ":}", ":^)", ":-D", ":D", "8-D", "8D", "x-D", "xD", "X-D", "XD", "=-D", "=D", "=-3", "=3", ":-))", ":'-)", ":')", ":*", ":^*", ">:P", ":-P", ":P", "X-P", "x-p", "xp", "XP", ":-p", ":p", "=p", ":-b", ":b", ">:)", ">;)", ">:-)", "<3", } SAD = { ":L", ":-/", ">:/", ":S", ">:[", ":@", ":-(", ":[", ":-||", "=L", ":<", ":-[", ":-<", "=\\", "=/", ">:(", ":(", ">.<", ":'-(", ":'(", ":\\", ":-c", ":c", ":{", ">:\\", ";(", } def timer(method): def timed(*args, **kw): start = time.time() result = method(*args, **kw) end = time.time() tot_time = end - start hours = tot_time // 3600 mins = tot_time // 60 % 60 secs = int(round(tot_time % 60)) if hours == 0 and mins == 0 and secs < 10: print(f"[TIMER] {method.__name__}(): {tot_time:.3f} seconds") else: print(f"[TIMER] {method.__name__}(): {hours}h {mins}m {secs}s") return result return timed def extract_unigram_feats(document, unigrams, handle_negation=False): features = {} if handle_negation: document = mark_negation(document) for word in unigrams: features[f"contains({word})"] = word in set(document) return features def extract_bigram_feats(document, bigrams): features = {} for bigr in bigrams: features[f"contains({bigr[0]} - {bigr[1]})"] = bigr in nltk.bigrams(document) return features def mark_negation(document, double_neg_flip=False, shallow=False): if not shallow: document = deepcopy(document) labeled = document and isinstance(document[0], (tuple, list)) if labeled: doc = document[0] else: doc = document neg_scope = False for i, word in enumerate(doc): if NEGATION_RE.search(word): if not neg_scope or (neg_scope and double_neg_flip): neg_scope = not neg_scope continue else: doc[i] += "_NEG" elif neg_scope and CLAUSE_PUNCT_RE.search(word): neg_scope = not neg_scope elif neg_scope and not CLAUSE_PUNCT_RE.search(word): doc[i] += "_NEG" return document def output_markdown(filename, **kwargs): with codecs.open(filename, "at") as outfile: text = "\n*** \n\n" text += "{} \n\n".format(time.strftime("%d/%m/%Y, %H:%M")) for k in sorted(kwargs): if isinstance(kwargs[k], dict): dictionary = kwargs[k] text += f" - **{k}:**\n" for entry in sorted(dictionary): text += f" - {entry}: {dictionary[entry]} \n" elif isinstance(kwargs[k], list): text += f" - **{k}:**\n" for entry in kwargs[k]: text += f" - {entry}\n" else: text += f" - **{k}:** {kwargs[k]} \n" outfile.write(text) def split_train_test(all_instances, n=None): random.seed(12345) random.shuffle(all_instances) if not n or n > len(all_instances): n = len(all_instances) train_set = all_instances[: int(0.8 * n)] test_set = all_instances[int(0.8 * n) : n] return train_set, test_set def _show_plot(x_values, y_values, x_labels=None, y_labels=None): try: import matplotlib.pyplot as plt except ImportError as e: raise ImportError( "The plot function requires matplotlib to be installed."
"See https://matplotlib.org/" ) from e plt.locator_params(axis="y", nbins=3) axes = plt.axes() axes.yaxis.grid() plt.plot(x_values, y_values, "ro", color="red") plt.ylim(ymin=-1.2, ymax=1.2) plt.tight_layout(pad=5) if x_labels: plt.xticks(x_values, x_labels, rotation="vertical") if y_labels: plt.yticks([-1, 0, 1], y_labels, rotation="horizontal") plt.margins(0.2) plt.show() def json2csv_preprocess( json_file, outfile, fields, encoding="utf8", errors="replace", gzip_compress=False, skip_retweets=True, skip_tongue_tweets=True, skip_ambiguous_tweets=True, strip_off_emoticons=True, remove_duplicates=True, limit=None, ): with codecs.open(json_file, encoding=encoding) as fp: (writer, outf) = _outf_writer(outfile, encoding, errors, gzip_compress) writer.writerow(fields) if remove_duplicates == True: tweets_cache = [] i = 0 for line in fp: tweet = json.loads(line) row = extract_fields(tweet, fields) try: text = row[fields.index("text")] if skip_retweets == True: if re.search(r"\bRT\b", text): continue if skip_tongue_tweets == True: if re.search(r"\:\-?P\b", text): continue if skip_ambiguous_tweets == True: all_emoticons = EMOTICON_RE.findall(text) if all_emoticons: if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD): continue if strip_off_emoticons == True: row[fields.index("text")] = re.sub( r"(?!\n)\s+", " ", EMOTICON_RE.sub("", text) ) if remove_duplicates == True: if row[fields.index("text")] in tweets_cache: continue else: tweets_cache.append(row[fields.index("text")]) except ValueError: pass writer.writerow(row) i += 1 if limit and i >= limit: break outf.close() def parse_tweets_set( filename, label, word_tokenizer=None, sent_tokenizer=None, skip_header=True ): tweets = [] if not sent_tokenizer: sent_tokenizer = load("tokenizers/punkt/english.pickle") with codecs.open(filename, "rt") as csvfile: reader = csv.reader(csvfile) if skip_header == True: next(reader, None) i = 0 for tweet_id, text in reader: i += 1 sys.stdout.write(f"Loaded {i} tweets\r") if word_tokenizer: tweet = [ w for sent in sent_tokenizer.tokenize(text) for w in word_tokenizer.tokenize(sent) ] else: tweet = text tweets.append((tweet, label)) print(f"Loaded {i} tweets") return tweets def demo_tweets(trainer, n_instances=None, output=None): from nltk.corpus import stopwords, twitter_samples from nltk.sentiment import SentimentAnalyzer from nltk.tokenize import TweetTokenizer tokenizer = TweetTokenizer(preserve_case=False) if n_instances is not None: n_instances = int(n_instances / 2) fields = ["id", "text"] positive_json = twitter_samples.abspath("positive_tweets.json") positive_csv = "positive_tweets.csv" json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances) negative_json = twitter_samples.abspath("negative_tweets.json") negative_csv = "negative_tweets.csv" json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances) neg_docs = parse_tweets_set(negative_csv, label="neg", word_tokenizer=tokenizer) pos_docs = parse_tweets_set(positive_csv, label="pos", word_tokenizer=tokenizer) train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_tweets = train_pos_docs + train_neg_docs testing_tweets = test_pos_docs + test_neg_docs sentim_analyzer = SentimentAnalyzer() all_words = [word for word in sentim_analyzer.all_words(training_tweets)] unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) bigram_collocs_feats = 
sentim_analyzer.bigram_collocation_feats( [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12 ) sentim_analyzer.add_feat_extractor( extract_bigram_feats, bigrams=bigram_collocs_feats ) training_set = sentim_analyzer.apply_features(training_tweets) test_set = sentim_analyzer.apply_features(testing_tweets) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print( "Your classifier does not provide a show_most_informative_features() method." ) results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown( output, Dataset="labeled_tweets", Classifier=type(classifier).__name__, Tokenizer=tokenizer.__class__.__name__, Feats=extr, Results=results, Instances=n_instances, ) def demo_movie_reviews(trainer, n_instances=None, output=None): from nltk.corpus import movie_reviews from nltk.sentiment import SentimentAnalyzer if n_instances is not None: n_instances = int(n_instances / 2) pos_docs = [ (list(movie_reviews.words(pos_id)), "pos") for pos_id in movie_reviews.fileids("pos")[:n_instances] ] neg_docs = [ (list(movie_reviews.words(neg_id)), "neg") for neg_id in movie_reviews.fileids("neg")[:n_instances] ] train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_docs = train_pos_docs + train_neg_docs testing_docs = test_pos_docs + test_neg_docs sentim_analyzer = SentimentAnalyzer() all_words = sentim_analyzer.all_words(training_docs) unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print( "Your classifier does not provide a show_most_informative_features() method." 
) results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown( output, Dataset="Movie_reviews", Classifier=type(classifier).__name__, Tokenizer="WordPunctTokenizer", Feats=extr, Results=results, Instances=n_instances, ) def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None): from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer if n_instances is not None: n_instances = int(n_instances / 2) subj_docs = [ (sent, "subj") for sent in subjectivity.sents(categories="subj")[:n_instances] ] obj_docs = [ (sent, "obj") for sent in subjectivity.sents(categories="obj")[:n_instances] ] train_subj_docs, test_subj_docs = split_train_test(subj_docs) train_obj_docs, test_obj_docs = split_train_test(obj_docs) training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs] ) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print( "Your classifier does not provide a show_most_informative_features() method." ) results = sentim_analyzer.evaluate(test_set) if save_analyzer == True: sentim_analyzer.save_file(sentim_analyzer, "sa_subjectivity.pickle") if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown( output, Dataset="subjectivity", Classifier=type(classifier).__name__, Tokenizer="WhitespaceTokenizer", Feats=extr, Instances=n_instances, Results=results, ) return sentim_analyzer def demo_sent_subjectivity(text): from nltk.classify import NaiveBayesClassifier from nltk.tokenize import regexp word_tokenizer = regexp.WhitespaceTokenizer() try: sentim_analyzer = load("sa_subjectivity.pickle") except LookupError: print("Cannot find the sentiment analyzer you want to load.") print("Training a new one using NaiveBayesClassifier.") sentim_analyzer = demo_subjectivity(NaiveBayesClassifier.train, True) tokenized_text = [word.lower() for word in word_tokenizer.tokenize(text)] print(sentim_analyzer.classify(tokenized_text)) def demo_liu_hu_lexicon(sentence, plot=False): from nltk.corpus import opinion_lexicon from nltk.tokenize import treebank tokenizer = treebank.TreebankWordTokenizer() pos_words = 0 neg_words = 0 tokenized_sent = [word.lower() for word in tokenizer.tokenize(sentence)] x = list(range(len(tokenized_sent))) y = [] for word in tokenized_sent: if word in opinion_lexicon.positive(): pos_words += 1 y.append(1) elif word in opinion_lexicon.negative(): neg_words += 1 y.append(-1) else: y.append(0) if pos_words > neg_words: print("Positive") elif pos_words < neg_words: print("Negative") elif pos_words == neg_words: print("Neutral") if plot == True: _show_plot( x, y, x_labels=tokenized_sent, y_labels=["Negative", "Neutral", "Positive"] ) def demo_vader_instance(text): from nltk.sentiment import SentimentIntensityAnalyzer vader_analyzer = SentimentIntensityAnalyzer() print(vader_analyzer.polarity_scores(text)) def demo_vader_tweets(n_instances=None, output=None): from collections import defaultdict from nltk.corpus import twitter_samples from 
nltk.metrics import accuracy as eval_accuracy from nltk.metrics import f_measure as eval_f_measure from nltk.metrics import precision as eval_precision from nltk.metrics import recall as eval_recall from nltk.sentiment import SentimentIntensityAnalyzer if n_instances is not None: n_instances = int(n_instances / 2) fields = ["id", "text"] positive_json = twitter_samples.abspath("positive_tweets.json") positive_csv = "positive_tweets.csv" json2csv_preprocess( positive_json, positive_csv, fields, strip_off_emoticons=False, limit=n_instances, ) negative_json = twitter_samples.abspath("negative_tweets.json") negative_csv = "negative_tweets.csv" json2csv_preprocess( negative_json, negative_csv, fields, strip_off_emoticons=False, limit=n_instances, ) pos_docs = parse_tweets_set(positive_csv, label="pos") neg_docs = parse_tweets_set(negative_csv, label="neg") train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_tweets = train_pos_docs + train_neg_docs testing_tweets = test_pos_docs + test_neg_docs vader_analyzer = SentimentIntensityAnalyzer() gold_results = defaultdict(set) test_results = defaultdict(set) acc_gold_results = [] acc_test_results = [] labels = set() num = 0 for i, (text, label) in enumerate(testing_tweets): labels.add(label) gold_results[label].add(i) acc_gold_results.append(label) score = vader_analyzer.polarity_scores(text)["compound"] if score > 0: observed = "pos" else: observed = "neg" num += 1 acc_test_results.append(observed) test_results[observed].add(i) metrics_results = {} for label in labels: accuracy_score = eval_accuracy(acc_gold_results, acc_test_results) metrics_results["Accuracy"] = accuracy_score precision_score = eval_precision(gold_results[label], test_results[label]) metrics_results[f"Precision [{label}]"] = precision_score recall_score = eval_recall(gold_results[label], test_results[label]) metrics_results[f"Recall [{label}]"] = recall_score f_measure_score = eval_f_measure(gold_results[label], test_results[label]) metrics_results[f"F-measure [{label}]"] = f_measure_score for result in sorted(metrics_results): print(f"{result}: {metrics_results[result]}") if output: output_markdown( output, Approach="Vader", Dataset="labeled_tweets", Instances=n_instances, Results=metrics_results, ) if __name__ == "__main__": from sklearn.svm import LinearSVC from nltk.classify import MaxentClassifier, NaiveBayesClassifier from nltk.classify.scikitlearn import SklearnClassifier from nltk.twitter.common import _outf_writer, extract_fields naive_bayes = NaiveBayesClassifier.train svm = SklearnClassifier(LinearSVC()).train maxent = MaxentClassifier.train demo_tweets(naive_bayes)
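The demo functions above share one workflow; as a reading aid, here is a condensed sketch of the same SentimentAnalyzer pipeline used by demo_subjectivity, not an addition to the module. It assumes the subjectivity corpus is available (nltk.download('subjectivity')), and the n = 100 slice is only to keep the run quick.

from nltk.classify import NaiveBayesClassifier
from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import extract_unigram_feats, mark_negation, split_train_test

n = 100  # small illustrative slice; the demo above can use the full corpus
subj_docs = [(sent, "subj") for sent in subjectivity.sents(categories="subj")[:n]]
obj_docs = [(sent, "obj") for sent in subjectivity.sents(categories="obj")[:n]]
# split each class separately, as the demos do, to keep both sets balanced
train_subj, test_subj = split_train_test(subj_docs)
train_obj, test_obj = split_train_test(obj_docs)
train_docs, test_docs = train_subj + train_obj, test_subj + test_obj

sentim_analyzer = SentimentAnalyzer()
# mark negations first, then keep unigrams seen at least 4 times as features
all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in train_docs])
unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

training_set = sentim_analyzer.apply_features(train_docs)
test_set = sentim_analyzer.apply_features(test_docs)
sentim_analyzer.train(NaiveBayesClassifier.train, training_set)
print(sentim_analyzer.evaluate(test_set))  # accuracy, precision, recall, F-measure per label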
natural language toolkit vader c 20012023 nltk project c j hutto clayton huttogtri gatech edu ewan klein ewaninf ed ac uk modifications pierpaolo pantone 24alsecondogmail com modifications george berry geb97cornell edu modifications malavika suresh malavika suresh0794gmail com modifications url https www nltk org for license information see license txt modifications to the original vader code have been made in order to integrate it into nltk these have involved changes to ensure python 3 compatibility and refactoring to achieve greater modularity if you use the vader sentiment analysis tools please cite hutto c j gilbert e e 2014 vader a parsimonious rulebased model for sentiment analysis of social media text eighth international conference on weblogs and social media icwsm14 ann arbor mi june 2014 a class to keep the vader lists and constants constants empirically derived mean sentiment intensity rating increase for booster words empirically derived mean sentiment intensity rating increase for using allcaps to emphasize a word boosterdampener intensifiers or degree adverbs https en wiktionary orgwikicategory englishdegreeadverbs check for special case idioms using a sentimentladen keyword known to sage for removing punctuation determine if input contains negation words normalize the score to be between 1 and 1 using an alpha that approximates the max expected value check if the preceding words increase decrease or negatenullify the valence check if boosterdampener word is in allcaps while others aren t identify sentimentrelevant stringlevel properties of input text doesn t separate words from adjacent punctuation keeps emoticons contractions returns mapping of form cat cat cat cat removes punctuation but loses emoticons contractions remove singletons the product gives cat and cat removes leading and trailing puncutation leaves contractions and most emoticons does not preserve puncplusletter emoticons e g d check whether just some words in the input are all caps param list words the words to inspect returns true if some but not all items in words are all caps give a sentiment intensity score to sentences convert lexicon file to a dictionary return a float for sentiment strength based on the input text positive values are positive valence negative value are negative valence note hashtags are not taken into consideration e g bad is neutral if you are interested in processing the text in the hashtags too then we recommend preprocessing your data to remove the after which the hashtag text may be matched as if it was a normal word in the sentence text wordsandemoticons iscapdiff self preprocesstext get the sentiment valence check if sentiment laden word is in all caps while others aren t dampen the scalar modifier of preceding words and emoticons excluding the ones that immediately preceed the item based on their distance from the current item future work consider other sentimentladen idioms otheridioms back handed 2 blow smoke 2 blowing smoke 2 upper hand 1 break a leg 2 cooking with gas 2 in the black 2 in the red 2 on the ball 2 under the weather 2 check for negation case using least check for boosterdampener bigrams such as sort of or kind of add emphasis from exclamation points and question marks check for added emphasis resulting from exclamation points up to 4 of them empirically derived mean sentiment intensity rating increase for exclamation points check for added emphasis resulting from question marks 2 or 3 empirically derived mean sentiment intensity rating increase for question 
marks want separate positive versus negative sentiment scores compute and add emphasis from punctuation in text discriminate between positive negative and neutral sentiment scores natural language toolkit vader c 2001 2023 nltk project c j hutto clayton hutto gtri gatech edu ewan klein ewan inf ed ac uk modifications pierpaolo pantone 24alsecondo gmail com modifications george berry geb97 cornell edu modifications malavika suresh malavika suresh0794 gmail com modifications url https www nltk org for license information see license txt modifications to the original vader code have been made in order to integrate it into nltk these have involved changes to ensure python 3 compatibility and refactoring to achieve greater modularity if you use the vader sentiment analysis tools please cite hutto c j gilbert e e 2014 vader a parsimonious rule based model for sentiment analysis of social media text eighth international conference on weblogs and social media icwsm 14 ann arbor mi june 2014 a class to keep the vader lists and constants constants empirically derived mean sentiment intensity rating increase for booster words empirically derived mean sentiment intensity rating increase for using allcaps to emphasize a word booster dampener intensifiers or degree adverbs https en wiktionary org wiki category english_degree_adverbs check for special case idioms using a sentiment laden keyword known to sage for removing punctuation determine if input contains negation words normalize the score to be between 1 and 1 using an alpha that approximates the max expected value check if the preceding words increase decrease or negate nullify the valence check if booster dampener word is in allcaps while others aren t identify sentiment relevant string level properties of input text doesn t separate words from adjacent punctuation keeps emoticons contractions returns mapping of form cat cat cat cat removes punctuation but loses emoticons contractions remove singletons the product gives cat and cat removes leading and trailing puncutation leaves contractions and most emoticons does not preserve punc plus letter emoticons e g d check whether just some words in the input are all caps param list words the words to inspect returns true if some but not all items in words are all caps give a sentiment intensity score to sentences convert lexicon file to a dictionary return a float for sentiment strength based on the input text positive values are positive valence negative value are negative valence note hashtags are not taken into consideration e g bad is neutral if you are interested in processing the text in the hashtags too then we recommend preprocessing your data to remove the after which the hashtag text may be matched as if it was a normal word in the sentence text words_and_emoticons is_cap_diff self preprocess text get the sentiment valence check if sentiment laden word is in all caps while others aren t dampen the scalar modifier of preceding words and emoticons excluding the ones that immediately preceed the item based on their distance from the current item future work consider other sentiment laden idioms other_idioms back handed 2 blow smoke 2 blowing smoke 2 upper hand 1 break a leg 2 cooking with gas 2 in the black 2 in the red 2 on the ball 2 under the weather 2 check for negation case using least check for booster dampener bi grams such as sort of or kind of add emphasis from exclamation points and question marks check for added emphasis resulting from exclamation points up to 4 of them empirically 
derived mean sentiment intensity rating increase for exclamation points check for added emphasis resulting from question marks 2 or 3 empirically derived mean sentiment intensity rating increase for question marks want separate positive versus negative sentiment scores compensates for neutral words that are counted as 1 when used with math fabs compensates for neutrals compute and add emphasis from punctuation in text discriminate between positive negative and neutral sentiment scores
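The comments above describe squashing the summed word valences into the interval [-1, 1] with an alpha of about 15 that approximates the maximum expected value. The tiny sketch below restates only that rule; the helper name normalize and the sample inputs are illustrative, mirroring the VaderConstants.normalize method defined in the code that follows.

import math

def normalize(score, alpha=15):
    # Same rule as VaderConstants.normalize below: larger raw sums saturate
    # smoothly toward +1 or -1 instead of growing without bound.
    return score / math.sqrt(score * score + alpha)

print(normalize(2.5))    # roughly 0.54
print(normalize(-4.0))   # roughly -0.72
print(normalize(100.0))  # close to 1.0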
import math import re import string from itertools import product import nltk.data from nltk.util import pairwise class VaderConstants: B_INCR = 0.293 B_DECR = -0.293 C_INCR = 0.733 N_SCALAR = -0.74 NEGATE = { "aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt", "ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't", "dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither", "don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't", "neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere", "oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent", "oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't", "without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite", } BOOSTER_DICT = { "absolutely": B_INCR, "amazingly": B_INCR, "awfully": B_INCR, "completely": B_INCR, "considerably": B_INCR, "decidedly": B_INCR, "deeply": B_INCR, "effing": B_INCR, "enormously": B_INCR, "entirely": B_INCR, "especially": B_INCR, "exceptionally": B_INCR, "extremely": B_INCR, "fabulously": B_INCR, "flipping": B_INCR, "flippin": B_INCR, "fricking": B_INCR, "frickin": B_INCR, "frigging": B_INCR, "friggin": B_INCR, "fully": B_INCR, "fucking": B_INCR, "greatly": B_INCR, "hella": B_INCR, "highly": B_INCR, "hugely": B_INCR, "incredibly": B_INCR, "intensely": B_INCR, "majorly": B_INCR, "more": B_INCR, "most": B_INCR, "particularly": B_INCR, "purely": B_INCR, "quite": B_INCR, "really": B_INCR, "remarkably": B_INCR, "so": B_INCR, "substantially": B_INCR, "thoroughly": B_INCR, "totally": B_INCR, "tremendously": B_INCR, "uber": B_INCR, "unbelievably": B_INCR, "unusually": B_INCR, "utterly": B_INCR, "very": B_INCR, "almost": B_DECR, "barely": B_DECR, "hardly": B_DECR, "just enough": B_DECR, "kind of": B_DECR, "kinda": B_DECR, "kindof": B_DECR, "kind-of": B_DECR, "less": B_DECR, "little": B_DECR, "marginally": B_DECR, "occasionally": B_DECR, "partly": B_DECR, "scarcely": B_DECR, "slightly": B_DECR, "somewhat": B_DECR, "sort of": B_DECR, "sorta": B_DECR, "sortof": B_DECR, "sort-of": B_DECR, } SPECIAL_CASE_IDIOMS = { "the shit": 3, "the bomb": 3, "bad ass": 1.5, "yeah right": -2, "cut the mustard": 2, "kiss of death": -1.5, "hand to mouth": -2, } REGEX_REMOVE_PUNCTUATION = re.compile(f"[{re.escape(string.punctuation)}]") PUNC_LIST = [ ".", "!", "?", ",", ";", ":", "-", "'", '"', "!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?", ] def __init__(self): pass def negated(self, input_words, include_nt=True): neg_words = self.NEGATE if any(word.lower() in neg_words for word in input_words): return True if include_nt: if any("n't" in word.lower() for word in input_words): return True for first, second in pairwise(input_words): if second.lower() == "least" and first.lower() != "at": return True return False def normalize(self, score, alpha=15): norm_score = score / math.sqrt((score * score) + alpha) return norm_score def scalar_inc_dec(self, word, valence, is_cap_diff): scalar = 0.0 word_lower = word.lower() if word_lower in self.BOOSTER_DICT: scalar = self.BOOSTER_DICT[word_lower] if valence < 0: scalar *= -1 if word.isupper() and is_cap_diff: if valence > 0: scalar += self.C_INCR else: scalar -= self.C_INCR return scalar class SentiText: def __init__(self, text, punc_list, regex_remove_punctuation): if not isinstance(text, str): text = str(text.encode("utf-8")) self.text = text self.PUNC_LIST = punc_list self.REGEX_REMOVE_PUNCTUATION = regex_remove_punctuation self.words_and_emoticons = 
self._words_and_emoticons() self.is_cap_diff = self.allcap_differential(self.words_and_emoticons) def _words_plus_punc(self): no_punc_text = self.REGEX_REMOVE_PUNCTUATION.sub("", self.text) words_only = no_punc_text.split() words_only = {w for w in words_only if len(w) > 1} punc_before = {"".join(p): p[1] for p in product(self.PUNC_LIST, words_only)} punc_after = {"".join(p): p[0] for p in product(words_only, self.PUNC_LIST)} words_punc_dict = punc_before words_punc_dict.update(punc_after) return words_punc_dict def _words_and_emoticons(self): wes = self.text.split() words_punc_dict = self._words_plus_punc() wes = [we for we in wes if len(we) > 1] for i, we in enumerate(wes): if we in words_punc_dict: wes[i] = words_punc_dict[we] return wes def allcap_differential(self, words): is_different = False allcap_words = 0 for word in words: if word.isupper(): allcap_words += 1 cap_differential = len(words) - allcap_words if 0 < cap_differential < len(words): is_different = True return is_different class SentimentIntensityAnalyzer: def __init__( self, lexicon_file="sentiment/vader_lexicon.zip/vader_lexicon/vader_lexicon.txt", ): self.lexicon_file = nltk.data.load(lexicon_file) self.lexicon = self.make_lex_dict() self.constants = VaderConstants() def make_lex_dict(self): lex_dict = {} for line in self.lexicon_file.split("\n"): (word, measure) = line.strip().split("\t")[0:2] lex_dict[word] = float(measure) return lex_dict def polarity_scores(self, text): sentitext = SentiText( text, self.constants.PUNC_LIST, self.constants.REGEX_REMOVE_PUNCTUATION ) sentiments = [] words_and_emoticons = sentitext.words_and_emoticons for item in words_and_emoticons: valence = 0 i = words_and_emoticons.index(item) if ( i < len(words_and_emoticons) - 1 and item.lower() == "kind" and words_and_emoticons[i + 1].lower() == "of" ) or item.lower() in self.constants.BOOSTER_DICT: sentiments.append(valence) continue sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments) sentiments = self._but_check(words_and_emoticons, sentiments) return self.score_valence(sentiments, text) def sentiment_valence(self, valence, sentitext, item, i, sentiments): is_cap_diff = sentitext.is_cap_diff words_and_emoticons = sentitext.words_and_emoticons item_lowercase = item.lower() if item_lowercase in self.lexicon: valence = self.lexicon[item_lowercase] if item.isupper() and is_cap_diff: if valence > 0: valence += self.constants.C_INCR else: valence -= self.constants.C_INCR for start_i in range(0, 3): if ( i > start_i and words_and_emoticons[i - (start_i + 1)].lower() not in self.lexicon ): s = self.constants.scalar_inc_dec( words_and_emoticons[i - (start_i + 1)], valence, is_cap_diff ) if start_i == 1 and s != 0: s = s * 0.95 if start_i == 2 and s != 0: s = s * 0.9 valence = valence + s valence = self._never_check( valence, words_and_emoticons, start_i, i ) if start_i == 2: valence = self._idioms_check(valence, words_and_emoticons, i) valence = self._least_check(valence, words_and_emoticons, i) sentiments.append(valence) return sentiments def _least_check(self, valence, words_and_emoticons, i): if ( i > 1 and words_and_emoticons[i - 1].lower() not in self.lexicon and words_and_emoticons[i - 1].lower() == "least" ): if ( words_and_emoticons[i - 2].lower() != "at" and words_and_emoticons[i - 2].lower() != "very" ): valence = valence * self.constants.N_SCALAR elif ( i > 0 and words_and_emoticons[i - 1].lower() not in self.lexicon and words_and_emoticons[i - 1].lower() == "least" ): valence = valence * self.constants.N_SCALAR 
return valence def _but_check(self, words_and_emoticons, sentiments): words_and_emoticons = [w_e.lower() for w_e in words_and_emoticons] but = {"but"} & set(words_and_emoticons) if but: bi = words_and_emoticons.index(next(iter(but))) for sidx, sentiment in enumerate(sentiments): if sidx < bi: sentiments[sidx] = sentiment * 0.5 elif sidx > bi: sentiments[sidx] = sentiment * 1.5 return sentiments def _idioms_check(self, valence, words_and_emoticons, i): onezero = f"{words_and_emoticons[i - 1]} {words_and_emoticons[i]}" twoonezero = "{} {} {}".format( words_and_emoticons[i - 2], words_and_emoticons[i - 1], words_and_emoticons[i], ) twoone = f"{words_and_emoticons[i - 2]} {words_and_emoticons[i - 1]}" threetwoone = "{} {} {}".format( words_and_emoticons[i - 3], words_and_emoticons[i - 2], words_and_emoticons[i - 1], ) threetwo = "{} {}".format( words_and_emoticons[i - 3], words_and_emoticons[i - 2] ) sequences = [onezero, twoonezero, twoone, threetwoone, threetwo] for seq in sequences: if seq in self.constants.SPECIAL_CASE_IDIOMS: valence = self.constants.SPECIAL_CASE_IDIOMS[seq] break if len(words_and_emoticons) - 1 > i: zeroone = f"{words_and_emoticons[i]} {words_and_emoticons[i + 1]}" if zeroone in self.constants.SPECIAL_CASE_IDIOMS: valence = self.constants.SPECIAL_CASE_IDIOMS[zeroone] if len(words_and_emoticons) - 1 > i + 1: zeroonetwo = "{} {} {}".format( words_and_emoticons[i], words_and_emoticons[i + 1], words_and_emoticons[i + 2], ) if zeroonetwo in self.constants.SPECIAL_CASE_IDIOMS: valence = self.constants.SPECIAL_CASE_IDIOMS[zeroonetwo] if ( threetwo in self.constants.BOOSTER_DICT or twoone in self.constants.BOOSTER_DICT ): valence = valence + self.constants.B_DECR return valence def _never_check(self, valence, words_and_emoticons, start_i, i): if start_i == 0: if self.constants.negated([words_and_emoticons[i - 1]]): valence = valence * self.constants.N_SCALAR if start_i == 1: if words_and_emoticons[i - 2] == "never" and ( words_and_emoticons[i - 1] == "so" or words_and_emoticons[i - 1] == "this" ): valence = valence * 1.5 elif self.constants.negated([words_and_emoticons[i - (start_i + 1)]]): valence = valence * self.constants.N_SCALAR if start_i == 2: if ( words_and_emoticons[i - 3] == "never" and ( words_and_emoticons[i - 2] == "so" or words_and_emoticons[i - 2] == "this" ) or ( words_and_emoticons[i - 1] == "so" or words_and_emoticons[i - 1] == "this" ) ): valence = valence * 1.25 elif self.constants.negated([words_and_emoticons[i - (start_i + 1)]]): valence = valence * self.constants.N_SCALAR return valence def _punctuation_emphasis(self, sum_s, text): ep_amplifier = self._amplify_ep(text) qm_amplifier = self._amplify_qm(text) punct_emph_amplifier = ep_amplifier + qm_amplifier return punct_emph_amplifier def _amplify_ep(self, text): ep_count = text.count("!") if ep_count > 4: ep_count = 4 ep_amplifier = ep_count * 0.292 return ep_amplifier def _amplify_qm(self, text): qm_count = text.count("?") qm_amplifier = 0 if qm_count > 1: if qm_count <= 3: qm_amplifier = qm_count * 0.18 else: qm_amplifier = 0.96 return qm_amplifier def _sift_sentiment_scores(self, sentiments): pos_sum = 0.0 neg_sum = 0.0 neu_count = 0 for sentiment_score in sentiments: if sentiment_score > 0: pos_sum += ( float(sentiment_score) + 1 ) if sentiment_score < 0: neg_sum += ( float(sentiment_score) - 1 ) if sentiment_score == 0: neu_count += 1 return pos_sum, neg_sum, neu_count def score_valence(self, sentiments, text): if sentiments: sum_s = float(sum(sentiments)) punct_emph_amplifier = 
self._punctuation_emphasis(sum_s, text) if sum_s > 0: sum_s += punct_emph_amplifier elif sum_s < 0: sum_s -= punct_emph_amplifier compound = self.constants.normalize(sum_s) pos_sum, neg_sum, neu_count = self._sift_sentiment_scores(sentiments) if pos_sum > math.fabs(neg_sum): pos_sum += punct_emph_amplifier elif pos_sum < math.fabs(neg_sum): neg_sum -= punct_emph_amplifier total = pos_sum + math.fabs(neg_sum) + neu_count pos = math.fabs(pos_sum / total) neg = math.fabs(neg_sum / total) neu = math.fabs(neu_count / total) else: compound = 0.0 pos = 0.0 neg = 0.0 neu = 0.0 sentiment_dict = { "neg": round(neg, 3), "neu": round(neu, 3), "pos": round(pos, 3), "compound": round(compound, 4), } return sentiment_dict
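A minimal usage sketch of the analyzer defined above. It assumes the lexicon has been fetched once with nltk.download('vader_lexicon'); the first sentence is only illustrative, and the second is the one used by demo_vader_instance earlier.

from nltk.sentiment import SentimentIntensityAnalyzer

sia = SentimentIntensityAnalyzer()
# polarity_scores returns the 'neg', 'neu', 'pos' proportions plus the
# normalized 'compound' value produced by score_valence above.
print(sia.polarity_scores("VADER is smart, handsome, and funny!"))
print(sia.polarity_scores("This movie was actually neither that funny, nor super witty."))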
natural language toolkit stemmers c 2001 2023 nltk project trevor cohn tacohn cs mu oz au edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt nltk stemmers interfaces used to remove morphological affixes from words leaving only the word stem stemming algorithms aim to remove those affixes required for eg grammatical role tense derivational morphology leaving only the stem of the word this is a difficult problem due to irregular words eg common verbs in english complicated morphological rules and part of speech and sense ambiguities eg ceil is not the stem of ceiling stemmeri defines a standard interface for stemmers
from nltk.stem.api import StemmerI from nltk.stem.arlstem import ARLSTem from nltk.stem.arlstem2 import ARLSTem2 from nltk.stem.cistem import Cistem from nltk.stem.isri import ISRIStemmer from nltk.stem.lancaster import LancasterStemmer from nltk.stem.porter import PorterStemmer from nltk.stem.regexp import RegexpStemmer from nltk.stem.rslp import RSLPStemmer from nltk.stem.snowball import SnowballStemmer from nltk.stem.wordnet import WordNetLemmatizer
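A brief, hedged sketch of the package interface these imports expose; the sample words and the commented outputs are typical examples rather than guarantees, and the lemmatizer additionally needs the wordnet resource from nltk.download.

from nltk.stem import PorterStemmer, SnowballStemmer, WordNetLemmatizer

porter = PorterStemmer()
print(porter.stem("running"))        # usually 'run'

snowball = SnowballStemmer("english")
print(snowball.stem("generously"))   # usually 'generous'

wnl = WordNetLemmatizer()
print(wnl.lemmatize("corpora"))      # usually 'corpus' (requires the wordnet data)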
natural language toolkit arlstem stemmer c 20012023 nltk project kheireddine abainia xprogramer k abainiagmail com algorithms kheireddine abainia k abainiagmail com siham ouamour halim sayoud url https www nltk org for license information see license txt arlstem arabic stemmer the details about the implementation of this algorithm are described in k abainia s ouamour and h sayoud a novel robust arabic light stemmer journal of experimental theoretical artificial intelligence jetai 17 vol 29 no 3 2017 pp 557573 the arlstem is a light arabic stemmer that is based on removing the affixes from the word i e prefixes suffixes and infixes it was evaluated and compared to several other stemmers using paice s parameters understemming index overstemming index and stemming weight and the results showed that arlstem is promising and producing high performances this stemmer is not based on any dictionary and can be used online effectively arlstem stemmer a light arabic stemming algorithm without any dictionary department of telecommunication information processing usthb university algiers algeria arlstem stemtoken returns the arabic stem for the input token the arlstem stemmer requires that all tokens are encoded using unicode encoding different alif with hamza alif laam laam laam fa laam fa ba ba alif laam kaaf alif laam waaw alif laam fa laam laam waaw laam laam fa ba alif laam waaw ba alif laam fa kaaf alif laam kaf yaa kaf miim ha alif ha miim kaf miim alif kaf noon shadda ha miim alif ha noon shadda alif noon ya noon waaw noon taa alif noon taa ya noon alif noon waaw noon siin taa siin yaa siin alif siin noon lam noon lam taa lam yaa lam hamza taa miim alif taa noon shadda noon alif taa miim taa alif waaw alif taa alif noon call this function to get the word s stem based on arlstem remove arabic diacritics and replace some letters with others strip common prefixes of the nouns strip the suffixes which are common to nouns and verbs transform a plural noun to a singular noun transform from the feminine form to the masculine form strip the verb prefixes and suffixes normalize the word by removing diacritics replacing hamzated alif with alif replacing alifmaqsura with yaa and removing waaw at the beginning strip arabic diacritics replace hamzated alif with alif bare replace alifmaqsura with yaa strip the waaw from the word beginning if the remaining is 3 letters at least remove prefixes from the words beginning remove suffixes from the word s end transform the word from the feminine form to the masculine form transform the word from the plural form to the singular form stem the verb prefixes and suffixes or both stem the present prefixes and suffixes waaw alif yaa alif noon yaa noon taa noon stem the future prefixes and suffixes siin taa siin yaa alif noon siin yaa waaw noon siin taa noon siin yaa noon stem the present suffixes stem the present prefixes stem the future prefixes stem the order prefixes natural language toolkit arlstem stemmer c 2001 2023 nltk project kheireddine abainia x programer k abainia gmail com algorithms kheireddine abainia k abainia gmail com siham ouamour halim sayoud url https www nltk org for license information see license txt arlstem arabic stemmer the details about the implementation of this algorithm are described in k abainia s ouamour and h sayoud a novel robust arabic light stemmer journal of experimental theoretical artificial intelligence jetai 17 vol 29 no 3 2017 pp 557 573 the arlstem is a light arabic stemmer that is based on removing the affixes from the word i 
e prefixes suffixes and infixes it was evaluated and compared to several other stemmers using paice s parameters under stemming index over stemming index and stemming weight and the results showed that arlstem is promising and producing high performances this stemmer is not based on any dictionary and can be used on line effectively arlstem stemmer a light arabic stemming algorithm without any dictionary department of telecommunication information processing usthb university algiers algeria arlstem stem token returns the arabic stem for the input token the arlstem stemmer requires that all tokens are encoded using unicode encoding different alif with hamza alif laam laam laam fa laam fa ba ba alif laam kaaf alif laam waaw alif laam fa laam laam waaw laam laam fa ba alif laam waaw ba alif laam fa kaaf alif laam kaf yaa kaf miim ha alif ha miim kaf miim alif kaf noon shadda ha miim alif ha noon shadda alif noon ya noon waaw noon taa alif noon taa ya noon alif noon waaw noon siin taa siin yaa siin alif siin noon lam noon lam taa lam yaa lam hamza taa miim alif taa noon shadda noon alif taa miim taa alif waaw alif taa alif noon call this function to get the word s stem based on arlstem remove arabic diacritics and replace some letters with others strip common prefixes of the nouns strip the suffixes which are common to nouns and verbs transform a plural noun to a singular noun transform from the feminine form to the masculine form if the prefixes are not stripped strip the verb prefixes and suffixes normalize the word by removing diacritics replacing hamzated alif with alif replacing alifmaqsura with yaa and removing waaw at the beginning strip arabic diacritics replace hamzated alif with alif bare replace alifmaqsura with yaa strip the waaw from the word beginning if the remaining is 3 letters at least remove prefixes from the words beginning remove suffixes from the word s end transform the word from the feminine form to the masculine form transform the word from the plural form to the singular form stem the verb prefixes and suffixes or both stem the present prefixes and suffixes taa yaa alif waaw alif yaa alif noon yaa noon taa noon stem the future prefixes and suffixes siin taa siin yaa alif noon siin yaa waaw noon siin taa noon siin yaa noon stem the present suffixes stem the present prefixes stem the future prefixes stem the order prefixes
import re from nltk.stem.api import StemmerI class ARLSTem(StemmerI): def __init__(self): self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") self.re_alifMaqsura = re.compile(r"[\u0649]") self.re_diacritics = re.compile(r"[\u064B-\u065F]") self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] self.pr4 = [ "\u0641\u0628\u0627\u0644", "\u0648\u0628\u0627\u0644", "\u0641\u0643\u0627\u0644", ] self.su2 = ["\u0643\u064A", "\u0643\u0645"] self.su22 = ["\u0647\u0627", "\u0647\u0645"] self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] self.verb_pr33 = [ "\u0644\u0646", "\u0644\u062A", "\u0644\u064A", "\u0644\u0623", ] self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] self.verb_suf2 = [ "\u0646\u0627", "\u062A\u0645", "\u062A\u0627", "\u0648\u0627", ] self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] def stem(self, token): try: if token is None: raise ValueError( "The word could not be stemmed, because \ it is empty !" ) token = self.norm(token) pre = self.pref(token) if pre is not None: token = pre token = self.suff(token) ps = self.plur2sing(token) if ps is None: fm = self.fem2masc(token) if fm is not None: return fm else: if pre is None: return self.verb(token) else: return ps return token except ValueError as e: print(e) def norm(self, token): token = self.re_diacritics.sub("", token) token = self.re_hamzated_alif.sub("\u0627", token) token = self.re_alifMaqsura.sub("\u064A", token) if token.startswith("\u0648") and len(token) > 3: token = token[1:] return token def pref(self, token): if len(token) > 5: for p3 in self.pr3: if token.startswith(p3): return token[3:] if len(token) > 6: for p4 in self.pr4: if token.startswith(p4): return token[4:] if len(token) > 5: for p3 in self.pr32: if token.startswith(p3): return token[3:] if len(token) > 4: for p2 in self.pr2: if token.startswith(p2): return token[2:] def suff(self, token): if token.endswith("\u0643") and len(token) > 3: return token[:-1] if len(token) > 4: for s2 in self.su2: if token.endswith(s2): return token[:-2] if len(token) > 5: for s3 in self.su3: if token.endswith(s3): return token[:-3] if token.endswith("\u0647") and len(token) > 3: token = token[:-1] return token if len(token) > 4: for s2 in self.su22: if token.endswith(s2): return token[:-2] if len(token) > 5: for s3 in self.su32: if token.endswith(s3): return token[:-3] if token.endswith("\u0646\u0627") and len(token) > 4: return token[:-2] return token def fem2masc(self, token): if token.endswith("\u0629") and len(token) > 3: return token[:-1] def plur2sing(self, token): if len(token) > 4: for ps2 in self.pl_si2: if token.endswith(ps2): return token[:-2] if len(token) > 5: for ps3 in self.pl_si3: if token.endswith(ps3): return token[:-3] if len(token) > 3 and token.endswith("\u0627\u062A"): return token[:-2] if len(token) > 3 and token.startswith("\u0627") and token[2] == "\u0627": return token[:2] + token[3:] if len(token) > 4 and token.startswith("\u0627") and token[-2] == "\u0627": return token[1:-2] + token[-1] def verb(self, token): vb = self.verb_t1(token) if vb is not None: 
return vb vb = self.verb_t2(token) if vb is not None: return vb vb = self.verb_t3(token) if vb is not None: return vb vb = self.verb_t4(token) if vb is not None: return vb vb = self.verb_t5(token) if vb is not None: return vb return self.verb_t6(token) def verb_t1(self, token): if len(token) > 5 and token.startswith("\u062A"): for s2 in self.pl_si2: if token.endswith(s2): return token[1:-2] if len(token) > 5 and token.startswith("\u064A"): for s2 in self.verb_su2: if token.endswith(s2): return token[1:-2] if len(token) > 4 and token.startswith("\u0627"): if len(token) > 5 and token.endswith("\u0648\u0627"): return token[1:-2] if token.endswith("\u064A"): return token[1:-1] if token.endswith("\u0627"): return token[1:-1] if token.endswith("\u0646"): return token[1:-1] if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): return token[1:-1] if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): return token[1:-1] def verb_t2(self, token): if len(token) > 6: for s2 in self.pl_si2: if token.startswith(self.verb_pr2[0]) and token.endswith(s2): return token[2:-2] if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): return token[2:-2] if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): return token[2:-2] if ( len(token) > 5 and token.startswith(self.verb_pr2[0]) and token.endswith("\u0646") ): return token[2:-1] if ( len(token) > 5 and token.startswith(self.verb_pr2[1]) and token.endswith("\u0646") ): return token[2:-1] def verb_t3(self, token): if len(token) > 5: for su3 in self.verb_suf3: if token.endswith(su3): return token[:-3] if len(token) > 4: for su2 in self.verb_suf2: if token.endswith(su2): return token[:-2] if len(token) > 3: for su1 in self.verb_suf1: if token.endswith(su1): return token[:-1] def verb_t4(self, token): if len(token) > 3: for pr1 in self.verb_suf1: if token.startswith(pr1): return token[1:] if token.startswith("\u064A"): return token[1:] def verb_t5(self, token): if len(token) > 4: for pr2 in self.verb_pr22: if token.startswith(pr2): return token[2:] for pr2 in self.verb_pr2: if token.startswith(pr2): return token[2:] return token def verb_t6(self, token): if len(token) > 4: for pr3 in self.verb_pr33: if token.startswith(pr3): return token[2:] return token
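A small usage sketch of the stemmer defined above. The Arabic example word is the one quoted in the ARLSTem2 description further down; with this class it is likewise reduced by stripping the present-tense prefix, and the stem noted in the comment is an informal expectation rather than part of the original code.

from nltk.stem import ARLSTem

stemmer = ARLSTem()
# norm() drops diacritics, pref()/suff() strip noun affixes, and verb()
# handles verb prefixes such as the leading yaa in يعمل.
print(stemmer.stem("يعمل"))  # expected: عمل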
Natural Language Toolkit: ARLSTem Stemmer v2
(C) 2001-2023 NLTK Project
Author: Kheireddine Abainia (x.programer) <k.abainia@gmail.com>
Algorithms: Kheireddine Abainia <k.abainia@gmail.com>, Hamza Rebbani <hamrebbani@gmail.com>
URL: <https://www.nltk.org>
For license information, see LICENSE.TXT

ARLSTem2 Arabic Light Stemmer. The details of the implementation are described in:
K. Abainia and H. Rebbani, "Comparing the Effectiveness of the Improved ARLSTem Algorithm with Existing Arabic Light Stemmers", International Conference on Theoretical and Applicative Aspects of Computer Science (ICTAACS'19), Skikda, Algeria, December 15-16, 2019.

ARLSTem2 is an Arabic light stemmer based on removing the affixes from words, i.e. prefixes, suffixes and infixes. It is an improvement of the previous Arabic light stemmer (ARLSTem). The new version was compared to the original algorithm and to several existing Arabic light stemmers, and the results showed that it considerably reduces the under-stemming errors that are common to light stemmers. Both ARLSTem and ARLSTem2 can be run online and do not use any dictionary.

stem(token) returns a stemmed Arabic word after removing affixes; it is typically used in Arabic search engines, information retrieval and NLP.

    >>> from nltk.stem import ARLSTem2
    >>> stemmer = ARLSTem2()
    >>> word = stemmer.stem('يعمل')
    >>> print(word)
    عمل

:param token: the input Arabic word (unicode) to be stemmed
:type token: unicode
:return: a unicode Arabic word

The first pass (stem1) normalizes the word (removes diacritics, replaces hamzated alif with bare alif, replaces alif maqsura with yaa and strips an initial waaw), strips the common noun prefixes, transforms the feminine form to the masculine form, strips the adjective affixes, strips the suffixes common to nouns and verbs and transforms a plural noun to a singular noun; if the noun prefixes were not stripped, it also strips the verb prefixes and suffixes (present, future and imperative tenses, including co-occurring prefix/suffix pairs). The second pass (stem) then checks for a few additional noun affixes.
import re from nltk.stem.api import StemmerI class ARLSTem2(StemmerI): def __init__(self): self.re_hamzated_alif = re.compile(r"[\u0622\u0623\u0625]") self.re_alifMaqsura = re.compile(r"[\u0649]") self.re_diacritics = re.compile(r"[\u064B-\u065F]") self.pr2 = ["\u0627\u0644", "\u0644\u0644", "\u0641\u0644", "\u0641\u0628"] self.pr3 = ["\u0628\u0627\u0644", "\u0643\u0627\u0644", "\u0648\u0627\u0644"] self.pr32 = ["\u0641\u0644\u0644", "\u0648\u0644\u0644"] self.pr4 = [ "\u0641\u0628\u0627\u0644", "\u0648\u0628\u0627\u0644", "\u0641\u0643\u0627\u0644", ] self.su2 = ["\u0643\u064A", "\u0643\u0645"] self.su22 = ["\u0647\u0627", "\u0647\u0645"] self.su3 = ["\u0643\u0645\u0627", "\u0643\u0646\u0651"] self.su32 = ["\u0647\u0645\u0627", "\u0647\u0646\u0651"] self.pl_si2 = ["\u0627\u0646", "\u064A\u0646", "\u0648\u0646"] self.pl_si3 = ["\u062A\u0627\u0646", "\u062A\u064A\u0646"] self.verb_su2 = ["\u0627\u0646", "\u0648\u0646"] self.verb_pr2 = ["\u0633\u062A", "\u0633\u064A"] self.verb_pr22 = ["\u0633\u0627", "\u0633\u0646"] self.verb_pr33 = [ "\u0644\u0646", "\u0644\u062A", "\u0644\u064A", "\u0644\u0623", ] self.verb_suf3 = ["\u062A\u0645\u0627", "\u062A\u0646\u0651"] self.verb_suf2 = [ "\u0646\u0627", "\u062A\u0645", "\u062A\u0627", "\u0648\u0627", ] self.verb_suf1 = ["\u062A", "\u0627", "\u0646"] def stem1(self, token): try: if token is None: raise ValueError( "The word could not be stemmed, because \ it is empty !" ) self.is_verb = False token = self.norm(token) pre = self.pref(token) if pre is not None: token = pre fm = self.fem2masc(token) if fm is not None: return fm adj = self.adjective(token) if adj is not None: return adj token = self.suff(token) ps = self.plur2sing(token) if ps is None: if pre is None: verb = self.verb(token) if verb is not None: self.is_verb = True return verb else: return ps return token except ValueError as e: print(e) def stem(self, token): try: if token is None: raise ValueError( "The word could not be stemmed, because \ it is empty !" 
) token = self.stem1(token) if len(token) > 4: if token.startswith("\u062A") and token[-2] == "\u064A": token = token[1:-2] + token[-1] return token if token.startswith("\u0645") and token[-2] == "\u0648": token = token[1:-2] + token[-1] return token if len(token) > 3: if not token.startswith("\u0627") and token.endswith("\u064A"): token = token[:-1] return token if token.startswith("\u0644"): return token[1:] return token except ValueError as e: print(e) def norm(self, token): token = self.re_diacritics.sub("", token) token = self.re_hamzated_alif.sub("\u0627", token) token = self.re_alifMaqsura.sub("\u064A", token) if token.startswith("\u0648") and len(token) > 3: token = token[1:] return token def pref(self, token): if len(token) > 5: for p3 in self.pr3: if token.startswith(p3): return token[3:] if len(token) > 6: for p4 in self.pr4: if token.startswith(p4): return token[4:] if len(token) > 5: for p3 in self.pr32: if token.startswith(p3): return token[3:] if len(token) > 4: for p2 in self.pr2: if token.startswith(p2): return token[2:] def adjective(self, token): if len(token) > 5: if ( token.startswith("\u0627") and token[-3] == "\u0627" and token.endswith("\u064A") ): return token[:-3] + token[-2] def suff(self, token): if token.endswith("\u0643") and len(token) > 3: return token[:-1] if len(token) > 4: for s2 in self.su2: if token.endswith(s2): return token[:-2] if len(token) > 5: for s3 in self.su3: if token.endswith(s3): return token[:-3] if token.endswith("\u0647") and len(token) > 3: token = token[:-1] return token if len(token) > 4: for s2 in self.su22: if token.endswith(s2): return token[:-2] if len(token) > 5: for s3 in self.su32: if token.endswith(s3): return token[:-3] if token.endswith("\u0646\u0627") and len(token) > 4: return token[:-2] return token def fem2masc(self, token): if len(token) > 6: if ( token.startswith("\u062A") and token[-4] == "\u064A" and token.endswith("\u064A\u0629") ): return token[1:-4] + token[-3] if ( token.startswith("\u0627") and token[-4] == "\u0627" and token.endswith("\u064A\u0629") ): return token[:-4] + token[-3] if token.endswith("\u0627\u064A\u0629") and len(token) > 5: return token[:-2] if len(token) > 4: if token[1] == "\u0627" and token.endswith("\u0629"): return token[0] + token[2:-1] if token.endswith("\u064A\u0629"): return token[:-2] if token.endswith("\u0629") and len(token) > 3: return token[:-1] def plur2sing(self, token): if len(token) > 5: if token.startswith("\u0645") and token.endswith("\u0648\u0646"): return token[1:-2] if len(token) > 4: for ps2 in self.pl_si2: if token.endswith(ps2): return token[:-2] if len(token) > 5: for ps3 in self.pl_si3: if token.endswith(ps3): return token[:-3] if len(token) > 4: if token.endswith("\u0627\u062A"): return token[:-2] if token.startswith("\u0627") and token[2] == "\u0627": return token[:2] + token[3:] if token.startswith("\u0627") and token[-2] == "\u0627": return token[1:-2] + token[-1] def verb(self, token): vb = self.verb_t1(token) if vb is not None: return vb vb = self.verb_t2(token) if vb is not None: return vb vb = self.verb_t3(token) if vb is not None: return vb vb = self.verb_t4(token) if vb is not None: return vb vb = self.verb_t5(token) if vb is not None: return vb vb = self.verb_t6(token) return vb def verb_t1(self, token): if len(token) > 5 and token.startswith("\u062A"): for s2 in self.pl_si2: if token.endswith(s2): return token[1:-2] if len(token) > 5 and token.startswith("\u064A"): for s2 in self.verb_su2: if token.endswith(s2): return token[1:-2] if len(token) > 4 and 
token.startswith("\u0627"): if len(token) > 5 and token.endswith("\u0648\u0627"): return token[1:-2] if token.endswith("\u064A"): return token[1:-1] if token.endswith("\u0627"): return token[1:-1] if token.endswith("\u0646"): return token[1:-1] if len(token) > 4 and token.startswith("\u064A") and token.endswith("\u0646"): return token[1:-1] if len(token) > 4 and token.startswith("\u062A") and token.endswith("\u0646"): return token[1:-1] def verb_t2(self, token): if len(token) > 6: for s2 in self.pl_si2: if token.startswith(self.verb_pr2[0]) and token.endswith(s2): return token[2:-2] if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[0]): return token[2:-2] if token.startswith(self.verb_pr2[1]) and token.endswith(self.pl_si2[2]): return token[2:-2] if ( len(token) > 5 and token.startswith(self.verb_pr2[0]) and token.endswith("\u0646") ): return token[2:-1] if ( len(token) > 5 and token.startswith(self.verb_pr2[1]) and token.endswith("\u0646") ): return token[2:-1] def verb_t3(self, token): if len(token) > 5: for su3 in self.verb_suf3: if token.endswith(su3): return token[:-3] if len(token) > 4: for su2 in self.verb_suf2: if token.endswith(su2): return token[:-2] if len(token) > 3: for su1 in self.verb_suf1: if token.endswith(su1): return token[:-1] def verb_t4(self, token): if len(token) > 3: for pr1 in self.verb_suf1: if token.startswith(pr1): return token[1:] if token.startswith("\u064A"): return token[1:] def verb_t5(self, token): if len(token) > 4: for pr2 in self.verb_pr22: if token.startswith(pr2): return token[2:] for pr2 in self.verb_pr2: if token.startswith(pr2): return token[2:] def verb_t6(self, token): if len(token) > 4: for pr3 in self.verb_pr33: if token.startswith(pr3): return token[2:] return token
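For reference, a minimal usage sketch of the class defined above (assuming it is exported from nltk.stem, as the doctest in its description shows; the expected output is the one given in that doctest):

from nltk.stem import ARLSTem2

stemmer = ARLSTem2()
print(stemmer.stem("يعمل"))          # -> عمل, as in the doctest above

# stem1() runs only the first round of stemming and sets the is_verb flag
# as a side effect when a verb affix was stripped.
print(stemmer.stem1("يعمل"), stemmer.is_verb)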
Natural Language Toolkit: CISTEM Stemmer for German
(C) 2001-2023 NLTK Project
Author: Leonie Weissweiler <l.weissweiler@outlook.de>, Tom Aarsen (modifications)
Algorithm: Leonie Weissweiler <l.weissweiler@outlook.de>, Alexander Fraser <fraser@cis.lmu.de>
URL: <https://www.nltk.org>
For license information, see LICENSE.TXT

CISTEM Stemmer for German

This is the official Python implementation of the CISTEM stemmer. It is based on the paper: Leonie Weissweiler, Alexander Fraser (2017). "Developing a Stemmer for German Based on a Comparative Analysis of Publicly Available Stemmers". In Proceedings of the German Society for Computational Linguistics and Language Technology (GSCL), which can be read at https://www.cis.lmu.de/~weissweiler/cistem/

In the paper, we conducted an analysis of publicly available stemmers, developed two gold standards for German stemming and evaluated the stemmers based on the two gold standards. We then proposed the stemmer implemented here and showed that it achieves a slightly better f-measure than the other stemmers and is thrice as fast as the Snowball stemmer for German, while being about as fast as most other stemmers.

case_insensitive is a boolean specifying whether case-insensitive stemming should be used. Case insensitivity improves performance only if words in the text may be incorrectly upper case; for all-lowercase and correctly cased text, best performance is achieved by setting case_insensitive to False.

:param case_insensitive: if True, the stemming is case insensitive. False by default.
:type case_insensitive: bool

stem(word) stems the input word:

    >>> from nltk.stem.cistem import Cistem
    >>> stemmer = Cistem()
    >>> s1 = "speicherbehältern"
    >>> stemmer.stem(s1)
    'speicherbehalt'
    >>> s2 = "grenzpostens"
    >>> stemmer.stem(s2)
    'grenzpost'
    >>> s3 = "ausgefeiltere"
    >>> stemmer.stem(s3)
    'ausgefeilt'
    >>> stemmer = Cistem(True)
    >>> stemmer.stem(s1)
    'speicherbehal'
    >>> stemmer.stem(s2)
    'grenzpo'
    >>> stemmer.stem(s3)
    'ausgefeil'

segment(word) works very similarly to stem(), but in addition to returning the stem it also returns the suffix that was removed at the end, so that the stem and the rest can be concatenated to form the original word. To make this possible, all substitutions that altered the stem in any way other than removing letters at the end were left out.

    >>> stemmer = Cistem()
    >>> stemmer.segment(s1)
    ('speicherbehält', 'ern')
    >>> stemmer.segment(s2)
    ('grenzpost', 'ens')
    >>> stemmer.segment(s3)
    ('ausgefeilt', 'ere')
    >>> stemmer = Cistem(True)
    >>> stemmer.segment(s1)
    ('speicherbehäl', 'tern')
    >>> stemmer.segment(s2)
    ('grenzpo', 'stens')
    >>> stemmer.segment(s3)
    ('ausgefeil', 'tere')

_segment_inner(word, upper) is the inner method that iteratively applies the stemming regexes: it receives a pre-processed variant of the word to be stemmed (or segmented) and a flag saying whether the original word started with a capital letter, and returns a tuple of the stemmed word and the removed suffix. stem() pre-processes the word before calling it and post-processes the result afterwards.
import re from typing import Tuple from nltk.stem.api import StemmerI class Cistem(StemmerI): strip_ge = re.compile(r"^ge(.{4,})") repl_xx = re.compile(r"(.)\1") strip_emr = re.compile(r"e[mr]$") strip_nd = re.compile(r"nd$") strip_t = re.compile(r"t$") strip_esn = re.compile(r"[esn]$") repl_xx_back = re.compile(r"(.)\*") def __init__(self, case_insensitive: bool = False): self._case_insensitive = case_insensitive @staticmethod def replace_to(word: str) -> str: word = word.replace("sch", "$") word = word.replace("ei", "%") word = word.replace("ie", "&") word = Cistem.repl_xx.sub(r"\1*", word) return word @staticmethod def replace_back(word: str) -> str: word = Cistem.repl_xx_back.sub(r"\1\1", word) word = word.replace("%", "ei") word = word.replace("&", "ie") word = word.replace("$", "sch") return word def stem(self, word: str) -> str: if len(word) == 0: return word upper = word[0].isupper() word = word.lower() word = word.replace("ü", "u") word = word.replace("ö", "o") word = word.replace("ä", "a") word = word.replace("ß", "ss") word = Cistem.strip_ge.sub(r"\1", word) return self._segment_inner(word, upper)[0] def segment(self, word: str) -> Tuple[str, str]: if len(word) == 0: return ("", "") upper = word[0].isupper() word = word.lower() return self._segment_inner(word, upper) def _segment_inner(self, word: str, upper: bool): rest_length = 0 word_copy = word[:] word = Cistem.replace_to(word) rest = "" while len(word) > 3: if len(word) > 5: word, n = Cistem.strip_emr.subn("", word) if n != 0: rest_length += 2 continue word, n = Cistem.strip_nd.subn("", word) if n != 0: rest_length += 2 continue if not upper or self._case_insensitive: word, n = Cistem.strip_t.subn("", word) if n != 0: rest_length += 1 continue word, n = Cistem.strip_esn.subn("", word) if n != 0: rest_length += 1 continue else: break word = Cistem.replace_back(word) if rest_length: rest = word_copy[-rest_length:] return (word, rest)
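A short usage sketch of Cistem, restating the doctest examples above; the only assumption is that the module is importable as nltk.stem.cistem (the path used in its own doctest):

from nltk.stem.cistem import Cistem

stemmer = Cistem()
# stem() returns only the stem; segment() also returns the removed ending,
# so the two parts concatenate back to the lower-cased input.
print(stemmer.stem("speicherbehältern"))      # 'speicherbehalt'
print(stemmer.segment("speicherbehältern"))   # ('speicherbehält', 'ern')

case_insensitive = Cistem(case_insensitive=True)
print(case_insensitive.segment("speicherbehältern"))  # ('speicherbehäl', 'tern')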
Natural Language Toolkit: The ISRI Arabic Stemmer
(C) 2001-2023 NLTK Project
Algorithm: Kazem Taghva, Rania Elkhoury and Jeffrey Coombs (2005)
Author: Hosam Algasaier <hosam_hme@yahoo.com>
URL: <https://www.nltk.org>
For license information, see LICENSE.TXT

ISRI Arabic Stemmer

The algorithm for this stemmer is described in:
Taghva, K., Elkoury, R., and Coombs, J. 2005. "Arabic Stemming without a root dictionary." Information Science Research Institute, University of Nevada, Las Vegas, USA.

The Information Science Research Institute's (ISRI) Arabic stemmer shares many features with the Khoja stemmer. However, the main difference is that the ISRI stemmer does not use a root dictionary. Also, if a root is not found, the ISRI stemmer returns the normalized form rather than returning the original unmodified word.

Additional adjustments were made to improve the algorithm:
1) Adding 60 stop words.
2) Adding the pattern (تفاعيل) to the ISRI pattern set.
3) Step 2 of the original algorithm normalized all hamza; this step is discarded because it increases word ambiguities and changes the original root.

ISRIStemmer is based on the algorithm "Arabic Stemming without a root dictionary"; a few minor modifications have been made to the basic ISRI algorithm (see the source code of this module for more information). isri.stem(token) returns the Arabic root for the given token. The ISRI stemmer requires that all tokens are Unicode strings; if you use Python IDLE on Arabic Windows you have to decode text first using Arabic '1256' coding.

The class stores tables of length-three, length-two and length-one prefixes and suffixes, groups of length-four patterns, and groups of length-five patterns with their length-three roots. stem() removes the diacritics representing Arabic short vowels, excludes stop words from being processed, removes length-three and then length-two prefixes and suffixes (in this order), removes a connective waaw when it precedes a word beginning with waaw, and normalizes an initial hamza to bare alif; then, if the word length is between 4 and 7, it applies the pattern-processing steps that extract length-three or length-four roots, otherwise no further stemming is done.
import re from nltk.stem.api import StemmerI class ISRIStemmer(StemmerI): def __init__(self): self.p3 = [ "\u0643\u0627\u0644", "\u0628\u0627\u0644", "\u0648\u0644\u0644", "\u0648\u0627\u0644", ] self.p2 = ["\u0627\u0644", "\u0644\u0644"] self.p1 = [ "\u0644", "\u0628", "\u0641", "\u0633", "\u0648", "\u064a", "\u062a", "\u0646", "\u0627", ] self.s3 = [ "\u062a\u0645\u0644", "\u0647\u0645\u0644", "\u062a\u0627\u0646", "\u062a\u064a\u0646", "\u0643\u0645\u0644", ] self.s2 = [ "\u0648\u0646", "\u0627\u062a", "\u0627\u0646", "\u064a\u0646", "\u062a\u0646", "\u0643\u0645", "\u0647\u0646", "\u0646\u0627", "\u064a\u0627", "\u0647\u0627", "\u062a\u0645", "\u0643\u0646", "\u0646\u064a", "\u0648\u0627", "\u0645\u0627", "\u0647\u0645", ] self.s1 = ["\u0629", "\u0647", "\u064a", "\u0643", "\u062a", "\u0627", "\u0646"] self.pr4 = { 0: ["\u0645"], 1: ["\u0627"], 2: ["\u0627", "\u0648", "\u064A"], 3: ["\u0629"], } self.pr53 = { 0: ["\u0627", "\u062a"], 1: ["\u0627", "\u064a", "\u0648"], 2: ["\u0627", "\u062a", "\u0645"], 3: ["\u0645", "\u064a", "\u062a"], 4: ["\u0645", "\u062a"], 5: ["\u0627", "\u0648"], 6: ["\u0627", "\u0645"], } self.re_short_vowels = re.compile(r"[\u064B-\u0652]") self.re_hamza = re.compile(r"[\u0621\u0624\u0626]") self.re_initial_hamza = re.compile(r"^[\u0622\u0623\u0625]") self.stop_words = [ "\u064a\u0643\u0648\u0646", "\u0648\u0644\u064a\u0633", "\u0648\u0643\u0627\u0646", "\u0643\u0630\u0644\u0643", "\u0627\u0644\u062a\u064a", "\u0648\u0628\u064a\u0646", "\u0639\u0644\u064a\u0647\u0627", "\u0645\u0633\u0627\u0621", "\u0627\u0644\u0630\u064a", "\u0648\u0643\u0627\u0646\u062a", "\u0648\u0644\u0643\u0646", "\u0648\u0627\u0644\u062a\u064a", "\u062a\u0643\u0648\u0646", "\u0627\u0644\u064a\u0648\u0645", "\u0627\u0644\u0644\u0630\u064a\u0646", "\u0639\u0644\u064a\u0647", "\u0643\u0627\u0646\u062a", "\u0644\u0630\u0644\u0643", "\u0623\u0645\u0627\u0645", "\u0647\u0646\u0627\u0643", "\u0645\u0646\u0647\u0627", "\u0645\u0627\u0632\u0627\u0644", "\u0644\u0627\u0632\u0627\u0644", "\u0644\u0627\u064a\u0632\u0627\u0644", "\u0645\u0627\u064a\u0632\u0627\u0644", "\u0627\u0635\u0628\u062d", "\u0623\u0635\u0628\u062d", "\u0623\u0645\u0633\u0649", "\u0627\u0645\u0633\u0649", "\u0623\u0636\u062d\u0649", "\u0627\u0636\u062d\u0649", "\u0645\u0627\u0628\u0631\u062d", "\u0645\u0627\u0641\u062a\u0626", "\u0645\u0627\u0627\u0646\u0641\u0643", "\u0644\u0627\u0633\u064a\u0645\u0627", "\u0648\u0644\u0627\u064a\u0632\u0627\u0644", "\u0627\u0644\u062d\u0627\u0644\u064a", "\u0627\u0644\u064a\u0647\u0627", "\u0627\u0644\u0630\u064a\u0646", "\u0641\u0627\u0646\u0647", "\u0648\u0627\u0644\u0630\u064a", "\u0648\u0647\u0630\u0627", "\u0644\u0647\u0630\u0627", "\u0641\u0643\u0627\u0646", "\u0633\u062a\u0643\u0648\u0646", "\u0627\u0644\u064a\u0647", "\u064a\u0645\u0643\u0646", "\u0628\u0647\u0630\u0627", "\u0627\u0644\u0630\u0649", ] def stem(self, token): token = self.norm( token, 1 ) if token in self.stop_words: return token token = self.pre32( token ) token = self.suf32( token ) token = self.waw( token ) token = self.norm(token, 2) if len(token) == 4: token = self.pro_w4(token) elif len(token) == 5: token = self.pro_w53(token) token = self.end_w5(token) elif len(token) == 6: token = self.pro_w6(token) token = self.end_w6(token) elif len(token) == 7: token = self.suf1(token) if len(token) == 7: token = self.pre1(token) if len(token) == 6: token = self.pro_w6(token) token = self.end_w6(token) return token def norm(self, word, num=3): if num == 1: word = self.re_short_vowels.sub("", word) elif num == 2: word = 
self.re_initial_hamza.sub("\u0627", word) elif num == 3: word = self.re_short_vowels.sub("", word) word = self.re_initial_hamza.sub("\u0627", word) return word def pre32(self, word): if len(word) >= 6: for pre3 in self.p3: if word.startswith(pre3): return word[3:] if len(word) >= 5: for pre2 in self.p2: if word.startswith(pre2): return word[2:] return word def suf32(self, word): if len(word) >= 6: for suf3 in self.s3: if word.endswith(suf3): return word[:-3] if len(word) >= 5: for suf2 in self.s2: if word.endswith(suf2): return word[:-2] return word def waw(self, word): if len(word) >= 4 and word[:2] == "\u0648\u0648": word = word[1:] return word def pro_w4(self, word): if word[0] in self.pr4[0]: word = word[1:] elif word[1] in self.pr4[1]: word = word[:1] + word[2:] elif word[2] in self.pr4[2]: word = word[:2] + word[3] elif word[3] in self.pr4[3]: word = word[:-1] else: word = self.suf1(word) if len(word) == 4: word = self.pre1(word) return word def pro_w53(self, word): if word[2] in self.pr53[0] and word[0] == "\u0627": word = word[1] + word[3:] elif word[3] in self.pr53[1] and word[0] == "\u0645": word = word[1:3] + word[4] elif word[0] in self.pr53[2] and word[4] == "\u0629": word = word[1:4] elif word[0] in self.pr53[3] and word[2] == "\u062a": word = word[1] + word[3:] elif word[0] in self.pr53[4] and word[2] == "\u0627": word = word[1] + word[3:] elif word[2] in self.pr53[5] and word[4] == "\u0629": word = word[:2] + word[3] elif word[0] in self.pr53[6] and word[1] == "\u0646": word = word[2:] elif word[3] == "\u0627" and word[0] == "\u0627": word = word[1:3] + word[4] elif word[4] == "\u0646" and word[3] == "\u0627": word = word[:3] elif word[3] == "\u064a" and word[0] == "\u062a": word = word[1:3] + word[4] elif word[3] == "\u0648" and word[1] == "\u0627": word = word[0] + word[2] + word[4] elif word[2] == "\u0627" and word[1] == "\u0648": word = word[0] + word[3:] elif word[3] == "\u0626" and word[2] == "\u0627": word = word[:2] + word[4] elif word[4] == "\u0629" and word[1] == "\u0627": word = word[0] + word[2:4] elif word[4] == "\u064a" and word[2] == "\u0627": word = word[:2] + word[3] else: word = self.suf1(word) if len(word) == 5: word = self.pre1(word) return word def pro_w54(self, word): if word[0] in self.pr53[2]: word = word[1:] elif word[4] == "\u0629": word = word[:4] elif word[2] == "\u0627": word = word[:2] + word[3:] return word def end_w5(self, word): if len(word) == 4: word = self.pro_w4(word) elif len(word) == 5: word = self.pro_w54(word) return word def pro_w6(self, word): if word.startswith("\u0627\u0633\u062a") or word.startswith( "\u0645\u0633\u062a" ): word = word[3:] elif ( word[0] == "\u0645" and word[3] == "\u0627" and word[5] == "\u0629" ): word = word[1:3] + word[4] elif ( word[0] == "\u0627" and word[2] == "\u062a" and word[4] == "\u0627" ): word = word[1] + word[3] + word[5] elif ( word[0] == "\u0627" and word[3] == "\u0648" and word[2] == word[4] ): word = word[1] + word[4:] elif ( word[0] == "\u062a" and word[2] == "\u0627" and word[4] == "\u064a" ): word = word[1] + word[3] + word[5] else: word = self.suf1(word) if len(word) == 6: word = self.pre1(word) return word def pro_w64(self, word): if word[0] == "\u0627" and word[4] == "\u0627": word = word[1:4] + word[5] elif word.startswith("\u0645\u062a"): word = word[2:] return word def end_w6(self, word): if len(word) == 5: word = self.pro_w53(word) word = self.end_w5(word) elif len(word) == 6: word = self.pro_w64(word) return word def suf1(self, word): for sf1 in self.s1: if word.endswith(sf1): 
return word[:-1] return word def pre1(self, word): for sp1 in self.p1: if word.startswith(sp1): return word[1:] return word
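A minimal sketch of how the stemmer above might be used (the import path nltk.stem.isri and the Arabic tokens are illustrative assumptions; no specific outputs are asserted):

from nltk.stem.isri import ISRIStemmer

stemmer = ISRIStemmer()
# stem() expects Unicode tokens and returns an approximate Arabic root,
# or the normalized form if no root pattern matches.
for token in "يكتبون الطلاب الدروس".split():
    print(token, "->", stemmer.stem(token))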
Natural Language Toolkit: Stemmers
(C) 2001-2023 NLTK Project
Author: Steven Tomcavage <stomcava@law.upenn.edu>
URL: <https://www.nltk.org>
For license information, see LICENSE.TXT

A word stemmer based on the Lancaster (Paice/Husk) stemming algorithm.
Paice, Chris D. "Another Stemmer." ACM SIGIR Forum 24.3 (1990): 56-61.

Lancaster Stemmer

    >>> from nltk.stem.lancaster import LancasterStemmer
    >>> st = LancasterStemmer()
    >>> st.stem('maximum')     # Remove "-um" when word is intact
    'maxim'
    >>> st.stem('presumably')  # Don't remove "-um" when word is not intact
    'presum'
    >>> st.stem('multiply')    # No action taken if word ends with "-ply"
    'multiply'
    >>> st.stem('provision')   # Replace "-sion" with "-j" to trigger "j" set of rules
    'provid'
    >>> st.stem('owed')        # Word starting with vowel must contain at least 2 letters
    'ow'
    >>> st.stem('ear')         # ditto
    'ear'
    >>> st.stem('saying')      # Words starting with consonant must contain at least 3
    'say'
    >>> st.stem('crying')      #     letters and one of those letters must be a vowel
    'cry'
    >>> st.stem('string')      # ditto
    'string'
    >>> st.stem('meant')       # ditto
    'meant'
    >>> st.stem('cement')      # ditto
    'cem'
    >>> st_pre = LancasterStemmer(strip_prefix_flag=True)
    >>> st_pre.stem('kilometer')  # Test prefix
    'met'
    >>> st_custom = LancasterStemmer(rule_tuple=("ssen4>", "s1t."))
    >>> st_custom.stem("ness")    # Change s to t
    'nest'

The rule list is static since it doesn't change between instances. The constructor sets up an empty rule dictionary (filled in later by parseRules), checks whether the user wants to strip a known prefix (strip_prefix_flag), and checks whether the user wants to use their own rule tuples; if no rule_tuple is given, the default rule tuple is used. parseRules() validates the rules (each must match the pattern "reversed suffix, optional '*' intact flag, number of characters to remove, optional append string, optional continue/stop flag") and indexes them by their first letter.

stem(word) lower-cases the word (since all rules are lower-cased), optionally strips a prefix, and then repeatedly applies the first matching rule for the word's last letter. A rule fires only if the word's ending matches the rule's ending, the "intact" condition (if present) holds, and the result is acceptable: a word starting with a vowel must keep at least 2 characters after removal, and a word starting with a consonant must keep at least 3 characters, including one vowel. Applying a rule removes letters from the end of the word and appends the rule's replacement letters; a '.' flag stops stemming, while '>' continues with the next iteration, and if no rules apply the word doesn't need any more stemming. The prefix-removal helper was originally taken from Whoosh.
import re from nltk.stem.api import StemmerI class LancasterStemmer(StemmerI): default_rule_tuple = ( "ai*2.", "a*1.", "bb1.", "city3s.", "ci2>", "cn1t>", "dd1.", "dei3y>", "deec2ss.", "dee1.", "de2>", "dooh4>", "e1>", "feil1v.", "fi2>", "gni3>", "gai3y.", "ga2>", "gg1.", "ht*2.", "hsiug5ct.", "hsi3>", "i*1.", "i1y>", "ji1d.", "juf1s.", "ju1d.", "jo1d.", "jeh1r.", "jrev1t.", "jsim2t.", "jn1d.", "j1s.", "lbaifi6.", "lbai4y.", "lba3>", "lbi3.", "lib2l>", "lc1.", "lufi4y.", "luf3>", "lu2.", "lai3>", "lau3>", "la2>", "ll1.", "mui3.", "mu*2.", "msi3>", "mm1.", "nois4j>", "noix4ct.", "noi3>", "nai3>", "na2>", "nee0.", "ne2>", "nn1.", "pihs4>", "pp1.", "re2>", "rae0.", "ra2.", "ro2>", "ru2>", "rr1.", "rt1>", "rei3y>", "sei3y>", "sis2.", "si2>", "ssen4>", "ss0.", "suo3>", "su*2.", "s*1>", "s0.", "tacilp4y.", "ta2>", "tnem4>", "tne3>", "tna3>", "tpir2b.", "tpro2b.", "tcud1.", "tpmus2.", "tpec2iv.", "tulo2v.", "tsis0.", "tsi3>", "tt1.", "uqi3.", "ugo1.", "vis3j>", "vie0.", "vi2>", "ylb1>", "yli3y>", "ylp0.", "yl2>", "ygo1.", "yhp1.", "ymo1.", "ypo1.", "yti3>", "yte3>", "ytl2.", "yrtsi5.", "yra3>", "yro3>", "yfi3.", "ycn2t>", "yca3>", "zi2>", "zy1s.", ) def __init__(self, rule_tuple=None, strip_prefix_flag=False): self.rule_dictionary = {} self._strip_prefix = strip_prefix_flag self._rule_tuple = rule_tuple if rule_tuple else self.default_rule_tuple def parseRules(self, rule_tuple=None): rule_tuple = rule_tuple if rule_tuple else self._rule_tuple valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$") self.rule_dictionary = {} for rule in rule_tuple: if not valid_rule.match(rule): raise ValueError(f"The rule {rule} is invalid") first_letter = rule[0:1] if first_letter in self.rule_dictionary: self.rule_dictionary[first_letter].append(rule) else: self.rule_dictionary[first_letter] = [rule] def stem(self, word): word = word.lower() word = self.__stripPrefix(word) if self._strip_prefix else word intact_word = word if not self.rule_dictionary: self.parseRules() return self.__doStemming(word, intact_word) def __doStemming(self, word, intact_word): valid_rule = re.compile(r"^([a-z]+)(\*?)(\d)([a-z]*)([>\.]?)$") proceed = True while proceed: last_letter_position = self.__getLastLetter(word) if ( last_letter_position < 0 or word[last_letter_position] not in self.rule_dictionary ): proceed = False else: rule_was_applied = False for rule in self.rule_dictionary[word[last_letter_position]]: rule_match = valid_rule.match(rule) if rule_match: ( ending_string, intact_flag, remove_total, append_string, cont_flag, ) = rule_match.groups() remove_total = int(remove_total) if word.endswith(ending_string[::-1]): if intact_flag: if word == intact_word and self.__isAcceptable( word, remove_total ): word = self.__applyRule( word, remove_total, append_string ) rule_was_applied = True if cont_flag == ".": proceed = False break elif self.__isAcceptable(word, remove_total): word = self.__applyRule( word, remove_total, append_string ) rule_was_applied = True if cont_flag == ".": proceed = False break if rule_was_applied == False: proceed = False return word def __getLastLetter(self, word): last_letter = -1 for position in range(len(word)): if word[position].isalpha(): last_letter = position else: break return last_letter def __isAcceptable(self, word, remove_total): word_is_acceptable = False if word[0] in "aeiouy": if len(word) - remove_total >= 2: word_is_acceptable = True elif len(word) - remove_total >= 3: if word[1] in "aeiouy": word_is_acceptable = True elif word[2] in "aeiouy": word_is_acceptable = True return 
word_is_acceptable def __applyRule(self, word, remove_total, append_string): new_word_length = len(word) - remove_total word = word[0:new_word_length] if append_string: word += append_string return word def __stripPrefix(self, word): for prefix in ( "kilo", "micro", "milli", "intra", "ultra", "mega", "nano", "pico", "pseudo", ): if word.startswith(prefix): return word[len(prefix) :] return word def __repr__(self): return "<LancasterStemmer>"
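A brief usage sketch restating the doctest examples from the class description above (import path as given in that doctest):

from nltk.stem.lancaster import LancasterStemmer

st = LancasterStemmer()
print(st.stem("maximum"))      # 'maxim'   (per the doctest above)
print(st.stem("presumably"))   # 'presum'

# Optional behaviours shown in the docstring: prefix stripping and custom rules.
st_pre = LancasterStemmer(strip_prefix_flag=True)
print(st_pre.stem("kilometer"))                        # 'met'
st_custom = LancasterStemmer(rule_tuple=("ssen4>", "s1t."))
print(st_custom.stem("ness"))                          # 'nest'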
Porter Stemmer

This is the Porter stemming algorithm. It follows the algorithm presented in

Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137.

with some optional deviations that can be turned on or off with the mode argument to the constructor.

Martin Porter, the algorithm's inventor, maintains a web page about the algorithm at https://www.tartarus.org/~martin/PorterStemmer/ which includes another Python implementation and other implementations in many languages.

A word stemmer based on the Porter stemming algorithm. Martin Porter has endorsed several modifications to the Porter algorithm since writing his original paper, and those extensions are included in the implementations on his website. Additionally, others have proposed further improvements to the algorithm, including NLTK contributors. There are thus three modes that can be selected by passing the appropriate constant to the class constructor's mode attribute:

- PorterStemmer.ORIGINAL_ALGORITHM: an implementation that is faithful to the original paper. Note that Martin Porter has deprecated this version of the algorithm. Martin distributes implementations of the Porter stemmer in many languages, hosted at the URL above, and all of these implementations include his extensions. He strongly recommends against using the original, published version of the algorithm; only use this mode if you clearly understand why you are choosing to do so.

- PorterStemmer.MARTIN_EXTENSIONS: an implementation that only uses the modifications to the algorithm that are included in the implementations on Martin Porter's website. He has declared Porter frozen, so the behaviour of those implementations should never change.

- PorterStemmer.NLTK_EXTENSIONS (default): an implementation that includes further improvements devised by NLTK contributors or taken from other modified implementations found on the web.

For the best stemming, you should use the default NLTK_EXTENSIONS version. However, if you need to get the same results as either the original algorithm or one of Martin Porter's hosted versions, for compatibility with an existing implementation or dataset, you can use one of the other modes instead.

The class keeps a small pool of irregular forms; it is quite short, but still reflects the errors actually drawn to Martin Porter's attention over a 20-year period. The implementation follows the paper's definitions: a consonant is a letter other than a, e, i, o or u, and other than y preceded by a consonant (so in "toy" the consonants are t and y, and in "syzygy" they are s, z and g); any word can be written as [C](VC){m}[V], and m is called the measure of the word (m = 0 for "tree" or "by", m = 1 for "trouble" or "oats", m = 2 for "private" or "orrery"). The rule conditions *v* (the stem contains a vowel), *d (the stem ends with a double consonant) and *o (the stem ends cvc where the second c is not w, x or y) are implemented as helper predicates.

Step 1a strips plural endings (sses -> ss, ies -> i, ss -> ss, s -> ''), step 1b handles -eed/-ed/-ing (with the at -> ate, bl -> ble, iz -> ize, double-consonant and *o clean-up rules), and step 1c maps a final y to i. Steps 2 and 3 map derivational suffixes (ational -> ate, tional -> tion, izer -> ize, ousness -> ous, icate -> ic, ful -> '', ness -> '', ...), step 4 removes residual suffixes when m > 1 (al, ance, ence, er, ic, able, ible, ant, ement, ment, ent, ion when the stem ends in s or t, ou, ism, ate, iti, ous, ive, ize), and steps 5a and 5b tidy a trailing e or double l. The NLTK_EXTENSIONS mode adds a few extra rules (for example flies -> fli but dies -> die, spied -> spi but died -> die, a fulli -> ful rule, and applying the alli -> al rule before abli), and stem() looks the word up in the irregular-forms pool before running the steps. A small demo() function stems a few Treebank files and prints the original and stemmed text side by side.
activate activ m 1 iti angulariti angular m 1 ous homologous homolog m 1 ive effective effect m 1 ize bowdlerize bowdler the suffixes are now removed all that remains is a little tidying up m 1 and s or t ion implements step 5a from an algorithm for suffix stripping from the paper step 5a m 1 e probate probat rate rate m 1 and not o e cease ceas note that martin s test vocabulary and reference implementations are inconsistent in how they handle the case where two rules both refer to a suffix that matches the word to be stemmed but only the condition of the second one is true earlier in step2b we had the rules m 0 eed ee v ed but the examples in the paper included feed feed even though v is true for fe and therefore the second rule alone would map feed fe however in this case we need to handle the consecutive rules differently and try both conditions obviously the second rule here would be redundant otherwise martin s paper makes no explicit mention of the inconsistency you have to infer it from the examples for this reason we can t use _apply_rule_list here implements step 5a from an algorithm for suffix stripping from the paper step 5b m 1 and d and l single letter controll control roll roll param to_lowercase if to_lowercase true the word always lowercase with this line strings of length 1 or 2 don t go through the stemming process although no mention is made of this in the published algorithm a demonstration of the porter stemmer on a sample from the penn treebank corpus convert the results to a string and word wrap them convert the original to a string and word wrap it print the results
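To make the measure definition above concrete, here is a small standalone sketch (illustrative only, not part of the NLTK source; the helper names is_consonant and measure are hypothetical) that builds the consonant/vowel form of a stem and counts the "vc" pairs, reproducing the m values quoted in the paper:

# Illustrative sketch of the measure m described above; follows the paper's
# rule that "y" is a vowel only when it is preceded by a consonant.
def is_consonant(word: str, i: int) -> bool:
    if word[i] in "aeiou":
        return False
    if word[i] == "y":
        # "y" at the start of a word, or after a vowel, counts as a consonant.
        return i == 0 or not is_consonant(word, i - 1)
    return True


def measure(stem: str) -> int:
    # Build a string of "c"/"v" flags, e.g. "trouble" -> "ccvvccv", then count
    # "vc" occurrences, which equals m in the reduced form [C](VC)^m[V].
    cv = "".join("c" if is_consonant(stem, i) else "v" for i in range(len(stem)))
    return cv.count("vc")


# Examples quoted in the paper:
assert measure("tree") == 0 and measure("by") == 0
assert measure("trouble") == 1 and measure("oats") == 1
assert measure("private") == 2 and measure("orrery") == 2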
__docformat__ = "plaintext" import re from nltk.stem.api import StemmerI class PorterStemmer(StemmerI): NLTK_EXTENSIONS = "NLTK_EXTENSIONS" MARTIN_EXTENSIONS = "MARTIN_EXTENSIONS" ORIGINAL_ALGORITHM = "ORIGINAL_ALGORITHM" def __init__(self, mode=NLTK_EXTENSIONS): if mode not in ( self.NLTK_EXTENSIONS, self.MARTIN_EXTENSIONS, self.ORIGINAL_ALGORITHM, ): raise ValueError( "Mode must be one of PorterStemmer.NLTK_EXTENSIONS, " "PorterStemmer.MARTIN_EXTENSIONS, or " "PorterStemmer.ORIGINAL_ALGORITHM" ) self.mode = mode if self.mode == self.NLTK_EXTENSIONS: irregular_forms = { "sky": ["sky", "skies"], "die": ["dying"], "lie": ["lying"], "tie": ["tying"], "news": ["news"], "inning": ["innings", "inning"], "outing": ["outings", "outing"], "canning": ["cannings", "canning"], "howe": ["howe"], "proceed": ["proceed"], "exceed": ["exceed"], "succeed": ["succeed"], } self.pool = {} for key in irregular_forms: for val in irregular_forms[key]: self.pool[val] = key self.vowels = frozenset(["a", "e", "i", "o", "u"]) def _is_consonant(self, word, i): if word[i] in self.vowels: return False if word[i] == "y": if i == 0: return True else: return not self._is_consonant(word, i - 1) return True def _measure(self, stem): r cv_sequence = "" for i in range(len(stem)): if self._is_consonant(stem, i): cv_sequence += "c" else: cv_sequence += "v" return cv_sequence.count("vc") def _has_positive_measure(self, stem): return self._measure(stem) > 0 def _contains_vowel(self, stem): for i in range(len(stem)): if not self._is_consonant(stem, i): return True return False def _ends_double_consonant(self, word): return ( len(word) >= 2 and word[-1] == word[-2] and self._is_consonant(word, len(word) - 1) ) def _ends_cvc(self, word): return ( len(word) >= 3 and self._is_consonant(word, len(word) - 3) and not self._is_consonant(word, len(word) - 2) and self._is_consonant(word, len(word) - 1) and word[-1] not in ("w", "x", "y") ) or ( self.mode == self.NLTK_EXTENSIONS and len(word) == 2 and not self._is_consonant(word, 0) and self._is_consonant(word, 1) ) def _replace_suffix(self, word, suffix, replacement): assert word.endswith(suffix), "Given word doesn't end with given suffix" if suffix == "": return word + replacement else: return word[: -len(suffix)] + replacement def _apply_rule_list(self, word, rules): for rule in rules: suffix, replacement, condition = rule if suffix == "*d" and self._ends_double_consonant(word): stem = word[:-2] if condition is None or condition(stem): return stem + replacement else: return word if word.endswith(suffix): stem = self._replace_suffix(word, suffix, "") if condition is None or condition(stem): return stem + replacement else: return word return word def _step1a(self, word): if self.mode == self.NLTK_EXTENSIONS: if word.endswith("ies") and len(word) == 4: return self._replace_suffix(word, "ies", "ie") return self._apply_rule_list( word, [ ("sses", "ss", None), ("ies", "i", None), ("ss", "ss", None), ("s", "", None), ], ) def _step1b(self, word): if self.mode == self.NLTK_EXTENSIONS: if word.endswith("ied"): if len(word) == 4: return self._replace_suffix(word, "ied", "ie") else: return self._replace_suffix(word, "ied", "i") if word.endswith("eed"): stem = self._replace_suffix(word, "eed", "") if self._measure(stem) > 0: return stem + "ee" else: return word rule_2_or_3_succeeded = False for suffix in ["ed", "ing"]: if word.endswith(suffix): intermediate_stem = self._replace_suffix(word, suffix, "") if self._contains_vowel(intermediate_stem): rule_2_or_3_succeeded = True break if not 
rule_2_or_3_succeeded: return word return self._apply_rule_list( intermediate_stem, [ ("at", "ate", None), ("bl", "ble", None), ("iz", "ize", None), ( "*d", intermediate_stem[-1], lambda stem: intermediate_stem[-1] not in ("l", "s", "z"), ), ( "", "e", lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)), ), ], ) def _step1c(self, word): def nltk_condition(stem): return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1) def original_condition(stem): return self._contains_vowel(stem) return self._apply_rule_list( word, [ ( "y", "i", nltk_condition if self.mode == self.NLTK_EXTENSIONS else original_condition, ) ], ) def _step2(self, word): if self.mode == self.NLTK_EXTENSIONS: if word.endswith("alli") and self._has_positive_measure( self._replace_suffix(word, "alli", "") ): return self._step2(self._replace_suffix(word, "alli", "al")) bli_rule = ("bli", "ble", self._has_positive_measure) abli_rule = ("abli", "able", self._has_positive_measure) rules = [ ("ational", "ate", self._has_positive_measure), ("tional", "tion", self._has_positive_measure), ("enci", "ence", self._has_positive_measure), ("anci", "ance", self._has_positive_measure), ("izer", "ize", self._has_positive_measure), abli_rule if self.mode == self.ORIGINAL_ALGORITHM else bli_rule, ("alli", "al", self._has_positive_measure), ("entli", "ent", self._has_positive_measure), ("eli", "e", self._has_positive_measure), ("ousli", "ous", self._has_positive_measure), ("ization", "ize", self._has_positive_measure), ("ation", "ate", self._has_positive_measure), ("ator", "ate", self._has_positive_measure), ("alism", "al", self._has_positive_measure), ("iveness", "ive", self._has_positive_measure), ("fulness", "ful", self._has_positive_measure), ("ousness", "ous", self._has_positive_measure), ("aliti", "al", self._has_positive_measure), ("iviti", "ive", self._has_positive_measure), ("biliti", "ble", self._has_positive_measure), ] if self.mode == self.NLTK_EXTENSIONS: rules.append(("fulli", "ful", self._has_positive_measure)) rules.append( ("logi", "log", lambda stem: self._has_positive_measure(word[:-3])) ) if self.mode == self.MARTIN_EXTENSIONS: rules.append(("logi", "log", self._has_positive_measure)) return self._apply_rule_list(word, rules) def _step3(self, word): return self._apply_rule_list( word, [ ("icate", "ic", self._has_positive_measure), ("ative", "", self._has_positive_measure), ("alize", "al", self._has_positive_measure), ("iciti", "ic", self._has_positive_measure), ("ical", "ic", self._has_positive_measure), ("ful", "", self._has_positive_measure), ("ness", "", self._has_positive_measure), ], ) def _step4(self, word): measure_gt_1 = lambda stem: self._measure(stem) > 1 return self._apply_rule_list( word, [ ("al", "", measure_gt_1), ("ance", "", measure_gt_1), ("ence", "", measure_gt_1), ("er", "", measure_gt_1), ("ic", "", measure_gt_1), ("able", "", measure_gt_1), ("ible", "", measure_gt_1), ("ant", "", measure_gt_1), ("ement", "", measure_gt_1), ("ment", "", measure_gt_1), ("ent", "", measure_gt_1), ( "ion", "", lambda stem: self._measure(stem) > 1 and stem[-1] in ("s", "t"), ), ("ou", "", measure_gt_1), ("ism", "", measure_gt_1), ("ate", "", measure_gt_1), ("iti", "", measure_gt_1), ("ous", "", measure_gt_1), ("ive", "", measure_gt_1), ("ize", "", measure_gt_1), ], ) def _step5a(self, word): if word.endswith("e"): stem = self._replace_suffix(word, "e", "") if self._measure(stem) > 1: return stem if self._measure(stem) == 1 and not self._ends_cvc(stem): return stem return word def _step5b(self, word): return 
self._apply_rule_list( word, [("ll", "l", lambda stem: self._measure(word[:-1]) > 1)] ) def stem(self, word, to_lowercase=True): stem = word.lower() if to_lowercase else word if self.mode == self.NLTK_EXTENSIONS and word in self.pool: return self.pool[stem] if self.mode != self.ORIGINAL_ALGORITHM and len(word) <= 2: return stem stem = self._step1a(stem) stem = self._step1b(stem) stem = self._step1c(stem) stem = self._step2(stem) stem = self._step3(stem) stem = self._step4(stem) stem = self._step5a(stem) stem = self._step5b(stem) return stem def __repr__(self): return "<PorterStemmer>" def demo(): from nltk import stem from nltk.corpus import treebank stemmer = stem.PorterStemmer() orig = [] stemmed = [] for item in treebank.fileids()[:3]: for (word, tag) in treebank.tagged_words(item): orig.append(word) stemmed.append(stemmer.stem(word)) results = " ".join(stemmed) results = re.sub(r"(.{,70})\s", r"\1\n", results + " ").rstrip() original = " ".join(orig) original = re.sub(r"(.{,70})\s", r"\1\n", original + " ").rstrip() print("-Original-".center(70).replace(" ", "*").replace("-", " ")) print(original) print("-Results-".center(70).replace(" ", "*").replace("-", " ")) print(results) print("*" * 70)
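A minimal usage sketch of the class above (assuming NLTK is installed, so it is importable as nltk.stem.PorterStemmer); the stems noted in the comments follow directly from the rules documented earlier, while other words may stem differently depending on the mode:

from nltk.stem import PorterStemmer

default = PorterStemmer()  # NLTK_EXTENSIONS is the default mode
martin = PorterStemmer(mode=PorterStemmer.MARTIN_EXTENSIONS)
original = PorterStemmer(mode=PorterStemmer.ORIGINAL_ALGORITHM)

# Step 1a ("sses" -> "ss") behaves the same way in every mode.
print(default.stem("caresses"))  # caress

# The NLTK-extensions mode consults the irregular-forms pool first,
# so "dying" maps straight to "die" instead of going through the rules.
print(default.stem("dying"))  # die

# In the other two modes the same word is run through the ordinary
# suffix rules, which can produce a different stem.
for stemmer in (martin, original):
    print(stemmer.stem("dying"))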
natural language toolkit stemmers c 2001 2023 nltk project trevor cohn tacohn cs mu oz au edward loper edloper gmail com steven bird stevenbird1 gmail com url https www nltk org for license information see license txt a stemmer that uses regular expressions to identify morphological affixes any substrings that match the regular expressions will be removed from nltk stem import regexpstemmer st regexpstemmer ing s e able min 4 st stem cars car st stem mass mas st stem was was st stem bee bee st stem compute comput st stem advisable advis type regexp str or regexp param regexp the regular expression that should be used to identify morphological affixes type min int param min the minimum length of string to stem
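One point worth making explicit about the parameters documented above: the stemmer removes every match of the regular expression anywhere in the word (a plain empty-string substitution), so suffix patterns are normally anchored with $. A small sketch of the difference, with hypothetical patterns, assuming the class is importable as nltk.stem.RegexpStemmer:

from nltk.stem import RegexpStemmer

# Unanchored pattern: every "s" in the word is stripped, not just a final one.
print(RegexpStemmer("s").stem("glasses"))  # glae

# Anchored pattern: only a word-final "s" is removed.
print(RegexpStemmer("s$").stem("glasses"))  # glasse

# Words shorter than `min` are returned unchanged.
print(RegexpStemmer("s$", min=4).stem("was"))  # was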
import re

from nltk.stem.api import StemmerI


class RegexpStemmer(StemmerI):
    def __init__(self, regexp, min=0):
        if not hasattr(regexp, "pattern"):
            regexp = re.compile(regexp)
        self._regexp = regexp
        self._min = min

    def stem(self, word):
        if len(word) < self._min:
            return word
        else:
            return self._regexp.sub("", word)

    def __repr__(self):
        return f"<RegexpStemmer: {self._regexp.pattern!r}>"
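As the constructor above shows, regexp may be either a pattern string or an already-compiled pattern object (anything with a .pattern attribute is used as-is). A short sketch under that assumption; the suffix alternation used here is purely illustrative:

import re

from nltk.stem import RegexpStemmer

# A pre-compiled pattern is accepted directly; its .pattern string is what
# __repr__ reports. The suffix list below is just an example.
suffixes = re.compile(r"(ing|ly|ed|ies|ive|es|s|ment)$")
stemmer = RegexpStemmer(suffixes, min=4)

print(stemmer)                     # <RegexpStemmer: '(ing|ly|ed|ies|ive|es|s|ment)$'>
print(stemmer.stem("processing"))  # process
print(stemmer.stem("cats"))        # cat
print(stemmer.stem("is"))          # shorter than min=4, returned unchanged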
natural language toolkit snowball stemmer c 20012023 nltk project peter michael stahl pemistahlgmail com peter ljunglof peter ljunglofheatherleaf se revisions lakhdar benzahia lakhdar benzahiagmail com cowriter assem chelli assem chgmail com reviewer arabicstemmer abdelkrim aries abariesesi dz reviewer arabicstemmer algorithms dr martin porter martintartarus org assem chelli assem chgmail com arabic stemming algorithm benzahia lakhdar lakhdar benzahiagmail com url https www nltk org for license information see license txt snowball stemmers this module provides a port of the snowball stemmers developed by martin porter there is also a demo function snowball demo snowball stemmer the following languages are supported arabic danish dutch english finnish french german hungarian italian norwegian portuguese romanian russian spanish and swedish the algorithm for english is documented here porter m an algorithm for suffix stripping program 14 3 1980 130137 the algorithms have been developed by martin porter these stemmers are called snowball because porter created a programming language with this name for creating new stemming algorithms there is more information available at http snowball tartarus org the stemmer is invoked as shown below from nltk stem import snowballstemmer see which languages are supported print joinsnowballstemmer languages doctest normalizewhitespace arabic danish dutch english finnish french german hungarian italian norwegian porter portuguese romanian russian spanish swedish stemmer snowballstemmergerman choose a language stemmer stemautobahnen stem a word autobahn invoking the stemmers that way is useful if you do not know the language to be stemmed at runtime alternatively if you already know the language then you can invoke the language specific stemmer directly from nltk stem snowball import germanstemmer stemmer germanstemmer stemmer stemautobahnen autobahn param language the language whose subclass is instantiated type language str or unicode param ignorestopwords if set to true stopwords are not stemmed and returned unchanged set to false by default type ignorestopwords bool raise valueerror if there is no stemmer for the specified language a valueerror is raised this helper subclass offers the possibility to invoke a specific stemmer directly this is useful if you already know the language to be stemmed at runtime create an instance of the snowball stemmer param ignorestopwords if set to true stopwords are not stemmed and returned unchanged set to false by default type ignorestopwords bool the language is the name of the class minus the final stemmer print out the string representation of the respective class a word stemmer based on the original porter stemming algorithm porter m an algorithm for suffix stripping program 14 3 1980 130137 a few minor modifications have been made to porter s basic algorithm see the source code of the module nltk stem porter for more information this subclass encapsulates a method for defining the string region r1 it is used by the danish norwegian and swedish stemmer return the region r1 that is used by the scandinavian stemmers r1 is the region after the first nonvowel following a vowel or is the null region at the end of the word if there is no such nonvowel but then r1 is adjusted so that the region before it contains at least three letters param word the word whose region r1 is determined type word str or unicode param vowels the vowels of the respective language that are used to determine the region r1 type vowels unicode 
return the region r1 for the respective word rtype unicode note this helper method is invoked by the respective stem method of the subclasses danishstemmer norwegianstemmer and swedishstemmer it is not to be invoked directly this subclass encapsulates two methods for defining the standard versions of the string regions r1 r2 and rv return the standard interpretations of the string regions r1 and r2 r1 is the region after the first nonvowel following a vowel or is the null region at the end of the word if there is no such nonvowel r2 is the region after the first nonvowel following a vowel in r1 or is the null region at the end of the word if there is no such nonvowel param word the word whose regions r1 and r2 are determined type word str or unicode param vowels the vowels of the respective language that are used to determine the regions r1 and r2 type vowels unicode return r1 r2 the regions r1 and r2 for the respective word rtype tuple note this helper method is invoked by the respective stem method of the subclasses dutchstemmer finnishstemmer frenchstemmer germanstemmer italianstemmer portuguesestemmer romanianstemmer and spanishstemmer it is not to be invoked directly note a detailed description of how to define r1 and r2 can be found at http snowball tartarus orgtextsr1r2 html return the standard interpretation of the string region rv if the second letter is a consonant rv is the region after the next following vowel if the first two letters are vowels rv is the region after the next following consonant otherwise rv is the region after the third letter param word the word whose region rv is determined type word str or unicode param vowels the vowels of the respective language that are used to determine the region rv type vowels unicode return the region rv for the respective word rtype unicode note this helper method is invoked by the respective stem method of the subclasses italianstemmer portuguesestemmer romanianstemmer and spanishstemmer it is not to be invoked directly https github comsnowballstemsnowballblobmasteralgorithmsarabicstemunicode sbl original algorithm the snowball arabic light stemmer algorithm assem chelli abdelkrim aries lakhdar benzahia nltk version lakhdar benzahia normalizepre stes normalizepost normalize other hamza s checks suffixes prefixes suffixes added due to conjugation verbs suffixes added due to derivation names prefixes added due to derivation names prepositions letters param token string return normalized token type string strip diacritics strip kasheeda strip punctuation marks normalize last hamza normalize other hamzat stem an arabic word and return the stemmed form param word string return string set initial values guess type and properties checks1 checks2 prenormalization avoid stopwords start stemming or next todo how to deal with or next instruction if self suffixnounstep1asuccess or next todo how to deal with or next prefixes post normalization stemming the danish snowball stemmer cvar vowels the danish vowels type vowels unicode cvar consonants the danish consonants type consonants unicode cvar doubleconsonants the danish double consonants type doubleconsonants tuple cvar sending letters that may directly appear before a word final s type sending unicode cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple note a 
detailed description of the danish stemming algorithm can be found under http snowball tartarus orgalgorithmsdanishstemmer html the language s vowels and other important characters are defined the different suffixes divided into the algorithm s steps and organized by length are listed in tuples stem a danish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode every word is put into lower case for normalization after this the required regions are generated by the respective helper method then the actual stemming process starts every new step is explicitly indicated according to the descriptions on the snowball website step 1 step 2 step 3 step 4 undouble the dutch snowball stemmer cvar vowels the dutch vowels type vowels unicode cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step3bsuffixes suffixes to be deleted in step 3b of the algorithm type step3bsuffixes tuple note a detailed description of the dutch stemming algorithm can be found under http snowball tartarus orgalgorithmsdutchstemmer html stem a dutch word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode vowel accents are removed an initial y a y after a vowel and an i between self vowels is put into upper case as from now these are treated as consonants r1 is adjusted so that the region before it contains at least 3 letters step 1 step 2 step 3a step 3b derivational suffixes step 4 undouble vowel all occurrences of i and y are put back into lower case the english snowball stemmer cvar vowels the english vowels type vowels unicode cvar doubleconsonants the english double consonants type doubleconsonants tuple cvar liending letters that may directly appear before a word final li type liending unicode cvar step0suffixes suffixes to be deleted in step 0 of the algorithm type step0suffixes tuple cvar step1asuffixes suffixes to be deleted in step 1a of the algorithm type step1asuffixes tuple cvar step1bsuffixes suffixes to be deleted in step 1b of the algorithm type step1bsuffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple cvar step4suffixes suffixes to be deleted in step 4 of the algorithm type step4suffixes tuple cvar step5suffixes suffixes to be deleted in step 5 of the algorithm type step5suffixes tuple cvar specialwords a dictionary containing words which have to be stemmed specially type specialwords dict note a detailed description of the english stemming algorithm can be found under http snowball tartarus orgalgorithmsenglishstemmer html stem an english word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode map the different apostrophe characters to a single consistent one step 0 step 1a step 1b step 1c step 2 step 3 step 4 step 5 the finnish snowball stemmer cvar vowels the finnish vowels type vowels unicode cvar restrictedvowels a subset of the finnish vowels type restrictedvowels unicode cvar longvowels the finnish vowels in their long forms type longvowels tuple cvar consonants the finnish consonants type consonants unicode cvar doubleconsonants the finnish double consonants type doubleconsonants tuple cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple 
cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple cvar step4suffixes suffixes to be deleted in step 4 of the algorithm type step4suffixes tuple note a detailed description of the finnish stemming algorithm can be found under http snowball tartarus orgalgorithmsfinnishstemmer html stem a finnish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 particles etc step 2 possessives step 3 cases step 4 other endings step 5 plurals step 6 tidying up if the word ends with a double consonant followed by zero or more vowels the last consonant is removed the french snowball stemmer cvar vowels the french vowels type vowels unicode cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2asuffixes suffixes to be deleted in step 2a of the algorithm type step2asuffixes tuple cvar step2bsuffixes suffixes to be deleted in step 2b of the algorithm type step2bsuffixes tuple cvar step4suffixes suffixes to be deleted in step 4 of the algorithm type step4suffixes tuple note a detailed description of the french stemming algorithm can be found under http snowball tartarus orgalgorithmsfrenchstemmer html stem a french word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode every occurrence of u after q is put into upper case every occurrence of u and i between vowels is put into upper case every occurrence of y preceded or followed by a vowel is also put into upper case step 1 standard suffix removal step 2a verb suffixes beginning i step 2b other verb suffixes step 3 step 4 residual suffixes step 5 undouble step 6 unaccent return the region rv that is used by the french stemmer if the word begins with two vowels rv is the region after the third letter otherwise it is the region after the first vowel not at the beginning of the word or the end of the word if these positions cannot be found exceptionally u par u col or u tap at the beginning of a word is also taken to define rv as the region to their right param word the french word whose region rv is determined type word str or unicode param vowels the french vowels that are used to determine the region rv type vowels unicode return the region rv for the respective french word rtype unicode note this helper method is invoked by the stem method of the subclass frenchstemmer it is not to be invoked directly the german snowball stemmer cvar vowels the german vowels type vowels unicode cvar sending letters that may directly appear before a word final s type sending unicode cvar stending letter that may directly appear before a word final st type stending unicode cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple note a detailed description of the german stemming algorithm can be found under http snowball tartarus orgalgorithmsgermanstemmer html stem a german word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode every occurrence of u and y between vowels is put into upper case r1 is adjusted so that the region before it 
contains at least 3 letters step 1 step 2 step 3 derivational suffixes umlaut accents are removed and u and y are put back into lower case the hungarian snowball stemmer cvar vowels the hungarian vowels type vowels unicode cvar digraphs the hungarian digraphs type digraphs tuple cvar doubleconsonants the hungarian double consonants type doubleconsonants tuple cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple cvar step4suffixes suffixes to be deleted in step 4 of the algorithm type step4suffixes tuple cvar step5suffixes suffixes to be deleted in step 5 of the algorithm type step5suffixes tuple cvar step6suffixes suffixes to be deleted in step 6 of the algorithm type step6suffixes tuple cvar step7suffixes suffixes to be deleted in step 7 of the algorithm type step7suffixes tuple cvar step8suffixes suffixes to be deleted in step 8 of the algorithm type step8suffixes tuple cvar step9suffixes suffixes to be deleted in step 9 of the algorithm type step9suffixes tuple note a detailed description of the hungarian stemming algorithm can be found under http snowball tartarus orgalgorithmshungarianstemmer html stem an hungarian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 remove instrumental case step 2 remove frequent cases step 3 remove special cases step 4 remove other cases step 5 remove factive case step 6 remove owned step 7 remove singular owner suffixes step 8 remove plural owner suffixes step 9 remove plural suffixes return the region r1 that is used by the hungarian stemmer if the word begins with a vowel r1 is defined as the region after the first consonant or digraph two letters stand for one phoneme in the word if the word begins with a consonant it is defined as the region after the first vowel in the word if the word does not contain both a vowel and consonant r1 is the null region at the end of the word param word the hungarian word whose region r1 is determined type word str or unicode param vowels the hungarian vowels that are used to determine the region r1 type vowels unicode param digraphs the digraphs that are used to determine the region r1 type digraphs tuple return the region r1 for the respective word rtype unicode note this helper method is invoked by the stem method of the subclass hungarianstemmer it is not to be invoked directly the italian snowball stemmer cvar vowels the italian vowels type vowels unicode cvar step0suffixes suffixes to be deleted in step 0 of the algorithm type step0suffixes tuple cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple note a detailed description of the italian stemming algorithm can be found under http snowball tartarus orgalgorithmsitalianstemmer html stem an italian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode all acute accents are replaced by grave accents every occurrence of u after q is put into upper case every occurrence of u and i between vowels is put into upper case step 0 attached pronoun step 1 standard suffix removal step 2 verb suffixes step 3a step 3b the norwegian snowball stemmer 
cvar vowels the norwegian vowels type vowels unicode cvar sending letters that may directly appear before a word final s type sending unicode cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple note a detailed description of the norwegian stemming algorithm can be found under http snowball tartarus orgalgorithmsnorwegianstemmer html stem a norwegian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 step 2 step 3 the portuguese snowball stemmer cvar vowels the portuguese vowels type vowels unicode cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step4suffixes suffixes to be deleted in step 4 of the algorithm type step4suffixes tuple note a detailed description of the portuguese stemming algorithm can be found under http snowball tartarus orgalgorithmsportuguesestemmer html stem a portuguese word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 standard suffix removal step 2 verb suffixes step 3 step 4 residual suffix step 5 the romanian snowball stemmer cvar vowels the romanian vowels type vowels unicode cvar step0suffixes suffixes to be deleted in step 0 of the algorithm type step0suffixes tuple cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple note a detailed description of the romanian stemming algorithm can be found under http snowball tartarus orgalgorithmsromanianstemmer html stem a romanian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 0 removal of plurals and other simplifications step 1 reduction of combining suffixes step 2 removal of standard suffixes step 3 removal of verb suffixes step 4 removal of final vowel the russian snowball stemmer cvar perfectivegerundsuffixes suffixes to be deleted type perfectivegerundsuffixes tuple cvar adjectivalsuffixes suffixes to be deleted type adjectivalsuffixes tuple cvar reflexivesuffixes suffixes to be deleted type reflexivesuffixes tuple cvar verbsuffixes suffixes to be deleted type verbsuffixes tuple cvar nounsuffixes suffixes to be deleted type nounsuffixes tuple cvar superlativesuffixes suffixes to be deleted type superlativesuffixes tuple cvar derivationalsuffixes suffixes to be deleted type derivationalsuffixes tuple note a detailed description of the russian stemming algorithm can be found under http snowball tartarus orgalgorithmsrussianstemmer html stem a russian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 step 2 step 3 step 4 return the regions rv and r2 which are used by the russian stemmer in any word rv is the region after the first vowel or the end of the word if it contains no vowel r2 is the region after the first nonvowel following a vowel in r1 or the end of the word if there 
is no such nonvowel r1 is the region after the first nonvowel following a vowel or the end of the word if there is no such nonvowel param word the russian word whose regions rv and r2 are determined type word str or unicode return the regions rv and r2 for the respective russian word rtype tuple note this helper method is invoked by the stem method of the subclass russianstemmer it is not to be invoked directly transliterate a russian word into the roman alphabet a russian word whose letters consist of the cyrillic alphabet are transliterated into the roman alphabet in order to ease the forthcoming stemming process param word the word that is transliterated type word unicode return the transliterated word rtype unicode note this helper method is invoked by the stem method of the subclass russianstemmer it is not to be invoked directly transliterate a russian word back into the cyrillic alphabet a russian word formerly transliterated into the roman alphabet in order to ease the stemming process is transliterated back into the cyrillic alphabet its original form param word the word that is transliterated type word str or unicode return word the transliterated word rtype unicode note this helper method is invoked by the stem method of the subclass russianstemmer it is not to be invoked directly the spanish snowball stemmer cvar vowels the spanish vowels type vowels unicode cvar step0suffixes suffixes to be deleted in step 0 of the algorithm type step0suffixes tuple cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2asuffixes suffixes to be deleted in step 2a of the algorithm type step2asuffixes tuple cvar step2bsuffixes suffixes to be deleted in step 2b of the algorithm type step2bsuffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple note a detailed description of the spanish stemming algorithm can be found under http snowball tartarus orgalgorithmsspanishstemmer html stem a spanish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 0 attached pronoun step 1 standard suffix removal step 2a verb suffixes beginning y step 2b other verb suffixes step 3 residual suffix replaces all accented letters on a word with their nonaccented counterparts param word a spanish word with or without accents type word str or unicode return a word with the accented letters replaced with their nonaccented counterparts a e i o u rtype str or unicode the swedish snowball stemmer cvar vowels the swedish vowels type vowels unicode cvar sending letters that may directly appear before a word final s type sending unicode cvar step1suffixes suffixes to be deleted in step 1 of the algorithm type step1suffixes tuple cvar step2suffixes suffixes to be deleted in step 2 of the algorithm type step2suffixes tuple cvar step3suffixes suffixes to be deleted in step 3 of the algorithm type step3suffixes tuple note a detailed description of the swedish stemming algorithm can be found under http snowball tartarus orgalgorithmsswedishstemmer html stem a swedish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 step 2 step 3 this function provides a demonstration of the snowball stemmers after invoking this function and specifying a language it stems an excerpt of the universal declaration of human rights which is a part of the nltk corpus collection and then 
prints out the original and the stemmed text natural language toolkit snowball stemmer c 2001 2023 nltk project peter michael stahl pemistahl gmail com peter ljunglof peter ljunglof heatherleaf se revisions lakhdar benzahia lakhdar benzahia gmail com co writer assem chelli assem ch gmail com reviewer arabicstemmer abdelkrim aries ab_aries esi dz reviewer arabicstemmer algorithms dr martin porter martin tartarus org assem chelli assem ch gmail com arabic stemming algorithm benzahia lakhdar lakhdar benzahia gmail com url https www nltk org for license information see license txt snowball stemmers this module provides a port of the snowball stemmers developed by martin porter there is also a demo function snowball demo snowball stemmer the following languages are supported arabic danish dutch english finnish french german hungarian italian norwegian portuguese romanian russian spanish and swedish the algorithm for english is documented here porter m an algorithm for suffix stripping program 14 3 1980 130 137 the algorithms have been developed by martin porter these stemmers are called snowball because porter created a programming language with this name for creating new stemming algorithms there is more information available at http snowball tartarus org the stemmer is invoked as shown below from nltk stem import snowballstemmer see which languages are supported print join snowballstemmer languages doctest normalize_whitespace arabic danish dutch english finnish french german hungarian italian norwegian porter portuguese romanian russian spanish swedish stemmer snowballstemmer german choose a language stemmer stem autobahnen stem a word autobahn invoking the stemmers that way is useful if you do not know the language to be stemmed at runtime alternatively if you already know the language then you can invoke the language specific stemmer directly from nltk stem snowball import germanstemmer stemmer germanstemmer stemmer stem autobahnen autobahn param language the language whose subclass is instantiated type language str or unicode param ignore_stopwords if set to true stopwords are not stemmed and returned unchanged set to false by default type ignore_stopwords bool raise valueerror if there is no stemmer for the specified language a valueerror is raised this helper subclass offers the possibility to invoke a specific stemmer directly this is useful if you already know the language to be stemmed at runtime create an instance of the snowball stemmer param ignore_stopwords if set to true stopwords are not stemmed and returned unchanged set to false by default type ignore_stopwords bool the language is the name of the class minus the final stemmer print out the string representation of the respective class a word stemmer based on the original porter stemming algorithm porter m an algorithm for suffix stripping program 14 3 1980 130 137 a few minor modifications have been made to porter s basic algorithm see the source code of the module nltk stem porter for more information this subclass encapsulates a method for defining the string region r1 it is used by the danish norwegian and swedish stemmer return the region r1 that is used by the scandinavian stemmers r1 is the region after the first non vowel following a vowel or is the null region at the end of the word if there is no such non vowel but then r1 is adjusted so that the region before it contains at least three letters param word the word whose region r1 is determined type word str or unicode param vowels the vowels of the respective 
language that are used to determine the region r1 type vowels unicode return the region r1 for the respective word rtype unicode note this helper method is invoked by the respective stem method of the subclasses danishstemmer norwegianstemmer and swedishstemmer it is not to be invoked directly this subclass encapsulates two methods for defining the standard versions of the string regions r1 r2 and rv return the standard interpretations of the string regions r1 and r2 r1 is the region after the first non vowel following a vowel or is the null region at the end of the word if there is no such non vowel r2 is the region after the first non vowel following a vowel in r1 or is the null region at the end of the word if there is no such non vowel param word the word whose regions r1 and r2 are determined type word str or unicode param vowels the vowels of the respective language that are used to determine the regions r1 and r2 type vowels unicode return r1 r2 the regions r1 and r2 for the respective word rtype tuple note this helper method is invoked by the respective stem method of the subclasses dutchstemmer finnishstemmer frenchstemmer germanstemmer italianstemmer portuguesestemmer romanianstemmer and spanishstemmer it is not to be invoked directly note a detailed description of how to define r1 and r2 can be found at http snowball tartarus org texts r1r2 html return the standard interpretation of the string region rv if the second letter is a consonant rv is the region after the next following vowel if the first two letters are vowels rv is the region after the next following consonant otherwise rv is the region after the third letter param word the word whose region rv is determined type word str or unicode param vowels the vowels of the respective language that are used to determine the region rv type vowels unicode return the region rv for the respective word rtype unicode note this helper method is invoked by the respective stem method of the subclasses italianstemmer portuguesestemmer romanianstemmer and spanishstemmer it is not to be invoked directly https github com snowballstem snowball blob master algorithms arabic stem_unicode sbl original algorithm the snowball arabic light stemmer algorithm assem chelli abdelkrim aries lakhdar benzahia nltk version lakhdar benzahia normalize_pre stes ـ tatweel kasheeda normalize_post أ إ آ ؤ ئ normalize other hamza s أ إ آ ؤ ئ أ إ آ checks بال كال لل ال ة female plural ات suffixes ي ك ه نا كم ها هن هم كما هما ن ا ي و ات ت ة ي ه ك ني نا ها هم هن كم كن هما كما كمو ت ا ن ي نا تا تن past ان هن ين present تما وا تم و تمو ى prefixes أ أأ أآ أؤ أا أإ فال وال ف و لل ال بال كال ب ك ل بب كك سي ست سن سأ يست نست تست suffixes added due to conjugation verbs ه ك ني نا ها هم هن كم كن هما كما كمو ا ن ي نا تا تن ان ون ين suffixes added due to derivation names ي ك ه نا كم ها هن هم كما هما prefixes added due to derivation names فا وا بال كال ال لل prepositions letters ك ل بب كك param token string return normalized token type string strip diacritics strip kasheeda strip punctuation marks normalize last hamza normalize other hamzat past present ya nisbiya bug cause confusion stem an arabic word and return the stemmed form param word string return string set initial values guess type and properties checks1 checks2 pre_normalization avoid stopwords start stemming or next todo how to deal with or next instruction if self suffix_noun_step1a_success or next todo how to deal with or next prefixes post normalization stemming the danish snowball stemmer cvar __vowels the 
danish vowels type __vowels unicode cvar __consonants the danish consonants type __consonants unicode cvar __double_consonants the danish double consonants type __double_consonants tuple cvar __s_ending letters that may directly appear before a word final s type __s_ending unicode cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple note a detailed description of the danish stemming algorithm can be found under http snowball tartarus org algorithms danish stemmer html the language s vowels and other important characters are defined the different suffixes divided into the algorithm s steps and organized by length are listed in tuples stem a danish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode every word is put into lower case for normalization after this the required regions are generated by the respective helper method then the actual stemming process starts every new step is explicitly indicated according to the descriptions on the snowball website step 1 step 2 step 3 step 4 undouble the dutch snowball stemmer cvar __vowels the dutch vowels type __vowels unicode cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step3b_suffixes suffixes to be deleted in step 3b of the algorithm type __step3b_suffixes tuple note a detailed description of the dutch stemming algorithm can be found under http snowball tartarus org algorithms dutch stemmer html stem a dutch word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode vowel accents are removed an initial y a y after a vowel and an i between self __vowels is put into upper case as from now these are treated as consonants r1 is adjusted so that the region before it contains at least 3 letters step 1 step 2 step 3a step 3b derivational suffixes step 4 undouble vowel all occurrences of i and y are put back into lower case the english snowball stemmer cvar __vowels the english vowels type __vowels unicode cvar __double_consonants the english double consonants type __double_consonants tuple cvar __li_ending letters that may directly appear before a word final li type __li_ending unicode cvar __step0_suffixes suffixes to be deleted in step 0 of the algorithm type __step0_suffixes tuple cvar __step1a_suffixes suffixes to be deleted in step 1a of the algorithm type __step1a_suffixes tuple cvar __step1b_suffixes suffixes to be deleted in step 1b of the algorithm type __step1b_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple cvar __step4_suffixes suffixes to be deleted in step 4 of the algorithm type __step4_suffixes tuple cvar __step5_suffixes suffixes to be deleted in step 5 of the algorithm type __step5_suffixes tuple cvar __special_words a dictionary containing words which have to be stemmed specially type __special_words dict note a detailed description of the english stemming algorithm can be found under http snowball tartarus org algorithms english stemmer html stem an english word and return the stemmed form param word the word that 
is stemmed type word str or unicode return the stemmed form rtype unicode map the different apostrophe characters to a single consistent one step 0 step 1a step 1b step 1c step 2 step 3 step 4 step 5 the finnish snowball stemmer cvar __vowels the finnish vowels type __vowels unicode cvar __restricted_vowels a subset of the finnish vowels type __restricted_vowels unicode cvar __long_vowels the finnish vowels in their long forms type __long_vowels tuple cvar __consonants the finnish consonants type __consonants unicode cvar __double_consonants the finnish double consonants type __double_consonants tuple cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple cvar __step4_suffixes suffixes to be deleted in step 4 of the algorithm type __step4_suffixes tuple note a detailed description of the finnish stemming algorithm can be found under http snowball tartarus org algorithms finnish stemmer html stem a finnish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 particles etc step 2 possessives step 3 cases step 4 other endings step 5 plurals step 6 tidying up if the word ends with a double consonant followed by zero or more vowels the last consonant is removed the french snowball stemmer cvar __vowels the french vowels type __vowels unicode cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2a_suffixes suffixes to be deleted in step 2a of the algorithm type __step2a_suffixes tuple cvar __step2b_suffixes suffixes to be deleted in step 2b of the algorithm type __step2b_suffixes tuple cvar __step4_suffixes suffixes to be deleted in step 4 of the algorithm type __step4_suffixes tuple note a detailed description of the french stemming algorithm can be found under http snowball tartarus org algorithms french stemmer html stem a french word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode every occurrence of u after q is put into upper case every occurrence of u and i between vowels is put into upper case every occurrence of y preceded or followed by a vowel is also put into upper case step 1 standard suffix removal step 2a verb suffixes beginning i step 2b other verb suffixes step 3 step 4 residual suffixes step 5 undouble step 6 un accent return the region rv that is used by the french stemmer if the word begins with two vowels rv is the region after the third letter otherwise it is the region after the first vowel not at the beginning of the word or the end of the word if these positions cannot be found exceptionally u par u col or u tap at the beginning of a word is also taken to define rv as the region to their right param word the french word whose region rv is determined type word str or unicode param vowels the french vowels that are used to determine the region rv type vowels unicode return the region rv for the respective french word rtype unicode note this helper method is invoked by the stem method of the subclass frenchstemmer it is not to be invoked directly the german snowball stemmer cvar __vowels the german vowels type __vowels unicode cvar __s_ending letters that may directly appear before a word final s type __s_ending 
unicode cvar __st_ending letter that may directly appear before a word final st type __st_ending unicode cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple note a detailed description of the german stemming algorithm can be found under http snowball tartarus org algorithms german stemmer html stem a german word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode every occurrence of u and y between vowels is put into upper case r1 is adjusted so that the region before it contains at least 3 letters step 1 step 2 step 3 derivational suffixes umlaut accents are removed and u and y are put back into lower case the hungarian snowball stemmer cvar __vowels the hungarian vowels type __vowels unicode cvar __digraphs the hungarian digraphs type __digraphs tuple cvar __double_consonants the hungarian double consonants type __double_consonants tuple cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple cvar __step4_suffixes suffixes to be deleted in step 4 of the algorithm type __step4_suffixes tuple cvar __step5_suffixes suffixes to be deleted in step 5 of the algorithm type __step5_suffixes tuple cvar __step6_suffixes suffixes to be deleted in step 6 of the algorithm type __step6_suffixes tuple cvar __step7_suffixes suffixes to be deleted in step 7 of the algorithm type __step7_suffixes tuple cvar __step8_suffixes suffixes to be deleted in step 8 of the algorithm type __step8_suffixes tuple cvar __step9_suffixes suffixes to be deleted in step 9 of the algorithm type __step9_suffixes tuple note a detailed description of the hungarian stemming algorithm can be found under http snowball tartarus org algorithms hungarian stemmer html stem an hungarian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 remove instrumental case step 2 remove frequent cases step 3 remove special cases step 4 remove other cases step 5 remove factive case step 6 remove owned step 7 remove singular owner suffixes step 8 remove plural owner suffixes step 9 remove plural suffixes return the region r1 that is used by the hungarian stemmer if the word begins with a vowel r1 is defined as the region after the first consonant or digraph two letters stand for one phoneme in the word if the word begins with a consonant it is defined as the region after the first vowel in the word if the word does not contain both a vowel and consonant r1 is the null region at the end of the word param word the hungarian word whose region r1 is determined type word str or unicode param vowels the hungarian vowels that are used to determine the region r1 type vowels unicode param digraphs the digraphs that are used to determine the region r1 type digraphs tuple return the region r1 for the respective word rtype unicode note this helper method is invoked by the stem method of the subclass hungarianstemmer it is not to be invoked directly the italian snowball stemmer cvar __vowels the italian vowels type 
__vowels unicode cvar __step0_suffixes suffixes to be deleted in step 0 of the algorithm type __step0_suffixes tuple cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple note a detailed description of the italian stemming algorithm can be found under http snowball tartarus org algorithms italian stemmer html stem an italian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode all acute accents are replaced by grave accents every occurrence of u after q is put into upper case every occurrence of u and i between vowels is put into upper case step 0 attached pronoun step 1 standard suffix removal step 2 verb suffixes step 3a step 3b the norwegian snowball stemmer cvar __vowels the norwegian vowels type __vowels unicode cvar __s_ending letters that may directly appear before a word final s type __s_ending unicode cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple note a detailed description of the norwegian stemming algorithm can be found under http snowball tartarus org algorithms norwegian stemmer html stem a norwegian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 step 2 step 3 the portuguese snowball stemmer cvar __vowels the portuguese vowels type __vowels unicode cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step4_suffixes suffixes to be deleted in step 4 of the algorithm type __step4_suffixes tuple note a detailed description of the portuguese stemming algorithm can be found under http snowball tartarus org algorithms portuguese stemmer html stem a portuguese word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 standard suffix removal step 2 verb suffixes step 3 step 4 residual suffix step 5 the romanian snowball stemmer cvar __vowels the romanian vowels type __vowels unicode cvar __step0_suffixes suffixes to be deleted in step 0 of the algorithm type __step0_suffixes tuple cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple note a detailed description of the romanian stemming algorithm can be found under http snowball tartarus org algorithms romanian stemmer html stem a romanian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 0 removal of plurals and other simplifications step 1 reduction of combining suffixes step 2 removal of standard suffixes step 3 removal of verb suffixes step 4 removal of final vowel the russian snowball stemmer cvar __perfective_gerund_suffixes suffixes to be deleted type __perfective_gerund_suffixes tuple cvar __adjectival_suffixes 
suffixes to be deleted type __adjectival_suffixes tuple cvar __reflexive_suffixes suffixes to be deleted type __reflexive_suffixes tuple cvar __verb_suffixes suffixes to be deleted type __verb_suffixes tuple cvar __noun_suffixes suffixes to be deleted type __noun_suffixes tuple cvar __superlative_suffixes suffixes to be deleted type __superlative_suffixes tuple cvar __derivational_suffixes suffixes to be deleted type __derivational_suffixes tuple note a detailed description of the russian stemming algorithm can be found under http snowball tartarus org algorithms russian stemmer html stem a russian word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 step 2 step 3 step 4 return the regions rv and r2 which are used by the russian stemmer in any word rv is the region after the first vowel or the end of the word if it contains no vowel r2 is the region after the first non vowel following a vowel in r1 or the end of the word if there is no such non vowel r1 is the region after the first non vowel following a vowel or the end of the word if there is no such non vowel param word the russian word whose regions rv and r2 are determined type word str or unicode return the regions rv and r2 for the respective russian word rtype tuple note this helper method is invoked by the stem method of the subclass russianstemmer it is not to be invoked directly transliterate a russian word into the roman alphabet a russian word whose letters consist of the cyrillic alphabet are transliterated into the roman alphabet in order to ease the forthcoming stemming process param word the word that is transliterated type word unicode return the transliterated word rtype unicode note this helper method is invoked by the stem method of the subclass russianstemmer it is not to be invoked directly transliterate a russian word back into the cyrillic alphabet a russian word formerly transliterated into the roman alphabet in order to ease the stemming process is transliterated back into the cyrillic alphabet its original form param word the word that is transliterated type word str or unicode return word the transliterated word rtype unicode note this helper method is invoked by the stem method of the subclass russianstemmer it is not to be invoked directly the spanish snowball stemmer cvar __vowels the spanish vowels type __vowels unicode cvar __step0_suffixes suffixes to be deleted in step 0 of the algorithm type __step0_suffixes tuple cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2a_suffixes suffixes to be deleted in step 2a of the algorithm type __step2a_suffixes tuple cvar __step2b_suffixes suffixes to be deleted in step 2b of the algorithm type __step2b_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple note a detailed description of the spanish stemming algorithm can be found under http snowball tartarus org algorithms spanish stemmer html stem a spanish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 0 attached pronoun step 1 standard suffix removal step 2a verb suffixes beginning y step 2b other verb suffixes step 3 residual suffix replaces all accented letters on a word with their non accented counterparts param word a spanish word with or without accents type word str or unicode return a word with the accented letters 
á é í ó ú replaced with their non accented counterparts a e i o u rtype str or unicode the swedish snowball stemmer cvar __vowels the swedish vowels type __vowels unicode cvar __s_ending letters that may directly appear before a word final s type __s_ending unicode cvar __step1_suffixes suffixes to be deleted in step 1 of the algorithm type __step1_suffixes tuple cvar __step2_suffixes suffixes to be deleted in step 2 of the algorithm type __step2_suffixes tuple cvar __step3_suffixes suffixes to be deleted in step 3 of the algorithm type __step3_suffixes tuple note a detailed description of the swedish stemming algorithm can be found under http snowball tartarus org algorithms swedish stemmer html stem a swedish word and return the stemmed form param word the word that is stemmed type word str or unicode return the stemmed form rtype unicode step 1 step 2 step 3 this function provides a demonstration of the snowball stemmers after invoking this function and specifying a language it stems an excerpt of the universal declaration of human rights which is a part of the nltk corpus collection and then prints out the original and the stemmed text
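before the source listing below, a minimal usage sketch of the snowball stemmer wrapper and demo behaviour described above; the example words and printed values are illustrative assumptions, not output copied from the demo function or the nltk corpus excerpt

# Minimal usage sketch of nltk.stem.snowball.SnowballStemmer.
# The example tokens below are assumptions for illustration only;
# ignore_stopwords=True additionally requires the NLTK "stopwords" corpus
# (nltk.download("stopwords")).
from nltk.stem.snowball import SnowballStemmer

# Supported language names are exposed as a class attribute.
print(SnowballStemmer.languages)

english = SnowballStemmer("english")
print(english.stem("running"))  # "run": "ing" is removed, then the doubled "nn" is undoubled

# With ignore_stopwords=True, language stopwords are returned unchanged
# by the per-language stem() methods instead of being stemmed.
french = SnowballStemmer("french", ignore_stopwords=True)
for token in ["continuation", "continua", "avait"]:
    print(token, "->", french.stem(token))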
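the per language descriptions above repeatedly refer to the regions r1 r2 and rv the following is a minimal self contained sketch of the shared r1 r2 definition only rv is language specific for example the french definition above differs from the standard one the function name and the example word are illustrative and are not part of nltk s api

# Standalone sketch of the standard R1/R2 region computation:
# R1 is the region after the first non-vowel that follows a vowel
# (or the empty region at the end of the word if there is none);
# R2 is the same rule applied again inside R1.
def r1_r2(word, vowels):
    def after_first_nonvowel_following_vowel(s):
        for i in range(1, len(s)):
            if s[i] not in vowels and s[i - 1] in vowels:
                return s[i + 1 :]
        return ""

    r1 = after_first_nonvowel_following_vowel(word)
    r2 = after_first_nonvowel_following_vowel(r1)
    return r1, r2


if __name__ == "__main__":
    # The classic Snowball illustration: "beautiful" -> R1 "iful", R2 "ul".
    print(r1_r2("beautiful", "aeiouy"))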
import re from nltk.corpus import stopwords from nltk.stem import porter from nltk.stem.api import StemmerI from nltk.stem.util import prefix_replace, suffix_replace class SnowballStemmer(StemmerI): languages = ( "arabic", "danish", "dutch", "english", "finnish", "french", "german", "hungarian", "italian", "norwegian", "porter", "portuguese", "romanian", "russian", "spanish", "swedish", ) def __init__(self, language, ignore_stopwords=False): if language not in self.languages: raise ValueError(f"The language '{language}' is not supported.") stemmerclass = globals()[language.capitalize() + "Stemmer"] self.stemmer = stemmerclass(ignore_stopwords) self.stem = self.stemmer.stem self.stopwords = self.stemmer.stopwords def stem(self, token): return self.stemmer.stem(self, token) class _LanguageSpecificStemmer(StemmerI): def __init__(self, ignore_stopwords=False): language = type(self).__name__.lower() if language.endswith("stemmer"): language = language[:-7] self.stopwords = set() if ignore_stopwords: try: for word in stopwords.words(language): self.stopwords.add(word) except OSError as e: raise ValueError( "{!r} has no list of stopwords. Please set" " 'ignore_stopwords' to 'False'.".format(self) ) from e def __repr__(self): return f"<{type(self).__name__}>" class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer): def __init__(self, ignore_stopwords=False): _LanguageSpecificStemmer.__init__(self, ignore_stopwords) porter.PorterStemmer.__init__(self) class _ScandinavianStemmer(_LanguageSpecificStemmer): def _r1_scandinavian(self, word, vowels): r1 = "" for i in range(1, len(word)): if word[i] not in vowels and word[i - 1] in vowels: if 3 > len(word[: i + 1]) > 0: r1 = word[3:] elif len(word[: i + 1]) >= 3: r1 = word[i + 1 :] else: return word break return r1 class _StandardStemmer(_LanguageSpecificStemmer): def _r1r2_standard(self, word, vowels): r1 = "" r2 = "" for i in range(1, len(word)): if word[i] not in vowels and word[i - 1] in vowels: r1 = word[i + 1 :] break for i in range(1, len(r1)): if r1[i] not in vowels and r1[i - 1] in vowels: r2 = r1[i + 1 :] break return (r1, r2) def _rv_standard(self, word, vowels): rv = "" if len(word) >= 2: if word[1] not in vowels: for i in range(2, len(word)): if word[i] in vowels: rv = word[i + 1 :] break elif word[0] in vowels and word[1] in vowels: for i in range(2, len(word)): if word[i] not in vowels: rv = word[i + 1 :] break else: rv = word[3:] return rv class ArabicStemmer(_StandardStemmer): __vocalization = re.compile( r"[\u064b-\u064c-\u064d-\u064e-\u064f-\u0650-\u0651-\u0652]" ) __kasheeda = re.compile(r"[\u0640]") __arabic_punctuation_marks = re.compile(r"[\u060C-\u061B-\u061F]") __last_hamzat = ("\u0623", "\u0625", "\u0622", "\u0624", "\u0626") __initial_hamzat = re.compile(r"^[\u0622\u0623\u0625]") __waw_hamza = re.compile(r"[\u0624]") __yeh_hamza = re.compile(r"[\u0626]") __alefat = re.compile(r"[\u0623\u0622\u0625]") __checks1 = ( "\u0643\u0627\u0644", "\u0628\u0627\u0644", "\u0627\u0644", "\u0644\u0644", ) __checks2 = ("\u0629", "\u0627\u062a") __suffix_noun_step1a = ( "\u064a", "\u0643", "\u0647", "\u0646\u0627", "\u0643\u0645", "\u0647\u0627", "\u0647\u0646", "\u0647\u0645", "\u0643\u0645\u0627", "\u0647\u0645\u0627", ) __suffix_noun_step1b = "\u0646" __suffix_noun_step2a = ("\u0627", "\u064a", "\u0648") __suffix_noun_step2b = "\u0627\u062a" __suffix_noun_step2c1 = "\u062a" __suffix_noun_step2c2 = "\u0629" __suffix_noun_step3 = "\u064a" __suffix_verb_step1 = ( "\u0647", "\u0643", "\u0646\u064a", "\u0646\u0627", "\u0647\u0627", 
"\u0647\u0645", "\u0647\u0646", "\u0643\u0645", "\u0643\u0646", "\u0647\u0645\u0627", "\u0643\u0645\u0627", "\u0643\u0645\u0648", ) __suffix_verb_step2a = ( "\u062a", "\u0627", "\u0646", "\u064a", "\u0646\u0627", "\u062a\u0627", "\u062a\u0646", "\u0627\u0646", "\u0648\u0646", "\u064a\u0646", "\u062a\u0645\u0627", ) __suffix_verb_step2b = ("\u0648\u0627", "\u062a\u0645") __suffix_verb_step2c = ("\u0648", "\u062a\u0645\u0648") __suffix_all_alef_maqsura = "\u0649" __prefix_step1 = ( "\u0623", "\u0623\u0623", "\u0623\u0622", "\u0623\u0624", "\u0623\u0627", "\u0623\u0625", ) __prefix_step2a = ("\u0641\u0627\u0644", "\u0648\u0627\u0644") __prefix_step2b = ("\u0641", "\u0648") __prefix_step3a_noun = ( "\u0627\u0644", "\u0644\u0644", "\u0643\u0627\u0644", "\u0628\u0627\u0644", ) __prefix_step3b_noun = ( "\u0628", "\u0643", "\u0644", "\u0628\u0628", "\u0643\u0643", ) __prefix_step3_verb = ( "\u0633\u064a", "\u0633\u062a", "\u0633\u0646", "\u0633\u0623", ) __prefix_step4_verb = ( "\u064a\u0633\u062a", "\u0646\u0633\u062a", "\u062a\u0633\u062a", ) __conjugation_suffix_verb_1 = ("\u0647", "\u0643") __conjugation_suffix_verb_2 = ( "\u0646\u064a", "\u0646\u0627", "\u0647\u0627", "\u0647\u0645", "\u0647\u0646", "\u0643\u0645", "\u0643\u0646", ) __conjugation_suffix_verb_3 = ( "\u0647\u0645\u0627", "\u0643\u0645\u0627", "\u0643\u0645\u0648", ) __conjugation_suffix_verb_4 = ("\u0627", "\u0646", "\u064a") __conjugation_suffix_verb_past = ( "\u0646\u0627", "\u062a\u0627", "\u062a\u0646", ) __conjugation_suffix_verb_present = ( "\u0627\u0646", "\u0648\u0646", "\u064a\u0646", ) __conjugation_suffix_noun_1 = ("\u064a", "\u0643", "\u0647") __conjugation_suffix_noun_2 = ( "\u0646\u0627", "\u0643\u0645", "\u0647\u0627", "\u0647\u0646", "\u0647\u0645", ) __conjugation_suffix_noun_3 = ( "\u0643\u0645\u0627", "\u0647\u0645\u0627", ) __prefixes1 = ("\u0648\u0627", "\u0641\u0627") __articles_3len = ("\u0643\u0627\u0644", "\u0628\u0627\u0644") __articles_2len = ("\u0627\u0644", "\u0644\u0644") __prepositions1 = ("\u0643", "\u0644") __prepositions2 = ("\u0628\u0628", "\u0643\u0643") is_verb = True is_noun = True is_defined = False suffixes_verb_step1_success = False suffix_verb_step2a_success = False suffix_verb_step2b_success = False suffix_noun_step2c2_success = False suffix_noun_step1a_success = False suffix_noun_step2a_success = False suffix_noun_step2b_success = False suffixe_noun_step1b_success = False prefix_step2a_success = False prefix_step3a_noun_success = False prefix_step3b_noun_success = False def __normalize_pre(self, token): token = self.__vocalization.sub("", token) token = self.__kasheeda.sub("", token) token = self.__arabic_punctuation_marks.sub("", token) return token def __normalize_post(self, token): for hamza in self.__last_hamzat: if token.endswith(hamza): token = suffix_replace(token, hamza, "\u0621") break token = self.__initial_hamzat.sub("\u0627", token) token = self.__waw_hamza.sub("\u0648", token) token = self.__yeh_hamza.sub("\u064a", token) token = self.__alefat.sub("\u0627", token) return token def __checks_1(self, token): for prefix in self.__checks1: if token.startswith(prefix): if prefix in self.__articles_3len and len(token) > 4: self.is_noun = True self.is_verb = False self.is_defined = True break if prefix in self.__articles_2len and len(token) > 3: self.is_noun = True self.is_verb = False self.is_defined = True break def __checks_2(self, token): for suffix in self.__checks2: if token.endswith(suffix): if suffix == "\u0629" and len(token) > 2: self.is_noun = True self.is_verb = False 
break if suffix == "\u0627\u062a" and len(token) > 3: self.is_noun = True self.is_verb = False break def __Suffix_Verb_Step1(self, token): for suffix in self.__suffix_verb_step1: if token.endswith(suffix): if suffix in self.__conjugation_suffix_verb_1 and len(token) >= 4: token = token[:-1] self.suffixes_verb_step1_success = True break if suffix in self.__conjugation_suffix_verb_2 and len(token) >= 5: token = token[:-2] self.suffixes_verb_step1_success = True break if suffix in self.__conjugation_suffix_verb_3 and len(token) >= 6: token = token[:-3] self.suffixes_verb_step1_success = True break return token def __Suffix_Verb_Step2a(self, token): for suffix in self.__suffix_verb_step2a: if token.endswith(suffix) and len(token) > 3: if suffix == "\u062a" and len(token) >= 4: token = token[:-1] self.suffix_verb_step2a_success = True break if suffix in self.__conjugation_suffix_verb_4 and len(token) >= 4: token = token[:-1] self.suffix_verb_step2a_success = True break if suffix in self.__conjugation_suffix_verb_past and len(token) >= 5: token = token[:-2] self.suffix_verb_step2a_success = True break if suffix in self.__conjugation_suffix_verb_present and len(token) > 5: token = token[:-2] self.suffix_verb_step2a_success = True break if suffix == "\u062a\u0645\u0627" and len(token) >= 6: token = token[:-3] self.suffix_verb_step2a_success = True break return token def __Suffix_Verb_Step2c(self, token): for suffix in self.__suffix_verb_step2c: if token.endswith(suffix): if suffix == "\u062a\u0645\u0648" and len(token) >= 6: token = token[:-3] break if suffix == "\u0648" and len(token) >= 4: token = token[:-1] break return token def __Suffix_Verb_Step2b(self, token): for suffix in self.__suffix_verb_step2b: if token.endswith(suffix) and len(token) >= 5: token = token[:-2] self.suffix_verb_step2b_success = True break return token def __Suffix_Noun_Step2c2(self, token): for suffix in self.__suffix_noun_step2c2: if token.endswith(suffix) and len(token) >= 3: token = token[:-1] self.suffix_noun_step2c2_success = True break return token def __Suffix_Noun_Step1a(self, token): for suffix in self.__suffix_noun_step1a: if token.endswith(suffix): if suffix in self.__conjugation_suffix_noun_1 and len(token) >= 4: token = token[:-1] self.suffix_noun_step1a_success = True break if suffix in self.__conjugation_suffix_noun_2 and len(token) >= 5: token = token[:-2] self.suffix_noun_step1a_success = True break if suffix in self.__conjugation_suffix_noun_3 and len(token) >= 6: token = token[:-3] self.suffix_noun_step1a_success = True break return token def __Suffix_Noun_Step2a(self, token): for suffix in self.__suffix_noun_step2a: if token.endswith(suffix) and len(token) > 4: token = token[:-1] self.suffix_noun_step2a_success = True break return token def __Suffix_Noun_Step2b(self, token): for suffix in self.__suffix_noun_step2b: if token.endswith(suffix) and len(token) >= 5: token = token[:-2] self.suffix_noun_step2b_success = True break return token def __Suffix_Noun_Step2c1(self, token): for suffix in self.__suffix_noun_step2c1: if token.endswith(suffix) and len(token) >= 4: token = token[:-1] break return token def __Suffix_Noun_Step1b(self, token): for suffix in self.__suffix_noun_step1b: if token.endswith(suffix) and len(token) > 5: token = token[:-1] self.suffixe_noun_step1b_success = True break return token def __Suffix_Noun_Step3(self, token): for suffix in self.__suffix_noun_step3: if token.endswith(suffix) and len(token) >= 3: token = token[:-1] break return token def __Suffix_All_alef_maqsura(self, 
token): for suffix in self.__suffix_all_alef_maqsura: if token.endswith(suffix): token = suffix_replace(token, suffix, "\u064a") return token def __Prefix_Step1(self, token): for prefix in self.__prefix_step1: if token.startswith(prefix) and len(token) > 3: if prefix == "\u0623\u0623": token = prefix_replace(token, prefix, "\u0623") break elif prefix == "\u0623\u0622": token = prefix_replace(token, prefix, "\u0622") break elif prefix == "\u0623\u0624": token = prefix_replace(token, prefix, "\u0624") break elif prefix == "\u0623\u0627": token = prefix_replace(token, prefix, "\u0627") break elif prefix == "\u0623\u0625": token = prefix_replace(token, prefix, "\u0625") break return token def __Prefix_Step2a(self, token): for prefix in self.__prefix_step2a: if token.startswith(prefix) and len(token) > 5: token = token[len(prefix) :] self.prefix_step2a_success = True break return token def __Prefix_Step2b(self, token): for prefix in self.__prefix_step2b: if token.startswith(prefix) and len(token) > 3: if token[:2] not in self.__prefixes1: token = token[len(prefix) :] break return token def __Prefix_Step3a_Noun(self, token): for prefix in self.__prefix_step3a_noun: if token.startswith(prefix): if prefix in self.__articles_2len and len(token) > 4: token = token[len(prefix) :] self.prefix_step3a_noun_success = True break if prefix in self.__articles_3len and len(token) > 5: token = token[len(prefix) :] break return token def __Prefix_Step3b_Noun(self, token): for prefix in self.__prefix_step3b_noun: if token.startswith(prefix): if len(token) > 3: if prefix == "\u0628": token = token[len(prefix) :] self.prefix_step3b_noun_success = True break if prefix in self.__prepositions2: token = prefix_replace(token, prefix, prefix[1]) self.prefix_step3b_noun_success = True break if prefix in self.__prepositions1 and len(token) > 4: token = token[len(prefix) :] self.prefix_step3b_noun_success = True break return token def __Prefix_Step3_Verb(self, token): for prefix in self.__prefix_step3_verb: if token.startswith(prefix) and len(token) > 4: token = prefix_replace(token, prefix, prefix[1]) break return token def __Prefix_Step4_Verb(self, token): for prefix in self.__prefix_step4_verb: if token.startswith(prefix) and len(token) > 4: token = prefix_replace(token, prefix, "\u0627\u0633\u062a") self.is_verb = True self.is_noun = False break return token def stem(self, word): self.is_verb = True self.is_noun = True self.is_defined = False self.suffix_verb_step2a_success = False self.suffix_verb_step2b_success = False self.suffix_noun_step2c2_success = False self.suffix_noun_step1a_success = False self.suffix_noun_step2a_success = False self.suffix_noun_step2b_success = False self.suffixe_noun_step1b_success = False self.prefix_step2a_success = False self.prefix_step3a_noun_success = False self.prefix_step3b_noun_success = False modified_word = word self.__checks_1(modified_word) self.__checks_2(modified_word) modified_word = self.__normalize_pre(modified_word) if modified_word in self.stopwords or len(modified_word) <= 2: return modified_word if self.is_verb: modified_word = self.__Suffix_Verb_Step1(modified_word) if self.suffixes_verb_step1_success: modified_word = self.__Suffix_Verb_Step2a(modified_word) if not self.suffix_verb_step2a_success: modified_word = self.__Suffix_Verb_Step2c(modified_word) else: modified_word = self.__Suffix_Verb_Step2b(modified_word) if not self.suffix_verb_step2b_success: modified_word = self.__Suffix_Verb_Step2a(modified_word) if self.is_noun: modified_word = 
self.__Suffix_Noun_Step2c2(modified_word) if not self.suffix_noun_step2c2_success: if not self.is_defined: modified_word = self.__Suffix_Noun_Step1a(modified_word) modified_word = self.__Suffix_Noun_Step2a(modified_word) if not self.suffix_noun_step2a_success: modified_word = self.__Suffix_Noun_Step2b(modified_word) if ( not self.suffix_noun_step2b_success and not self.suffix_noun_step2a_success ): modified_word = self.__Suffix_Noun_Step2c1(modified_word) else: modified_word = self.__Suffix_Noun_Step1b(modified_word) if self.suffixe_noun_step1b_success: modified_word = self.__Suffix_Noun_Step2a(modified_word) if not self.suffix_noun_step2a_success: modified_word = self.__Suffix_Noun_Step2b(modified_word) if ( not self.suffix_noun_step2b_success and not self.suffix_noun_step2a_success ): modified_word = self.__Suffix_Noun_Step2c1(modified_word) else: if not self.is_defined: modified_word = self.__Suffix_Noun_Step2a(modified_word) modified_word = self.__Suffix_Noun_Step2b(modified_word) modified_word = self.__Suffix_Noun_Step3(modified_word) if not self.is_noun and self.is_verb: modified_word = self.__Suffix_All_alef_maqsura(modified_word) modified_word = self.__Prefix_Step1(modified_word) modified_word = self.__Prefix_Step2a(modified_word) if not self.prefix_step2a_success: modified_word = self.__Prefix_Step2b(modified_word) modified_word = self.__Prefix_Step3a_Noun(modified_word) if not self.prefix_step3a_noun_success and self.is_noun: modified_word = self.__Prefix_Step3b_Noun(modified_word) else: if not self.prefix_step3b_noun_success and self.is_verb: modified_word = self.__Prefix_Step3_Verb(modified_word) modified_word = self.__Prefix_Step4_Verb(modified_word) modified_word = self.__normalize_post(modified_word) stemmed_word = modified_word return stemmed_word class DanishStemmer(_ScandinavianStemmer): __vowels = "aeiouy\xE6\xE5\xF8" __consonants = "bcdfghjklmnpqrstvwxz" __double_consonants = ( "bb", "cc", "dd", "ff", "gg", "hh", "jj", "kk", "ll", "mm", "nn", "pp", "qq", "rr", "ss", "tt", "vv", "ww", "xx", "zz", ) __s_ending = "abcdfghjklmnoprtvyz\xE5" __step1_suffixes = ( "erendes", "erende", "hedens", "ethed", "erede", "heden", "heder", "endes", "ernes", "erens", "erets", "ered", "ende", "erne", "eren", "erer", "heds", "enes", "eres", "eret", "hed", "ene", "ere", "ens", "ers", "ets", "en", "er", "es", "et", "e", "s", ) __step2_suffixes = ("gd", "dt", "gt", "kt") __step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig") def stem(self, word): word = word.lower() if word in self.stopwords: return word r1 = self._r1_scandinavian(word, self.__vowels) for suffix in self.__step1_suffixes: if r1.endswith(suffix): if suffix == "s": if word[-2] in self.__s_ending: word = word[:-1] r1 = r1[:-1] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] break for suffix in self.__step2_suffixes: if r1.endswith(suffix): word = word[:-1] r1 = r1[:-1] break if r1.endswith("igst"): word = word[:-2] r1 = r1[:-2] for suffix in self.__step3_suffixes: if r1.endswith(suffix): if suffix == "l\xF8st": word = word[:-1] r1 = r1[:-1] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] if r1.endswith(self.__step2_suffixes): word = word[:-1] r1 = r1[:-1] break for double_cons in self.__double_consonants: if word.endswith(double_cons) and len(word) > 3: word = word[:-1] break return word class DutchStemmer(_StandardStemmer): __vowels = "aeiouy\xE8" __step1_suffixes = ("heden", "ene", "en", "se", "s") __step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig") def stem(self, word): word = word.lower() if 
word in self.stopwords: return word step2_success = False word = ( word.replace("\xE4", "a") .replace("\xE1", "a") .replace("\xEB", "e") .replace("\xE9", "e") .replace("\xED", "i") .replace("\xEF", "i") .replace("\xF6", "o") .replace("\xF3", "o") .replace("\xFC", "u") .replace("\xFA", "u") ) if word.startswith("y"): word = "".join(("Y", word[1:])) for i in range(1, len(word)): if word[i - 1] in self.__vowels and word[i] == "y": word = "".join((word[:i], "Y", word[i + 1 :])) for i in range(1, len(word) - 1): if ( word[i - 1] in self.__vowels and word[i] == "i" and word[i + 1] in self.__vowels ): word = "".join((word[:i], "I", word[i + 1 :])) r1, r2 = self._r1r2_standard(word, self.__vowels) for i in range(1, len(word)): if word[i] not in self.__vowels and word[i - 1] in self.__vowels: if 3 > len(word[: i + 1]) > 0: r1 = word[3:] elif len(word[: i + 1]) == 0: return word break for suffix in self.__step1_suffixes: if r1.endswith(suffix): if suffix == "heden": word = suffix_replace(word, suffix, "heid") r1 = suffix_replace(r1, suffix, "heid") if r2.endswith("heden"): r2 = suffix_replace(r2, suffix, "heid") elif ( suffix in ("ene", "en") and not word.endswith("heden") and word[-len(suffix) - 1] not in self.__vowels and word[-len(suffix) - 3 : -len(suffix)] != "gem" ): word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] if word.endswith(("kk", "dd", "tt")): word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] elif ( suffix in ("se", "s") and word[-len(suffix) - 1] not in self.__vowels and word[-len(suffix) - 1] != "j" ): word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] break if r1.endswith("e") and word[-2] not in self.__vowels: step2_success = True word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] if word.endswith(("kk", "dd", "tt")): word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] if r2.endswith("heid") and word[-5] != "c": word = word[:-4] r1 = r1[:-4] r2 = r2[:-4] if ( r1.endswith("en") and word[-3] not in self.__vowels and word[-5:-2] != "gem" ): word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] if word.endswith(("kk", "dd", "tt")): word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] for suffix in self.__step3b_suffixes: if r2.endswith(suffix): if suffix in ("end", "ing"): word = word[:-3] r2 = r2[:-3] if r2.endswith("ig") and word[-3] != "e": word = word[:-2] else: if word.endswith(("kk", "dd", "tt")): word = word[:-1] elif suffix == "ig" and word[-3] != "e": word = word[:-2] elif suffix == "lijk": word = word[:-4] r1 = r1[:-4] if r1.endswith("e") and word[-2] not in self.__vowels: word = word[:-1] if word.endswith(("kk", "dd", "tt")): word = word[:-1] elif suffix == "baar": word = word[:-4] elif suffix == "bar" and step2_success: word = word[:-3] break if len(word) >= 4: if word[-1] not in self.__vowels and word[-1] != "I": if word[-3:-1] in ("aa", "ee", "oo", "uu"): if word[-4] not in self.__vowels: word = "".join((word[:-3], word[-3], word[-1])) word = word.replace("I", "i").replace("Y", "y") return word class EnglishStemmer(_StandardStemmer): __vowels = "aeiouy" __double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt") __li_ending = "cdeghkmnrt" __step0_suffixes = ("'s'", "'s", "'") __step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s") __step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed") __step2_suffixes = ( "ization", "ational", "fulness", "ousness", "iveness", "tional", "biliti", "lessli", "entli", "ation", "alism", "aliti", "ousli", "iviti", "fulli", "enci", "anci", "abli", "izer", "ator", "alli", "bli", "ogi", "li", ) 
__step3_suffixes = ( "ational", "tional", "alize", "icate", "iciti", "ative", "ical", "ness", "ful", ) __step4_suffixes = ( "ement", "ance", "ence", "able", "ible", "ment", "ant", "ent", "ism", "ate", "iti", "ous", "ive", "ize", "ion", "al", "er", "ic", ) __step5_suffixes = ("e", "l") __special_words = { "skis": "ski", "skies": "sky", "dying": "die", "lying": "lie", "tying": "tie", "idly": "idl", "gently": "gentl", "ugly": "ugli", "early": "earli", "only": "onli", "singly": "singl", "sky": "sky", "news": "news", "howe": "howe", "atlas": "atlas", "cosmos": "cosmos", "bias": "bias", "andes": "andes", "inning": "inning", "innings": "inning", "outing": "outing", "outings": "outing", "canning": "canning", "cannings": "canning", "herring": "herring", "herrings": "herring", "earring": "earring", "earrings": "earring", "proceed": "proceed", "proceeds": "proceed", "proceeded": "proceed", "proceeding": "proceed", "exceed": "exceed", "exceeds": "exceed", "exceeded": "exceed", "exceeding": "exceed", "succeed": "succeed", "succeeds": "succeed", "succeeded": "succeed", "succeeding": "succeed", } def stem(self, word): word = word.lower() if word in self.stopwords or len(word) <= 2: return word elif word in self.__special_words: return self.__special_words[word] word = ( word.replace("\u2019", "\x27") .replace("\u2018", "\x27") .replace("\u201B", "\x27") ) if word.startswith("\x27"): word = word[1:] if word.startswith("y"): word = "".join(("Y", word[1:])) for i in range(1, len(word)): if word[i - 1] in self.__vowels and word[i] == "y": word = "".join((word[:i], "Y", word[i + 1 :])) step1a_vowel_found = False step1b_vowel_found = False r1 = "" r2 = "" if word.startswith(("gener", "commun", "arsen")): if word.startswith(("gener", "arsen")): r1 = word[5:] else: r1 = word[6:] for i in range(1, len(r1)): if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels: r2 = r1[i + 1 :] break else: r1, r2 = self._r1r2_standard(word, self.__vowels) for suffix in self.__step0_suffixes: if word.endswith(suffix): word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] break for suffix in self.__step1a_suffixes: if word.endswith(suffix): if suffix == "sses": word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix in ("ied", "ies"): if len(word[: -len(suffix)]) > 1: word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] else: word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] elif suffix == "s": for letter in word[:-2]: if letter in self.__vowels: step1a_vowel_found = True break if step1a_vowel_found: word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] break for suffix in self.__step1b_suffixes: if word.endswith(suffix): if suffix in ("eed", "eedly"): if r1.endswith(suffix): word = suffix_replace(word, suffix, "ee") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ee") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ee") else: r2 = "" else: for letter in word[: -len(suffix)]: if letter in self.__vowels: step1b_vowel_found = True break if step1b_vowel_found: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] if word.endswith(("at", "bl", "iz")): word = "".join((word, "e")) r1 = "".join((r1, "e")) if len(word) > 5 or len(r1) >= 3: r2 = "".join((r2, "e")) elif word.endswith(self.__double_consonants): word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] elif ( r1 == "" and len(word) >= 3 and word[-1] not in self.__vowels and word[-1] not in "wxY" and word[-2] in self.__vowels and word[-3] not in self.__vowels ) or ( r1 == "" and len(word) == 2 and word[0] in self.__vowels and word[1] 
not in self.__vowels ): word = "".join((word, "e")) if len(r1) > 0: r1 = "".join((r1, "e")) if len(r2) > 0: r2 = "".join((r2, "e")) break if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels: word = "".join((word[:-1], "i")) if len(r1) >= 1: r1 = "".join((r1[:-1], "i")) else: r1 = "" if len(r2) >= 1: r2 = "".join((r2[:-1], "i")) else: r2 = "" for suffix in self.__step2_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix == "tional": word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix in ("enci", "anci", "abli"): word = "".join((word[:-1], "e")) if len(r1) >= 1: r1 = "".join((r1[:-1], "e")) else: r1 = "" if len(r2) >= 1: r2 = "".join((r2[:-1], "e")) else: r2 = "" elif suffix == "entli": word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix in ("izer", "ization"): word = suffix_replace(word, suffix, "ize") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ize") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ize") else: r2 = "" elif suffix in ("ational", "ation", "ator"): word = suffix_replace(word, suffix, "ate") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ate") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ate") else: r2 = "e" elif suffix in ("alism", "aliti", "alli"): word = suffix_replace(word, suffix, "al") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "al") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "al") else: r2 = "" elif suffix == "fulness": word = word[:-4] r1 = r1[:-4] r2 = r2[:-4] elif suffix in ("ousli", "ousness"): word = suffix_replace(word, suffix, "ous") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ous") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ous") else: r2 = "" elif suffix in ("iveness", "iviti"): word = suffix_replace(word, suffix, "ive") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ive") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ive") else: r2 = "e" elif suffix in ("biliti", "bli"): word = suffix_replace(word, suffix, "ble") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ble") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ble") else: r2 = "" elif suffix == "ogi" and word[-4] == "l": word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] elif suffix in ("fulli", "lessli"): word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix == "li" and word[-3] in self.__li_ending: word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] break for suffix in self.__step3_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix == "tional": word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix == "ational": word = suffix_replace(word, suffix, "ate") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ate") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ate") else: r2 = "" elif suffix == "alize": word = word[:-3] r1 = r1[:-3] r2 = r2[:-3] elif suffix in ("icate", "iciti", "ical"): word = suffix_replace(word, suffix, "ic") if len(r1) >= len(suffix): r1 = suffix_replace(r1, suffix, "ic") else: r1 = "" if len(r2) >= len(suffix): r2 = suffix_replace(r2, suffix, "ic") else: r2 = "" elif suffix in ("ful", "ness"): word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] elif suffix == "ative" and r2.endswith(suffix): word = word[:-5] r1 = r1[:-5] r2 = r2[:-5] break for suffix in self.__step4_suffixes: if word.endswith(suffix): if r2.endswith(suffix): if 
suffix == "ion": if word[-4] in "st": word = word[:-3] r1 = r1[:-3] r2 = r2[:-3] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] break if r2.endswith("l") and word[-2] == "l": word = word[:-1] elif r2.endswith("e"): word = word[:-1] elif r1.endswith("e"): if len(word) >= 4 and ( word[-2] in self.__vowels or word[-2] in "wxY" or word[-3] not in self.__vowels or word[-4] in self.__vowels ): word = word[:-1] word = word.replace("Y", "y") return word class FinnishStemmer(_StandardStemmer): __vowels = "aeiouy\xE4\xF6" __restricted_vowels = "aeiou\xE4\xF6" __long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4", "\xF6\xF6") __consonants = "bcdfghjklmnpqrstvwxz" __double_consonants = ( "bb", "cc", "dd", "ff", "gg", "hh", "jj", "kk", "ll", "mm", "nn", "pp", "qq", "rr", "ss", "tt", "vv", "ww", "xx", "zz", ) __step1_suffixes = ( "kaan", "k\xE4\xE4n", "sti", "kin", "han", "h\xE4n", "ko", "k\xF6", "pa", "p\xE4", ) __step2_suffixes = ("nsa", "ns\xE4", "mme", "nne", "si", "ni", "an", "\xE4n", "en") __step3_suffixes = ( "siin", "tten", "seen", "han", "hen", "hin", "hon", "h\xE4n", "h\xF6n", "den", "tta", "tt\xE4", "ssa", "ss\xE4", "sta", "st\xE4", "lla", "ll\xE4", "lta", "lt\xE4", "lle", "ksi", "ine", "ta", "t\xE4", "na", "n\xE4", "a", "\xE4", "n", ) __step4_suffixes = ( "impi", "impa", "imp\xE4", "immi", "imma", "imm\xE4", "mpi", "mpa", "mp\xE4", "mmi", "mma", "mm\xE4", "eja", "ej\xE4", ) def stem(self, word): word = word.lower() if word in self.stopwords: return word step3_success = False r1, r2 = self._r1r2_standard(word, self.__vowels) for suffix in self.__step1_suffixes: if r1.endswith(suffix): if suffix == "sti": if suffix in r2: word = word[:-3] r1 = r1[:-3] r2 = r2[:-3] else: if word[-len(suffix) - 1] in "ntaeiouy\xE4\xF6": word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] break for suffix in self.__step2_suffixes: if r1.endswith(suffix): if suffix == "si": if word[-3] != "k": word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix == "ni": word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] if word.endswith("kse"): word = suffix_replace(word, "kse", "ksi") if r1.endswith("kse"): r1 = suffix_replace(r1, "kse", "ksi") if r2.endswith("kse"): r2 = suffix_replace(r2, "kse", "ksi") elif suffix == "an": if word[-4:-2] in ("ta", "na") or word[-5:-2] in ( "ssa", "sta", "lla", "lta", ): word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix == "\xE4n": if word[-4:-2] in ("t\xE4", "n\xE4") or word[-5:-2] in ( "ss\xE4", "st\xE4", "ll\xE4", "lt\xE4", ): word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] elif suffix == "en": if word[-5:-2] in ("lle", "ine"): word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] else: word = word[:-3] r1 = r1[:-3] r2 = r2[:-3] break for suffix in self.__step3_suffixes: if r1.endswith(suffix): if suffix in ("han", "hen", "hin", "hon", "h\xE4n", "h\xF6n"): if ( (suffix == "han" and word[-4] == "a") or (suffix == "hen" and word[-4] == "e") or (suffix == "hin" and word[-4] == "i") or (suffix == "hon" and word[-4] == "o") or (suffix == "h\xE4n" and word[-4] == "\xE4") or (suffix == "h\xF6n" and word[-4] == "\xF6") ): word = word[:-3] r1 = r1[:-3] r2 = r2[:-3] step3_success = True elif suffix in ("siin", "den", "tten"): if ( word[-len(suffix) - 1] == "i" and word[-len(suffix) - 2] in self.__restricted_vowels ): word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] step3_success = True else: continue elif suffix == "seen": if word[-6:-4] in self.__long_vowels: word = word[:-4] r1 = r1[:-4] r2 = r2[:-4] step3_success = True else: 
continue elif suffix in ("a", "\xE4"): if word[-2] in self.__vowels and word[-3] in self.__consonants: word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] step3_success = True elif suffix in ("tta", "tt\xE4"): if word[-4] == "e": word = word[:-3] r1 = r1[:-3] r2 = r2[:-3] step3_success = True elif suffix == "n": word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] step3_success = True if word[-2:] == "ie" or word[-2:] in self.__long_vowels: word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] step3_success = True break for suffix in self.__step4_suffixes: if r2.endswith(suffix): if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma", "mm\xE4"): if word[-5:-3] != "po": word = word[:-3] r1 = r1[:-3] r2 = r2[:-3] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] break if step3_success and len(r1) >= 1 and r1[-1] in "ij": word = word[:-1] r1 = r1[:-1] elif ( not step3_success and len(r1) >= 2 and r1[-1] == "t" and r1[-2] in self.__vowels ): word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] if r2.endswith("imma"): word = word[:-4] r1 = r1[:-4] elif r2.endswith("mma") and r2[-5:-3] != "po": word = word[:-3] r1 = r1[:-3] if r1[-2:] in self.__long_vowels: word = word[:-1] r1 = r1[:-1] if len(r1) >= 2 and r1[-2] in self.__consonants and r1[-1] in "a\xE4ei": word = word[:-1] r1 = r1[:-1] if r1.endswith(("oj", "uj")): word = word[:-1] r1 = r1[:-1] if r1.endswith("jo"): word = word[:-1] r1 = r1[:-1] for i in range(1, len(word)): if word[-i] in self.__vowels: continue else: if i == 1: if word[-i - 1 :] in self.__double_consonants: word = word[:-1] else: if word[-i - 1 : -i + 1] in self.__double_consonants: word = "".join((word[:-i], word[-i + 1 :])) break return word class FrenchStemmer(_StandardStemmer): __vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9" __step1_suffixes = ( "issements", "issement", "atrices", "atrice", "ateurs", "ations", "logies", "usions", "utions", "ements", "amment", "emment", "ances", "iqUes", "ismes", "ables", "istes", "ateur", "ation", "logie", "usion", "ution", "ences", "ement", "euses", "ments", "ance", "iqUe", "isme", "able", "iste", "ence", "it\xE9s", "ives", "eaux", "euse", "ment", "eux", "it\xE9", "ive", "ifs", "aux", "if", ) __step2a_suffixes = ( "issaIent", "issantes", "iraIent", "issante", "issants", "issions", "irions", "issais", "issait", "issant", "issent", "issiez", "issons", "irais", "irait", "irent", "iriez", "irons", "iront", "isses", "issez", "\xEEmes", "\xEEtes", "irai", "iras", "irez", "isse", "ies", "ira", "\xEEt", "ie", "ir", "is", "it", "i", ) __step2b_suffixes = ( "eraIent", "assions", "erions", "assent", "assiez", "\xE8rent", "erais", "erait", "eriez", "erons", "eront", "aIent", "antes", "asses", "ions", "erai", "eras", "erez", "\xE2mes", "\xE2tes", "ante", "ants", "asse", "\xE9es", "era", "iez", "ais", "ait", "ant", "\xE9e", "\xE9s", "er", "ez", "\xE2t", "ai", "as", "\xE9", "a", ) __step4_suffixes = ("i\xE8re", "I\xE8re", "ion", "ier", "Ier", "e", "\xEB") def stem(self, word): word = word.lower() if word in self.stopwords: return word step1_success = False rv_ending_found = False step2a_success = False step2b_success = False for i in range(1, len(word)): if word[i - 1] == "q" and word[i] == "u": word = "".join((word[:i], "U", word[i + 1 :])) for i in range(1, len(word) - 1): if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: if word[i] == "u": word = "".join((word[:i], "U", word[i + 1 :])) elif word[i] == "i": word = "".join((word[:i], "I", word[i + 1 
:])) if word[i - 1] in self.__vowels or word[i + 1] in self.__vowels: if word[i] == "y": word = "".join((word[:i], "Y", word[i + 1 :])) r1, r2 = self._r1r2_standard(word, self.__vowels) rv = self.__rv_french(word, self.__vowels) for suffix in self.__step1_suffixes: if word.endswith(suffix): if suffix == "eaux": word = word[:-1] step1_success = True elif suffix in ("euse", "euses"): if suffix in r2: word = word[: -len(suffix)] step1_success = True elif suffix in r1: word = suffix_replace(word, suffix, "eux") step1_success = True elif suffix in ("ement", "ements") and suffix in rv: word = word[: -len(suffix)] step1_success = True if word[-2:] == "iv" and "iv" in r2: word = word[:-2] if word[-2:] == "at" and "at" in r2: word = word[:-2] elif word[-3:] == "eus": if "eus" in r2: word = word[:-3] elif "eus" in r1: word = "".join((word[:-1], "x")) elif word[-3:] in ("abl", "iqU"): if "abl" in r2 or "iqU" in r2: word = word[:-3] elif word[-3:] in ("i\xE8r", "I\xE8r"): if "i\xE8r" in rv or "I\xE8r" in rv: word = "".join((word[:-3], "i")) elif suffix == "amment" and suffix in rv: word = suffix_replace(word, "amment", "ant") rv = suffix_replace(rv, "amment", "ant") rv_ending_found = True elif suffix == "emment" and suffix in rv: word = suffix_replace(word, "emment", "ent") rv_ending_found = True elif ( suffix in ("ment", "ments") and suffix in rv and not rv.startswith(suffix) and rv[rv.rindex(suffix) - 1] in self.__vowels ): word = word[: -len(suffix)] rv = rv[: -len(suffix)] rv_ending_found = True elif suffix == "aux" and suffix in r1: word = "".join((word[:-2], "l")) step1_success = True elif ( suffix in ("issement", "issements") and suffix in r1 and word[-len(suffix) - 1] not in self.__vowels ): word = word[: -len(suffix)] step1_success = True elif ( suffix in ( "ance", "iqUe", "isme", "able", "iste", "eux", "ances", "iqUes", "ismes", "ables", "istes", ) and suffix in r2 ): word = word[: -len(suffix)] step1_success = True elif ( suffix in ("atrice", "ateur", "ation", "atrices", "ateurs", "ations") and suffix in r2 ): word = word[: -len(suffix)] step1_success = True if word[-2:] == "ic": if "ic" in r2: word = word[:-2] else: word = "".join((word[:-2], "iqU")) elif suffix in ("logie", "logies") and suffix in r2: word = suffix_replace(word, suffix, "log") step1_success = True elif suffix in ("usion", "ution", "usions", "utions") and suffix in r2: word = suffix_replace(word, suffix, "u") step1_success = True elif suffix in ("ence", "ences") and suffix in r2: word = suffix_replace(word, suffix, "ent") step1_success = True elif suffix in ("it\xE9", "it\xE9s") and suffix in r2: word = word[: -len(suffix)] step1_success = True if word[-4:] == "abil": if "abil" in r2: word = word[:-4] else: word = "".join((word[:-2], "l")) elif word[-2:] == "ic": if "ic" in r2: word = word[:-2] else: word = "".join((word[:-2], "iqU")) elif word[-2:] == "iv": if "iv" in r2: word = word[:-2] elif suffix in ("if", "ive", "ifs", "ives") and suffix in r2: word = word[: -len(suffix)] step1_success = True if word[-2:] == "at" and "at" in r2: word = word[:-2] if word[-2:] == "ic": if "ic" in r2: word = word[:-2] else: word = "".join((word[:-2], "iqU")) break if not step1_success or rv_ending_found: for suffix in self.__step2a_suffixes: if word.endswith(suffix): if ( suffix in rv and len(rv) > len(suffix) and rv[rv.rindex(suffix) - 1] not in self.__vowels ): word = word[: -len(suffix)] step2a_success = True break if not step2a_success: for suffix in self.__step2b_suffixes: if rv.endswith(suffix): if suffix == "ions" and "ions" in 
r2: word = word[:-4] step2b_success = True elif suffix in ( "eraIent", "erions", "\xE8rent", "erais", "erait", "eriez", "erons", "eront", "erai", "eras", "erez", "\xE9es", "era", "iez", "\xE9e", "\xE9s", "er", "ez", "\xE9", ): word = word[: -len(suffix)] step2b_success = True elif suffix in ( "assions", "assent", "assiez", "aIent", "antes", "asses", "\xE2mes", "\xE2tes", "ante", "ants", "asse", "ais", "ait", "ant", "\xE2t", "ai", "as", "a", ): word = word[: -len(suffix)] rv = rv[: -len(suffix)] step2b_success = True if rv.endswith("e"): word = word[:-1] break if step1_success or step2a_success or step2b_success: if word[-1] == "Y": word = "".join((word[:-1], "i")) elif word[-1] == "\xE7": word = "".join((word[:-1], "c")) else: if len(word) >= 2 and word[-1] == "s" and word[-2] not in "aiou\xE8s": word = word[:-1] for suffix in self.__step4_suffixes: if word.endswith(suffix): if suffix in rv: if suffix == "ion" and suffix in r2 and rv[-4] in "st": word = word[:-3] elif suffix in ("ier", "i\xE8re", "Ier", "I\xE8re"): word = suffix_replace(word, suffix, "i") elif suffix == "e": word = word[:-1] elif suffix == "\xEB" and word[-3:-1] == "gu": word = word[:-1] break if word.endswith(("enn", "onn", "ett", "ell", "eill")): word = word[:-1] for i in range(1, len(word)): if word[-i] not in self.__vowels: i += 1 else: if i != 1 and word[-i] in ("\xE9", "\xE8"): word = "".join((word[:-i], "e", word[-i + 1 :])) break word = word.replace("I", "i").replace("U", "u").replace("Y", "y") return word def __rv_french(self, word, vowels): rv = "" if len(word) >= 2: if word.startswith(("par", "col", "tap")) or ( word[0] in vowels and word[1] in vowels ): rv = word[3:] else: for i in range(1, len(word)): if word[i] in vowels: rv = word[i + 1 :] break return rv class GermanStemmer(_StandardStemmer): __vowels = "aeiouy\xE4\xF6\xFC" __s_ending = "bdfghklmnrt" __st_ending = "bdfghklmnt" __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s") __step2_suffixes = ("est", "en", "er", "st") __step3_suffixes = ("isch", "lich", "heit", "keit", "end", "ung", "ig", "ik") def stem(self, word): word = word.lower() if word in self.stopwords: return word word = word.replace("\xDF", "ss") for i in range(1, len(word) - 1): if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: if word[i] == "u": word = "".join((word[:i], "U", word[i + 1 :])) elif word[i] == "y": word = "".join((word[:i], "Y", word[i + 1 :])) r1, r2 = self._r1r2_standard(word, self.__vowels) for i in range(1, len(word)): if word[i] not in self.__vowels and word[i - 1] in self.__vowels: if 3 > len(word[: i + 1]) > 0: r1 = word[3:] elif len(word[: i + 1]) == 0: return word break for suffix in self.__step1_suffixes: if r1.endswith(suffix): if ( suffix in ("en", "es", "e") and word[-len(suffix) - 4 : -len(suffix)] == "niss" ): word = word[: -len(suffix) - 1] r1 = r1[: -len(suffix) - 1] r2 = r2[: -len(suffix) - 1] elif suffix == "s": if word[-2] in self.__s_ending: word = word[:-1] r1 = r1[:-1] r2 = r2[:-1] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] break for suffix in self.__step2_suffixes: if r1.endswith(suffix): if suffix == "st": if word[-3] in self.__st_ending and len(word[:-3]) >= 3: word = word[:-2] r1 = r1[:-2] r2 = r2[:-2] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] break for suffix in self.__step3_suffixes: if r2.endswith(suffix): if suffix in ("end", "ung"): if ( "ig" in r2[-len(suffix) - 2 : -len(suffix)] and "e" not in r2[-len(suffix) - 3 : -len(suffix) - 2] ): word 
= word[: -len(suffix) - 2] else: word = word[: -len(suffix)] elif ( suffix in ("ig", "ik", "isch") and "e" not in r2[-len(suffix) - 1 : -len(suffix)] ): word = word[: -len(suffix)] elif suffix in ("lich", "heit"): if ( "er" in r1[-len(suffix) - 2 : -len(suffix)] or "en" in r1[-len(suffix) - 2 : -len(suffix)] ): word = word[: -len(suffix) - 2] else: word = word[: -len(suffix)] elif suffix == "keit": if "lich" in r2[-len(suffix) - 4 : -len(suffix)]: word = word[: -len(suffix) - 4] elif "ig" in r2[-len(suffix) - 2 : -len(suffix)]: word = word[: -len(suffix) - 2] else: word = word[: -len(suffix)] break word = ( word.replace("\xE4", "a") .replace("\xF6", "o") .replace("\xFC", "u") .replace("U", "u") .replace("Y", "y") ) return word class HungarianStemmer(_LanguageSpecificStemmer): __vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB" __digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs") __double_consonants = ( "bb", "cc", "ccs", "dd", "ff", "gg", "ggy", "jj", "kk", "ll", "lly", "mm", "nn", "nny", "pp", "rr", "ss", "ssz", "tt", "tty", "vv", "zz", "zzs", ) __step1_suffixes = ("al", "el") __step2_suffixes = ( "k\xE9ppen", "onk\xE9nt", "enk\xE9nt", "ank\xE9nt", "k\xE9pp", "k\xE9nt", "ban", "ben", "nak", "nek", "val", "vel", "t\xF3l", "t\xF5l", "r\xF3l", "r\xF5l", "b\xF3l", "b\xF5l", "hoz", "hez", "h\xF6z", "n\xE1l", "n\xE9l", "\xE9rt", "kor", "ba", "be", "ra", "re", "ig", "at", "et", "ot", "\xF6t", "ul", "\xFCl", "v\xE1", "v\xE9", "en", "on", "an", "\xF6n", "n", "t", ) __step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n") __step4_suffixes = ( "astul", "est\xFCl", "\xE1stul", "\xE9st\xFCl", "stul", "st\xFCl", ) __step5_suffixes = ("\xE1", "\xE9") __step6_suffixes = ( "ok\xE9", "\xF6k\xE9", "ak\xE9", "ek\xE9", "\xE1k\xE9", "\xE1\xE9i", "\xE9k\xE9", "\xE9\xE9i", "k\xE9", "\xE9i", "\xE9\xE9", "\xE9", ) __step7_suffixes = ( "\xE1juk", "\xE9j\xFCk", "\xFCnk", "unk", "juk", "j\xFCk", "\xE1nk", "\xE9nk", "nk", "uk", "\xFCk", "em", "om", "am", "od", "ed", "ad", "\xF6d", "ja", "je", "\xE1m", "\xE1d", "\xE9m", "\xE9d", "m", "d", "a", "e", "o", "\xE1", "\xE9", ) __step8_suffixes = ( "jaitok", "jeitek", "jaink", "jeink", "aitok", "eitek", "\xE1itok", "\xE9itek", "jaim", "jeim", "jaid", "jeid", "eink", "aink", "itek", "jeik", "jaik", "\xE1ink", "\xE9ink", "aim", "eim", "aid", "eid", "jai", "jei", "ink", "aik", "eik", "\xE1im", "\xE1id", "\xE1ik", "\xE9im", "\xE9id", "\xE9ik", "im", "id", "ai", "ei", "ik", "\xE1i", "\xE9i", "i", ) __step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok", "ek", "ak", "k") def stem(self, word): word = word.lower() if word in self.stopwords: return word r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs) if r1.endswith(self.__step1_suffixes): for double_cons in self.__double_consonants: if word[-2 - len(double_cons) : -2] == double_cons: word = "".join((word[:-4], word[-3])) if r1[-2 - len(double_cons) : -2] == double_cons: r1 = "".join((r1[:-4], r1[-3])) break for suffix in self.__step2_suffixes: if word.endswith(suffix): if r1.endswith(suffix): word = word[: -len(suffix)] r1 = r1[: -len(suffix)] if r1.endswith("\xE1"): word = "".join((word[:-1], "a")) r1 = suffix_replace(r1, "\xE1", "a") elif r1.endswith("\xE9"): word = "".join((word[:-1], "e")) r1 = suffix_replace(r1, "\xE9", "e") break for suffix in self.__step3_suffixes: if r1.endswith(suffix): if suffix == "\xE9n": word = suffix_replace(word, suffix, "e") r1 = suffix_replace(r1, suffix, "e") else: word = suffix_replace(word, suffix, "a") r1 = suffix_replace(r1, suffix, "a") break for suffix in 
self.__step4_suffixes: if r1.endswith(suffix): if suffix == "\xE1stul": word = suffix_replace(word, suffix, "a") r1 = suffix_replace(r1, suffix, "a") elif suffix == "\xE9st\xFCl": word = suffix_replace(word, suffix, "e") r1 = suffix_replace(r1, suffix, "e") else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] break for suffix in self.__step5_suffixes: if r1.endswith(suffix): for double_cons in self.__double_consonants: if word[-1 - len(double_cons) : -1] == double_cons: word = "".join((word[:-3], word[-2])) if r1[-1 - len(double_cons) : -1] == double_cons: r1 = "".join((r1[:-3], r1[-2])) break for suffix in self.__step6_suffixes: if r1.endswith(suffix): if suffix in ("\xE1k\xE9", "\xE1\xE9i"): word = suffix_replace(word, suffix, "a") r1 = suffix_replace(r1, suffix, "a") elif suffix in ("\xE9k\xE9", "\xE9\xE9i", "\xE9\xE9"): word = suffix_replace(word, suffix, "e") r1 = suffix_replace(r1, suffix, "e") else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] break for suffix in self.__step7_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix in ("\xE1nk", "\xE1juk", "\xE1m", "\xE1d", "\xE1"): word = suffix_replace(word, suffix, "a") r1 = suffix_replace(r1, suffix, "a") elif suffix in ("\xE9nk", "\xE9j\xFCk", "\xE9m", "\xE9d", "\xE9"): word = suffix_replace(word, suffix, "e") r1 = suffix_replace(r1, suffix, "e") else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] break for suffix in self.__step8_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix in ( "\xE1im", "\xE1id", "\xE1i", "\xE1ink", "\xE1itok", "\xE1ik", ): word = suffix_replace(word, suffix, "a") r1 = suffix_replace(r1, suffix, "a") elif suffix in ( "\xE9im", "\xE9id", "\xE9i", "\xE9ink", "\xE9itek", "\xE9ik", ): word = suffix_replace(word, suffix, "e") r1 = suffix_replace(r1, suffix, "e") else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] break for suffix in self.__step9_suffixes: if word.endswith(suffix): if r1.endswith(suffix): if suffix == "\xE1k": word = suffix_replace(word, suffix, "a") elif suffix == "\xE9k": word = suffix_replace(word, suffix, "e") else: word = word[: -len(suffix)] break return word def __r1_hungarian(self, word, vowels, digraphs): r1 = "" if word[0] in vowels: for digraph in digraphs: if digraph in word[1:]: r1 = word[word.index(digraph[-1]) + 1 :] return r1 for i in range(1, len(word)): if word[i] not in vowels: r1 = word[i + 1 :] break else: for i in range(1, len(word)): if word[i] in vowels: r1 = word[i + 1 :] break return r1 class ItalianStemmer(_StandardStemmer): __vowels = "aeiou\xE0\xE8\xEC\xF2\xF9" __step0_suffixes = ( "gliela", "gliele", "glieli", "glielo", "gliene", "sene", "mela", "mele", "meli", "melo", "mene", "tela", "tele", "teli", "telo", "tene", "cela", "cele", "celi", "celo", "cene", "vela", "vele", "veli", "velo", "vene", "gli", "ci", "la", "le", "li", "lo", "mi", "ne", "si", "ti", "vi", ) __step1_suffixes = ( "atrice", "atrici", "azione", "azioni", "uzione", "uzioni", "usione", "usioni", "amento", "amenti", "imento", "imenti", "amente", "abile", "abili", "ibile", "ibili", "mente", "atore", "atori", "logia", "logie", "anza", "anze", "iche", "ichi", "ismo", "ismi", "ista", "iste", "isti", "ist\xE0", "ist\xE8", "ist\xEC", "ante", "anti", "enza", "enze", "ico", "ici", "ica", "ice", "oso", "osi", "osa", "ose", "it\xE0", "ivo", "ivi", "iva", "ive", ) __step2_suffixes = ( "erebbero", "irebbero", "assero", "assimo", "eranno", "erebbe", "eremmo", "ereste", "eresti", "essero", "iranno", "irebbe", "iremmo", "ireste", "iresti", "iscano", 
"iscono", "issero", "arono", "avamo", "avano", "avate", "eremo", "erete", "erono", "evamo", "evano", "evate", "iremo", "irete", "irono", "ivamo", "ivano", "ivate", "ammo", "ando", "asse", "assi", "emmo", "enda", "ende", "endi", "endo", "erai", "erei", "Yamo", "iamo", "immo", "irai", "irei", "isca", "isce", "isci", "isco", "ano", "are", "ata", "ate", "ati", "ato", "ava", "avi", "avo", "er\xE0", "ere", "er\xF2", "ete", "eva", "evi", "evo", "ir\xE0", "ire", "ir\xF2", "ita", "ite", "iti", "ito", "iva", "ivi", "ivo", "ono", "uta", "ute", "uti", "uto", "ar", "ir", ) def stem(self, word): word = word.lower() if word in self.stopwords: return word step1_success = False word = ( word.replace("\xE1", "\xE0") .replace("\xE9", "\xE8") .replace("\xED", "\xEC") .replace("\xF3", "\xF2") .replace("\xFA", "\xF9") ) for i in range(1, len(word)): if word[i - 1] == "q" and word[i] == "u": word = "".join((word[:i], "U", word[i + 1 :])) for i in range(1, len(word) - 1): if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: if word[i] == "u": word = "".join((word[:i], "U", word[i + 1 :])) elif word[i] == "i": word = "".join((word[:i], "I", word[i + 1 :])) r1, r2 = self._r1r2_standard(word, self.__vowels) rv = self._rv_standard(word, self.__vowels) for suffix in self.__step0_suffixes: if rv.endswith(suffix): if rv[-len(suffix) - 4 : -len(suffix)] in ("ando", "endo"): word = word[: -len(suffix)] r1 = r1[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] elif rv[-len(suffix) - 2 : -len(suffix)] in ("ar", "er", "ir"): word = suffix_replace(word, suffix, "e") r1 = suffix_replace(r1, suffix, "e") r2 = suffix_replace(r2, suffix, "e") rv = suffix_replace(rv, suffix, "e") break for suffix in self.__step1_suffixes: if word.endswith(suffix): if suffix == "amente" and r1.endswith(suffix): step1_success = True word = word[:-6] r2 = r2[:-6] rv = rv[:-6] if r2.endswith("iv"): word = word[:-2] r2 = r2[:-2] rv = rv[:-2] if r2.endswith("at"): word = word[:-2] rv = rv[:-2] elif r2.endswith(("os", "ic")): word = word[:-2] rv = rv[:-2] elif r2.endswith("abil"): word = word[:-4] rv = rv[:-4] elif suffix in ("amento", "amenti", "imento", "imenti") and rv.endswith( suffix ): step1_success = True word = word[:-6] rv = rv[:-6] elif r2.endswith(suffix): step1_success = True if suffix in ("azione", "azioni", "atore", "atori"): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] if r2.endswith("ic"): word = word[:-2] rv = rv[:-2] elif suffix in ("logia", "logie"): word = word[:-2] rv = word[:-2] elif suffix in ("uzione", "uzioni", "usione", "usioni"): word = word[:-5] rv = rv[:-5] elif suffix in ("enza", "enze"): word = suffix_replace(word, suffix, "te") rv = suffix_replace(rv, suffix, "te") elif suffix == "it\xE0": word = word[:-3] r2 = r2[:-3] rv = rv[:-3] if r2.endswith(("ic", "iv")): word = word[:-2] rv = rv[:-2] elif r2.endswith("abil"): word = word[:-4] rv = rv[:-4] elif suffix in ("ivo", "ivi", "iva", "ive"): word = word[:-3] r2 = r2[:-3] rv = rv[:-3] if r2.endswith("at"): word = word[:-2] r2 = r2[:-2] rv = rv[:-2] if r2.endswith("ic"): word = word[:-2] rv = rv[:-2] else: word = word[: -len(suffix)] rv = rv[: -len(suffix)] break if not step1_success: for suffix in self.__step2_suffixes: if rv.endswith(suffix): word = word[: -len(suffix)] rv = rv[: -len(suffix)] break if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8", "\xEC", "\xF2")): word = word[:-1] rv = rv[:-1] if rv.endswith("i"): word = word[:-1] rv = rv[:-1] if rv.endswith(("ch", "gh")): word = word[:-1] word = word.replace("I", 
"i").replace("U", "u") return word class NorwegianStemmer(_ScandinavianStemmer): __vowels = "aeiouy\xE6\xE5\xF8" __s_ending = "bcdfghjlmnoprtvyz" __step1_suffixes = ( "hetenes", "hetene", "hetens", "heter", "heten", "endes", "ande", "ende", "edes", "enes", "erte", "ede", "ane", "ene", "ens", "ers", "ets", "het", "ast", "ert", "en", "ar", "er", "as", "es", "et", "a", "e", "s", ) __step2_suffixes = ("dt", "vt") __step3_suffixes = ( "hetslov", "eleg", "elig", "elov", "slov", "leg", "eig", "lig", "els", "lov", "ig", ) def stem(self, word): word = word.lower() if word in self.stopwords: return word r1 = self._r1_scandinavian(word, self.__vowels) for suffix in self.__step1_suffixes: if r1.endswith(suffix): if suffix in ("erte", "ert"): word = suffix_replace(word, suffix, "er") r1 = suffix_replace(r1, suffix, "er") elif suffix == "s": if word[-2] in self.__s_ending or ( word[-2] == "k" and word[-3] not in self.__vowels ): word = word[:-1] r1 = r1[:-1] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] break for suffix in self.__step2_suffixes: if r1.endswith(suffix): word = word[:-1] r1 = r1[:-1] break for suffix in self.__step3_suffixes: if r1.endswith(suffix): word = word[: -len(suffix)] break return word class PortugueseStemmer(_StandardStemmer): __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4" __step1_suffixes = ( "amentos", "imentos", "uço~es", "amento", "imento", "adoras", "adores", "a\xE7o~es", "logias", "\xEAncias", "amente", "idades", "an\xE7as", "ismos", "istas", "adora", "a\xE7a~o", "antes", "\xE2ncia", "logia", "uça~o", "\xEAncia", "mente", "idade", "an\xE7a", "ezas", "icos", "icas", "ismo", "\xE1vel", "\xEDvel", "ista", "osos", "osas", "ador", "ante", "ivas", "ivos", "iras", "eza", "ico", "ica", "oso", "osa", "iva", "ivo", "ira", ) __step2_suffixes = ( "ar\xEDamos", "er\xEDamos", "ir\xEDamos", "\xE1ssemos", "\xEAssemos", "\xEDssemos", "ar\xEDeis", "er\xEDeis", "ir\xEDeis", "\xE1sseis", "\xE9sseis", "\xEDsseis", "\xE1ramos", "\xE9ramos", "\xEDramos", "\xE1vamos", "aremos", "eremos", "iremos", "ariam", "eriam", "iriam", "assem", "essem", "issem", "ara~o", "era~o", "ira~o", "arias", "erias", "irias", "ardes", "erdes", "irdes", "asses", "esses", "isses", "astes", "estes", "istes", "\xE1reis", "areis", "\xE9reis", "ereis", "\xEDreis", "ireis", "\xE1veis", "\xEDamos", "armos", "ermos", "irmos", "aria", "eria", "iria", "asse", "esse", "isse", "aste", "este", "iste", "arei", "erei", "irei", "aram", "eram", "iram", "avam", "arem", "erem", "irem", "ando", "endo", "indo", "adas", "idas", "ar\xE1s", "aras", "er\xE1s", "eras", "ir\xE1s", "avas", "ares", "eres", "ires", "\xEDeis", "ados", "idos", "\xE1mos", "amos", "emos", "imos", "iras", "ada", "ida", "ar\xE1", "ara", "er\xE1", "era", "ir\xE1", "ava", "iam", "ado", "ido", "ias", "ais", "eis", "ira", "ia", "ei", "am", "em", "ar", "er", "ir", "as", "es", "is", "eu", "iu", "ou", ) __step4_suffixes = ("os", "a", "i", "o", "\xE1", "\xED", "\xF3") def stem(self, word): word = word.lower() if word in self.stopwords: return word step1_success = False step2_success = False word = ( word.replace("\xE3", "a~") .replace("\xF5", "o~") .replace("q\xFC", "qu") .replace("g\xFC", "gu") ) r1, r2 = self._r1r2_standard(word, self.__vowels) rv = self._rv_standard(word, self.__vowels) for suffix in self.__step1_suffixes: if word.endswith(suffix): if suffix == "amente" and r1.endswith(suffix): step1_success = True word = word[:-6] r2 = r2[:-6] rv = rv[:-6] if r2.endswith("iv"): word = word[:-2] r2 = r2[:-2] rv = rv[:-2] if r2.endswith("at"): word = word[:-2] 
rv = rv[:-2] elif r2.endswith(("os", "ic", "ad")): word = word[:-2] rv = rv[:-2] elif ( suffix in ("ira", "iras") and rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == "e" ): step1_success = True word = suffix_replace(word, suffix, "ir") rv = suffix_replace(rv, suffix, "ir") elif r2.endswith(suffix): step1_success = True if suffix in ("logia", "logias"): word = suffix_replace(word, suffix, "log") rv = suffix_replace(rv, suffix, "log") elif suffix in ("uça~o", "uço~es"): word = suffix_replace(word, suffix, "u") rv = suffix_replace(rv, suffix, "u") elif suffix in ("\xEAncia", "\xEAncias"): word = suffix_replace(word, suffix, "ente") rv = suffix_replace(rv, suffix, "ente") elif suffix == "mente": word = word[:-5] r2 = r2[:-5] rv = rv[:-5] if r2.endswith(("ante", "avel", "ivel")): word = word[:-4] rv = rv[:-4] elif suffix in ("idade", "idades"): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] if r2.endswith(("ic", "iv")): word = word[:-2] rv = rv[:-2] elif r2.endswith("abil"): word = word[:-4] rv = rv[:-4] elif suffix in ("iva", "ivo", "ivas", "ivos"): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] if r2.endswith("at"): word = word[:-2] rv = rv[:-2] else: word = word[: -len(suffix)] rv = rv[: -len(suffix)] break if not step1_success: for suffix in self.__step2_suffixes: if rv.endswith(suffix): step2_success = True word = word[: -len(suffix)] rv = rv[: -len(suffix)] break if step1_success or step2_success: if rv.endswith("i") and word[-2] == "c": word = word[:-1] rv = rv[:-1] if not step1_success and not step2_success: for suffix in self.__step4_suffixes: if rv.endswith(suffix): word = word[: -len(suffix)] rv = rv[: -len(suffix)] break if rv.endswith(("e", "\xE9", "\xEA")): word = word[:-1] rv = rv[:-1] if (word.endswith("gu") and rv.endswith("u")) or ( word.endswith("ci") and rv.endswith("i") ): word = word[:-1] elif word.endswith("\xE7"): word = suffix_replace(word, "\xE7", "c") word = word.replace("a~", "\xE3").replace("o~", "\xF5") return word class RomanianStemmer(_StandardStemmer): __vowels = "aeiou\u0103\xE2\xEE" __step0_suffixes = ( "iilor", "ului", "elor", "iile", "ilor", "atei", "a\u0163ie", "a\u0163ia", "aua", "ele", "iua", "iei", "ile", "ul", "ea", "ii", ) __step1_suffixes = ( "abilitate", "abilitati", "abilit\u0103\u0163i", "ibilitate", "abilit\u0103i", "ivitate", "ivitati", "ivit\u0103\u0163i", "icitate", "icitati", "icit\u0103\u0163i", "icatori", "ivit\u0103i", "icit\u0103i", "icator", "a\u0163iune", "atoare", "\u0103toare", "i\u0163iune", "itoare", "iciva", "icive", "icivi", "iciv\u0103", "icala", "icale", "icali", "ical\u0103", "ativa", "ative", "ativi", "ativ\u0103", "atori", "\u0103tori", "itiva", "itive", "itivi", "itiv\u0103", "itori", "iciv", "ical", "ativ", "ator", "\u0103tor", "itiv", "itor", ) __step2_suffixes = ( "abila", "abile", "abili", "abil\u0103", "ibila", "ibile", "ibili", "ibil\u0103", "atori", "itate", "itati", "it\u0103\u0163i", "abil", "ibil", "oasa", "oas\u0103", "oase", "anta", "ante", "anti", "ant\u0103", "ator", "it\u0103i", "iune", "iuni", "isme", "ista", "iste", "isti", "ist\u0103", "i\u015Fti", "ata", "at\u0103", "ati", "ate", "uta", "ut\u0103", "uti", "ute", "ita", "it\u0103", "iti", "ite", "ica", "ice", "ici", "ic\u0103", "osi", "o\u015Fi", "ant", "iva", "ive", "ivi", "iv\u0103", "ism", "ist", "at", "ut", "it", "ic", "os", "iv", ) __step3_suffixes = ( "seser\u0103\u0163i", "aser\u0103\u0163i", "iser\u0103\u0163i", "\xE2ser\u0103\u0163i", "user\u0103\u0163i", "seser\u0103m", 
"aser\u0103m", "iser\u0103m", "\xE2ser\u0103m", "user\u0103m", "ser\u0103\u0163i", "sese\u015Fi", "seser\u0103", "easc\u0103", "ar\u0103\u0163i", "ur\u0103\u0163i", "ir\u0103\u0163i", "\xE2r\u0103\u0163i", "ase\u015Fi", "aser\u0103", "ise\u015Fi", "iser\u0103", "\xe2se\u015Fi", "\xE2ser\u0103", "use\u015Fi", "user\u0103", "ser\u0103m", "sesem", "indu", "\xE2ndu", "eaz\u0103", "e\u015Fti", "e\u015Fte", "\u0103\u015Fti", "\u0103\u015Fte", "ea\u0163i", "ia\u0163i", "ar\u0103m", "ur\u0103m", "ir\u0103m", "\xE2r\u0103m", "asem", "isem", "\xE2sem", "usem", "se\u015Fi", "ser\u0103", "sese", "are", "ere", "ire", "\xE2re", "ind", "\xE2nd", "eze", "ezi", "esc", "\u0103sc", "eam", "eai", "eau", "iam", "iai", "iau", "a\u015Fi", "ar\u0103", "u\u015Fi", "ur\u0103", "i\u015Fi", "ir\u0103", "\xE2\u015Fi", "\xe2r\u0103", "ase", "ise", "\xE2se", "use", "a\u0163i", "e\u0163i", "i\u0163i", "\xe2\u0163i", "sei", "ez", "am", "ai", "au", "ea", "ia", "ui", "\xE2i", "\u0103m", "em", "im", "\xE2m", "se", ) def stem(self, word): word = word.lower() if word in self.stopwords: return word step1_success = False step2_success = False for i in range(1, len(word) - 1): if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels: if word[i] == "u": word = "".join((word[:i], "U", word[i + 1 :])) elif word[i] == "i": word = "".join((word[:i], "I", word[i + 1 :])) r1, r2 = self._r1r2_standard(word, self.__vowels) rv = self._rv_standard(word, self.__vowels) for suffix in self.__step0_suffixes: if word.endswith(suffix): if suffix in r1: if suffix in ("ul", "ului"): word = word[: -len(suffix)] if suffix in rv: rv = rv[: -len(suffix)] else: rv = "" elif ( suffix == "aua" or suffix == "atei" or (suffix == "ile" and word[-5:-3] != "ab") ): word = word[:-2] elif suffix in ("ea", "ele", "elor"): word = suffix_replace(word, suffix, "e") if suffix in rv: rv = suffix_replace(rv, suffix, "e") else: rv = "" elif suffix in ("ii", "iua", "iei", "iile", "iilor", "ilor"): word = suffix_replace(word, suffix, "i") if suffix in rv: rv = suffix_replace(rv, suffix, "i") else: rv = "" elif suffix in ("a\u0163ie", "a\u0163ia"): word = word[:-1] break while True: replacement_done = False for suffix in self.__step1_suffixes: if word.endswith(suffix): if suffix in r1: step1_success = True replacement_done = True if suffix in ( "abilitate", "abilitati", "abilit\u0103i", "abilit\u0103\u0163i", ): word = suffix_replace(word, suffix, "abil") elif suffix == "ibilitate": word = word[:-5] elif suffix in ( "ivitate", "ivitati", "ivit\u0103i", "ivit\u0103\u0163i", ): word = suffix_replace(word, suffix, "iv") elif suffix in ( "icitate", "icitati", "icit\u0103i", "icit\u0103\u0163i", "icator", "icatori", "iciv", "iciva", "icive", "icivi", "iciv\u0103", "ical", "icala", "icale", "icali", "ical\u0103", ): word = suffix_replace(word, suffix, "ic") elif suffix in ( "ativ", "ativa", "ative", "ativi", "ativ\u0103", "a\u0163iune", "atoare", "ator", "atori", "\u0103toare", "\u0103tor", "\u0103tori", ): word = suffix_replace(word, suffix, "at") if suffix in r2: r2 = suffix_replace(r2, suffix, "at") elif suffix in ( "itiv", "itiva", "itive", "itivi", "itiv\u0103", "i\u0163iune", "itoare", "itor", "itori", ): word = suffix_replace(word, suffix, "it") if suffix in r2: r2 = suffix_replace(r2, suffix, "it") else: step1_success = False break if not replacement_done: break for suffix in self.__step2_suffixes: if word.endswith(suffix): if suffix in r2: step2_success = True if suffix in ("iune", "iuni"): if word[-5] == "\u0163": word = "".join((word[:-5], "t")) elif suffix in 
( "ism", "isme", "ist", "ista", "iste", "isti", "ist\u0103", "i\u015Fti", ): word = suffix_replace(word, suffix, "ist") else: word = word[: -len(suffix)] break if not step1_success and not step2_success: for suffix in self.__step3_suffixes: if word.endswith(suffix): if suffix in rv: if suffix in ( "seser\u0103\u0163i", "seser\u0103m", "ser\u0103\u0163i", "sese\u015Fi", "seser\u0103", "ser\u0103m", "sesem", "se\u015Fi", "ser\u0103", "sese", "a\u0163i", "e\u0163i", "i\u0163i", "\xE2\u0163i", "sei", "\u0103m", "em", "im", "\xE2m", "se", ): word = word[: -len(suffix)] rv = rv[: -len(suffix)] else: if ( not rv.startswith(suffix) and rv[rv.index(suffix) - 1] not in "aeio\u0103\xE2\xEE" ): word = word[: -len(suffix)] break for suffix in ("ie", "a", "e", "i", "\u0103"): if word.endswith(suffix): if suffix in rv: word = word[: -len(suffix)] break word = word.replace("I", "i").replace("U", "u") return word class RussianStemmer(_LanguageSpecificStemmer): __perfective_gerund_suffixes = ( "ivshis'", "yvshis'", "vshis'", "ivshi", "yvshi", "vshi", "iv", "yv", "v", ) __adjectival_suffixes = ( "ui^ushchi^ui^u", "ui^ushchi^ai^a", "ui^ushchimi", "ui^ushchymi", "ui^ushchego", "ui^ushchogo", "ui^ushchemu", "ui^ushchomu", "ui^ushchikh", "ui^ushchykh", "ui^ushchui^u", "ui^ushchaia", "ui^ushchoi^u", "ui^ushchei^u", "i^ushchi^ui^u", "i^ushchi^ai^a", "ui^ushchee", "ui^ushchie", "ui^ushchye", "ui^ushchoe", "ui^ushchei`", "ui^ushchii`", "ui^ushchyi`", "ui^ushchoi`", "ui^ushchem", "ui^ushchim", "ui^ushchym", "ui^ushchom", "i^ushchimi", "i^ushchymi", "i^ushchego", "i^ushchogo", "i^ushchemu", "i^ushchomu", "i^ushchikh", "i^ushchykh", "i^ushchui^u", "i^ushchai^a", "i^ushchoi^u", "i^ushchei^u", "i^ushchee", "i^ushchie", "i^ushchye", "i^ushchoe", "i^ushchei`", "i^ushchii`", "i^ushchyi`", "i^ushchoi`", "i^ushchem", "i^ushchim", "i^ushchym", "i^ushchom", "shchi^ui^u", "shchi^ai^a", "ivshi^ui^u", "ivshi^ai^a", "yvshi^ui^u", "yvshi^ai^a", "shchimi", "shchymi", "shchego", "shchogo", "shchemu", "shchomu", "shchikh", "shchykh", "shchui^u", "shchai^a", "shchoi^u", "shchei^u", "ivshimi", "ivshymi", "ivshego", "ivshogo", "ivshemu", "ivshomu", "ivshikh", "ivshykh", "ivshui^u", "ivshai^a", "ivshoi^u", "ivshei^u", "yvshimi", "yvshymi", "yvshego", "yvshogo", "yvshemu", "yvshomu", "yvshikh", "yvshykh", "yvshui^u", "yvshai^a", "yvshoi^u", "yvshei^u", "vshi^ui^u", "vshi^ai^a", "shchee", "shchie", "shchye", "shchoe", "shchei`", "shchii`", "shchyi`", "shchoi`", "shchem", "shchim", "shchym", "shchom", "ivshee", "ivshie", "ivshye", "ivshoe", "ivshei`", "ivshii`", "ivshyi`", "ivshoi`", "ivshem", "ivshim", "ivshym", "ivshom", "yvshee", "yvshie", "yvshye", "yvshoe", "yvshei`", "yvshii`", "yvshyi`", "yvshoi`", "yvshem", "yvshim", "yvshym", "yvshom", "vshimi", "vshymi", "vshego", "vshogo", "vshemu", "vshomu", "vshikh", "vshykh", "vshui^u", "vshai^a", "vshoi^u", "vshei^u", "emi^ui^u", "emi^ai^a", "nni^ui^u", "nni^ai^a", "vshee", "vshie", "vshye", "vshoe", "vshei`", "vshii`", "vshyi`", "vshoi`", "vshem", "vshim", "vshym", "vshom", "emimi", "emymi", "emego", "emogo", "ememu", "emomu", "emikh", "emykh", "emui^u", "emai^a", "emoi^u", "emei^u", "nnimi", "nnymi", "nnego", "nnogo", "nnemu", "nnomu", "nnikh", "nnykh", "nnui^u", "nnai^a", "nnoi^u", "nnei^u", "emee", "emie", "emye", "emoe", "emei`", "emii`", "emyi`", "emoi`", "emem", "emim", "emym", "emom", "nnee", "nnie", "nnye", "nnoe", "nnei`", "nnii`", "nnyi`", "nnoi`", "nnem", "nnim", "nnym", "nnom", "i^ui^u", "i^ai^a", "imi", "ymi", "ego", "ogo", "emu", "omu", "ikh", "ykh", "ui^u", "ai^a", "oi^u", 
"ei^u", "ee", "ie", "ye", "oe", "ei`", "ii`", "yi`", "oi`", "em", "im", "ym", "om", ) __reflexive_suffixes = ("si^a", "s'") __verb_suffixes = ( "esh'", "ei`te", "ui`te", "ui^ut", "ish'", "ete", "i`te", "i^ut", "nno", "ila", "yla", "ena", "ite", "ili", "yli", "ilo", "ylo", "eno", "i^at", "uet", "eny", "it'", "yt'", "ui^u", "la", "na", "li", "em", "lo", "no", "et", "ny", "t'", "ei`", "ui`", "il", "yl", "im", "ym", "en", "it", "yt", "i^u", "i`", "l", "n", ) __noun_suffixes = ( "ii^ami", "ii^akh", "i^ami", "ii^am", "i^akh", "ami", "iei`", "i^am", "iem", "akh", "ii^u", "'i^u", "ii^a", "'i^a", "ev", "ov", "ie", "'e", "ei", "ii", "ei`", "oi`", "ii`", "em", "am", "om", "i^u", "i^a", "a", "e", "i", "i`", "o", "u", "y", "'", ) __superlative_suffixes = ("ei`she", "ei`sh") __derivational_suffixes = ("ost'", "ost") def stem(self, word): if word in self.stopwords: return word chr_exceeded = False for i in range(len(word)): if ord(word[i]) > 255: chr_exceeded = True break if not chr_exceeded: return word word = self.__cyrillic_to_roman(word) step1_success = False adjectival_removed = False verb_removed = False undouble_success = False superlative_removed = False rv, r2 = self.__regions_russian(word) for suffix in self.__perfective_gerund_suffixes: if rv.endswith(suffix): if suffix in ("v", "vshi", "vshis'"): if ( rv[-len(suffix) - 3 : -len(suffix)] == "i^a" or rv[-len(suffix) - 1 : -len(suffix)] == "a" ): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] step1_success = True break else: word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] step1_success = True break if not step1_success: for suffix in self.__reflexive_suffixes: if rv.endswith(suffix): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] break for suffix in self.__adjectival_suffixes: if rv.endswith(suffix): if suffix in ( "i^ushchi^ui^u", "i^ushchi^ai^a", "i^ushchui^u", "i^ushchai^a", "i^ushchoi^u", "i^ushchei^u", "i^ushchimi", "i^ushchymi", "i^ushchego", "i^ushchogo", "i^ushchemu", "i^ushchomu", "i^ushchikh", "i^ushchykh", "shchi^ui^u", "shchi^ai^a", "i^ushchee", "i^ushchie", "i^ushchye", "i^ushchoe", "i^ushchei`", "i^ushchii`", "i^ushchyi`", "i^ushchoi`", "i^ushchem", "i^ushchim", "i^ushchym", "i^ushchom", "vshi^ui^u", "vshi^ai^a", "shchui^u", "shchai^a", "shchoi^u", "shchei^u", "emi^ui^u", "emi^ai^a", "nni^ui^u", "nni^ai^a", "shchimi", "shchymi", "shchego", "shchogo", "shchemu", "shchomu", "shchikh", "shchykh", "vshui^u", "vshai^a", "vshoi^u", "vshei^u", "shchee", "shchie", "shchye", "shchoe", "shchei`", "shchii`", "shchyi`", "shchoi`", "shchem", "shchim", "shchym", "shchom", "vshimi", "vshymi", "vshego", "vshogo", "vshemu", "vshomu", "vshikh", "vshykh", "emui^u", "emai^a", "emoi^u", "emei^u", "nnui^u", "nnai^a", "nnoi^u", "nnei^u", "vshee", "vshie", "vshye", "vshoe", "vshei`", "vshii`", "vshyi`", "vshoi`", "vshem", "vshim", "vshym", "vshom", "emimi", "emymi", "emego", "emogo", "ememu", "emomu", "emikh", "emykh", "nnimi", "nnymi", "nnego", "nnogo", "nnemu", "nnomu", "nnikh", "nnykh", "emee", "emie", "emye", "emoe", "emei`", "emii`", "emyi`", "emoi`", "emem", "emim", "emym", "emom", "nnee", "nnie", "nnye", "nnoe", "nnei`", "nnii`", "nnyi`", "nnoi`", "nnem", "nnim", "nnym", "nnom", ): if ( rv[-len(suffix) - 3 : -len(suffix)] == "i^a" or rv[-len(suffix) - 1 : -len(suffix)] == "a" ): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] adjectival_removed = True break else: word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: 
-len(suffix)] adjectival_removed = True break if not adjectival_removed: for suffix in self.__verb_suffixes: if rv.endswith(suffix): if suffix in ( "la", "na", "ete", "i`te", "li", "i`", "l", "em", "n", "lo", "no", "et", "i^ut", "ny", "t'", "esh'", "nno", ): if ( rv[-len(suffix) - 3 : -len(suffix)] == "i^a" or rv[-len(suffix) - 1 : -len(suffix)] == "a" ): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] verb_removed = True break else: word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] verb_removed = True break if not adjectival_removed and not verb_removed: for suffix in self.__noun_suffixes: if rv.endswith(suffix): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] break if rv.endswith("i"): word = word[:-1] r2 = r2[:-1] for suffix in self.__derivational_suffixes: if r2.endswith(suffix): word = word[: -len(suffix)] break if word.endswith("nn"): word = word[:-1] undouble_success = True if not undouble_success: for suffix in self.__superlative_suffixes: if word.endswith(suffix): word = word[: -len(suffix)] superlative_removed = True break if word.endswith("nn"): word = word[:-1] if not undouble_success and not superlative_removed: if word.endswith("'"): word = word[:-1] word = self.__roman_to_cyrillic(word) return word def __regions_russian(self, word): r1 = "" r2 = "" rv = "" vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y") word = word.replace("i^a", "A").replace("i^u", "U").replace("e`", "E") for i in range(1, len(word)): if word[i] not in vowels and word[i - 1] in vowels: r1 = word[i + 1 :] break for i in range(1, len(r1)): if r1[i] not in vowels and r1[i - 1] in vowels: r2 = r1[i + 1 :] break for i in range(len(word)): if word[i] in vowels: rv = word[i + 1 :] break r2 = r2.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") rv = rv.replace("A", "i^a").replace("U", "i^u").replace("E", "e`") return (rv, r2) def __cyrillic_to_roman(self, word): word = ( word.replace("\u0410", "a") .replace("\u0430", "a") .replace("\u0411", "b") .replace("\u0431", "b") .replace("\u0412", "v") .replace("\u0432", "v") .replace("\u0413", "g") .replace("\u0433", "g") .replace("\u0414", "d") .replace("\u0434", "d") .replace("\u0415", "e") .replace("\u0435", "e") .replace("\u0401", "e") .replace("\u0451", "e") .replace("\u0416", "zh") .replace("\u0436", "zh") .replace("\u0417", "z") .replace("\u0437", "z") .replace("\u0418", "i") .replace("\u0438", "i") .replace("\u0419", "i`") .replace("\u0439", "i`") .replace("\u041A", "k") .replace("\u043A", "k") .replace("\u041B", "l") .replace("\u043B", "l") .replace("\u041C", "m") .replace("\u043C", "m") .replace("\u041D", "n") .replace("\u043D", "n") .replace("\u041E", "o") .replace("\u043E", "o") .replace("\u041F", "p") .replace("\u043F", "p") .replace("\u0420", "r") .replace("\u0440", "r") .replace("\u0421", "s") .replace("\u0441", "s") .replace("\u0422", "t") .replace("\u0442", "t") .replace("\u0423", "u") .replace("\u0443", "u") .replace("\u0424", "f") .replace("\u0444", "f") .replace("\u0425", "kh") .replace("\u0445", "kh") .replace("\u0426", "t^s") .replace("\u0446", "t^s") .replace("\u0427", "ch") .replace("\u0447", "ch") .replace("\u0428", "sh") .replace("\u0448", "sh") .replace("\u0429", "shch") .replace("\u0449", "shch") .replace("\u042A", "''") .replace("\u044A", "''") .replace("\u042B", "y") .replace("\u044B", "y") .replace("\u042C", "'") .replace("\u044C", "'") .replace("\u042D", "e`") .replace("\u044D", "e`") .replace("\u042E", "i^u") .replace("\u044E", "i^u") 
.replace("\u042F", "i^a") .replace("\u044F", "i^a") ) return word def __roman_to_cyrillic(self, word): word = ( word.replace("i^u", "\u044E") .replace("i^a", "\u044F") .replace("shch", "\u0449") .replace("kh", "\u0445") .replace("t^s", "\u0446") .replace("ch", "\u0447") .replace("e`", "\u044D") .replace("i`", "\u0439") .replace("sh", "\u0448") .replace("k", "\u043A") .replace("e", "\u0435") .replace("zh", "\u0436") .replace("a", "\u0430") .replace("b", "\u0431") .replace("v", "\u0432") .replace("g", "\u0433") .replace("d", "\u0434") .replace("e", "\u0435") .replace("z", "\u0437") .replace("i", "\u0438") .replace("l", "\u043B") .replace("m", "\u043C") .replace("n", "\u043D") .replace("o", "\u043E") .replace("p", "\u043F") .replace("r", "\u0440") .replace("s", "\u0441") .replace("t", "\u0442") .replace("u", "\u0443") .replace("f", "\u0444") .replace("''", "\u044A") .replace("y", "\u044B") .replace("'", "\u044C") ) return word class SpanishStemmer(_StandardStemmer): __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC" __step0_suffixes = ( "selas", "selos", "sela", "selo", "las", "les", "los", "nos", "me", "se", "la", "le", "lo", ) __step1_suffixes = ( "amientos", "imientos", "amiento", "imiento", "acion", "aciones", "uciones", "adoras", "adores", "ancias", "log\xEDas", "encias", "amente", "idades", "anzas", "ismos", "ables", "ibles", "istas", "adora", "aci\xF3n", "antes", "ancia", "log\xEDa", "uci\xf3n", "encia", "mente", "anza", "icos", "icas", "ismo", "able", "ible", "ista", "osos", "osas", "ador", "ante", "idad", "ivas", "ivos", "ico", "ica", "oso", "osa", "iva", "ivo", ) __step2a_suffixes = ( "yeron", "yendo", "yamos", "yais", "yan", "yen", "yas", "yes", "ya", "ye", "yo", "y\xF3", ) __step2b_suffixes = ( "ar\xEDamos", "er\xEDamos", "ir\xEDamos", "i\xE9ramos", "i\xE9semos", "ar\xEDais", "aremos", "er\xEDais", "eremos", "ir\xEDais", "iremos", "ierais", "ieseis", "asteis", "isteis", "\xE1bamos", "\xE1ramos", "\xE1semos", "ar\xEDan", "ar\xEDas", "ar\xE9is", "er\xEDan", "er\xEDas", "er\xE9is", "ir\xEDan", "ir\xEDas", "ir\xE9is", "ieran", "iesen", "ieron", "iendo", "ieras", "ieses", "abais", "arais", "aseis", "\xE9amos", "ar\xE1n", "ar\xE1s", "ar\xEDa", "er\xE1n", "er\xE1s", "er\xEDa", "ir\xE1n", "ir\xE1s", "ir\xEDa", "iera", "iese", "aste", "iste", "aban", "aran", "asen", "aron", "ando", "abas", "adas", "idas", "aras", "ases", "\xEDais", "ados", "idos", "amos", "imos", "emos", "ar\xE1", "ar\xE9", "er\xE1", "er\xE9", "ir\xE1", "ir\xE9", "aba", "ada", "ida", "ara", "ase", "\xEDan", "ado", "ido", "\xEDas", "\xE1is", "\xE9is", "\xEDa", "ad", "ed", "id", "an", "i\xF3", "ar", "er", "ir", "as", "\xEDs", "en", "es", ) __step3_suffixes = ("os", "a", "e", "o", "\xE1", "\xE9", "\xED", "\xF3") def stem(self, word): word = word.lower() if word in self.stopwords: return word step1_success = False r1, r2 = self._r1r2_standard(word, self.__vowels) rv = self._rv_standard(word, self.__vowels) for suffix in self.__step0_suffixes: if not (word.endswith(suffix) and rv.endswith(suffix)): continue if ( rv[: -len(suffix)].endswith( ( "ando", "\xE1ndo", "ar", "\xE1r", "er", "\xE9r", "iendo", "i\xE9ndo", "ir", "\xEDr", ) ) ) or ( rv[: -len(suffix)].endswith("yendo") and word[: -len(suffix)].endswith("uyendo") ): word = self.__replace_accented(word[: -len(suffix)]) r1 = self.__replace_accented(r1[: -len(suffix)]) r2 = self.__replace_accented(r2[: -len(suffix)]) rv = self.__replace_accented(rv[: -len(suffix)]) break for suffix in self.__step1_suffixes: if not word.endswith(suffix): continue if suffix == "amente" and 
r1.endswith(suffix): step1_success = True word = word[:-6] r2 = r2[:-6] rv = rv[:-6] if r2.endswith("iv"): word = word[:-2] r2 = r2[:-2] rv = rv[:-2] if r2.endswith("at"): word = word[:-2] rv = rv[:-2] elif r2.endswith(("os", "ic", "ad")): word = word[:-2] rv = rv[:-2] elif r2.endswith(suffix): step1_success = True if suffix in ( "adora", "ador", "aci\xF3n", "adoras", "adores", "acion", "aciones", "ante", "antes", "ancia", "ancias", ): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] if r2.endswith("ic"): word = word[:-2] rv = rv[:-2] elif suffix in ("log\xEDa", "log\xEDas"): word = suffix_replace(word, suffix, "log") rv = suffix_replace(rv, suffix, "log") elif suffix in ("uci\xF3n", "uciones"): word = suffix_replace(word, suffix, "u") rv = suffix_replace(rv, suffix, "u") elif suffix in ("encia", "encias"): word = suffix_replace(word, suffix, "ente") rv = suffix_replace(rv, suffix, "ente") elif suffix == "mente": word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] if r2.endswith(("ante", "able", "ible")): word = word[:-4] rv = rv[:-4] elif suffix in ("idad", "idades"): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] for pre_suff in ("abil", "ic", "iv"): if r2.endswith(pre_suff): word = word[: -len(pre_suff)] rv = rv[: -len(pre_suff)] elif suffix in ("ivo", "iva", "ivos", "ivas"): word = word[: -len(suffix)] r2 = r2[: -len(suffix)] rv = rv[: -len(suffix)] if r2.endswith("at"): word = word[:-2] rv = rv[:-2] else: word = word[: -len(suffix)] rv = rv[: -len(suffix)] break if not step1_success: for suffix in self.__step2a_suffixes: if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == "u": word = word[: -len(suffix)] rv = rv[: -len(suffix)] break for suffix in self.__step2b_suffixes: if rv.endswith(suffix): word = word[: -len(suffix)] rv = rv[: -len(suffix)] if suffix in ("en", "es", "\xE9is", "emos"): if word.endswith("gu"): word = word[:-1] if rv.endswith("gu"): rv = rv[:-1] break for suffix in self.__step3_suffixes: if rv.endswith(suffix): word = word[: -len(suffix)] if suffix in ("e", "\xE9"): rv = rv[: -len(suffix)] if word[-2:] == "gu" and rv.endswith("u"): word = word[:-1] break word = self.__replace_accented(word) return word def __replace_accented(self, word): return ( word.replace("\xE1", "a") .replace("\xE9", "e") .replace("\xED", "i") .replace("\xF3", "o") .replace("\xFA", "u") ) class SwedishStemmer(_ScandinavianStemmer): __vowels = "aeiouy\xE4\xE5\xF6" __s_ending = "bcdfghjklmnoprtvy" __step1_suffixes = ( "heterna", "hetens", "heter", "heten", "anden", "arnas", "ernas", "ornas", "andes", "andet", "arens", "arna", "erna", "orna", "ande", "arne", "aste", "aren", "ades", "erns", "ade", "are", "ern", "ens", "het", "ast", "ad", "en", "ar", "er", "or", "as", "es", "at", "a", "e", "s", ) __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt") __step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig") def stem(self, word): word = word.lower() if word in self.stopwords: return word r1 = self._r1_scandinavian(word, self.__vowels) for suffix in self.__step1_suffixes: if r1.endswith(suffix): if suffix == "s": if word[-2] in self.__s_ending: word = word[:-1] r1 = r1[:-1] else: word = word[: -len(suffix)] r1 = r1[: -len(suffix)] break for suffix in self.__step2_suffixes: if r1.endswith(suffix): word = word[:-1] r1 = r1[:-1] break for suffix in self.__step3_suffixes: if r1.endswith(suffix): if suffix in ("els", "lig", "ig"): word = word[: -len(suffix)] elif suffix in ("fullt", "l\xF6st"): word = 
word[:-1] break return word def demo(): from nltk.corpus import udhr udhr_corpus = { "arabic": "Arabic_Alarabia-Arabic", "danish": "Danish_Dansk-Latin1", "dutch": "Dutch_Nederlands-Latin1", "english": "English-Latin1", "finnish": "Finnish_Suomi-Latin1", "french": "French_Francais-Latin1", "german": "German_Deutsch-Latin1", "hungarian": "Hungarian_Magyar-UTF8", "italian": "Italian_Italiano-Latin1", "norwegian": "Norwegian-Latin1", "porter": "English-Latin1", "portuguese": "Portuguese_Portugues-Latin1", "romanian": "Romanian_Romana-Latin2", "russian": "Russian-UTF8", "spanish": "Spanish-Latin1", "swedish": "Swedish_Svenska-Latin1", } print("\n") print("******************************") print("Demo for the Snowball stemmers") print("******************************") while True: language = input( "Please enter the name of the language " + "to be demonstrated\n" + "/".join(SnowballStemmer.languages) + "\n" + "(enter 'exit' in order to leave): " ) if language == "exit": break if language not in SnowballStemmer.languages: print( "\nOops, there is no stemmer for this language. " + "Please try again.\n" ) continue stemmer = SnowballStemmer(language) excerpt = udhr.words(udhr_corpus[language])[:300] stemmed = " ".join(stemmer.stem(word) for word in excerpt) stemmed = re.sub(r"(.{,70})\s", r"\1\n", stemmed + " ").rstrip() excerpt = " ".join(excerpt) excerpt = re.sub(r"(.{,70})\s", r"\1\n", excerpt + " ").rstrip() print("\n") print("-" * 70) print("ORIGINAL".center(70)) print(excerpt) print("\n\n") print("STEMMED RESULTS".center(70)) print(stemmed) print("-" * 70) print("\n")
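The language-specific classes above are normally reached through the SnowballStemmer front-end rather than instantiated directly. A minimal usage sketch, assuming NLTK and its stopwords data are installed (the sample words are illustrative, not taken from the source):

from nltk.stem.snowball import SnowballStemmer

# SnowballStemmer.languages lists every supported language,
# mirroring the prompt in the demo() function above.
print(SnowballStemmer.languages)

# ignore_stopwords=True makes stem() return stopwords unchanged,
# which is the early-return branch at the top of each stem() method.
stemmer = SnowballStemmer("spanish", ignore_stopwords=True)

for word in ("corriendo", "canciones", "rapidamente"):
    print(word, "->", stemmer.stem(word))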
Natural Language Toolkit: Stemmer Utilities
(C) 2001-2023 NLTK Project
Author: Helder <he7d3r@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

suffix_replace(): Replaces the old suffix of the original string by a new suffix.

prefix_replace(): Replaces the old prefix of the original string by a new prefix.
:param original: string
:param old: string
:param new: string
:return: string
def suffix_replace(original, old, new):
    # Replace the old suffix of the original string with the new suffix.
    return original[: -len(old)] + new


def prefix_replace(original, old, new):
    # Replace the old prefix of the original string with the new prefix.
    return new + original[len(old) :]
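A tiny worked example of the two helpers, assuming they are importable from nltk.stem.util as in the NLTK source tree (the sample words are illustrative):

from nltk.stem.util import suffix_replace, prefix_replace

# Drop the 3-character suffix "ies" and attach "y": "flies" -> "fly".
print(suffix_replace("flies", "ies", "y"))

# Drop the 2-character prefix "un" and attach "re": "undo" -> "redo".
print(prefix_replace("undo", "un", "re"))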
Natural Language Toolkit: WordNet stemmer interface
(C) 2001-2023 NLTK Project
Authors: Steven Bird <stevenbird1@gmail.com>, Edward Loper <edloper@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

WordNet Lemmatizer: lemmatize using WordNet's built-in morphy function. Returns the input word unchanged if it cannot be found in WordNet.

    >>> from nltk.stem import WordNetLemmatizer
    >>> wnl = WordNetLemmatizer()
    >>> print(wnl.lemmatize('dogs'))
    dog
    >>> print(wnl.lemmatize('churches'))
    church
    >>> print(wnl.lemmatize('aardwolves'))
    aardwolf
    >>> print(wnl.lemmatize('abaci'))
    abacus
    >>> print(wnl.lemmatize('hardrock'))
    hardrock

lemmatize(): Lemmatize `word` using WordNet's built-in morphy function. Returns the input word unchanged if it cannot be found in WordNet.
:param word: The input word to lemmatize.
:type word: str
:param pos: The Part Of Speech tag. Valid options are "n" for nouns, "v" for verbs, "a" for adjectives, "r" for adverbs and "s" for satellite adjectives.
:type pos: str
:return: The lemma of `word`, for the given `pos`.
from nltk.corpus import wordnet as wn


class WordNetLemmatizer:
    def lemmatize(self, word: str, pos: str = "n") -> str:
        lemmas = wn._morphy(word, pos)
        return min(lemmas, key=len) if lemmas else word

    def __repr__(self):
        return "<WordNetLemmatizer>"
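A short usage sketch of the lemmatizer above, assuming the WordNet data has been fetched with nltk.download('wordnet') (the verb example is illustrative, not from the source docstring):

from nltk.stem import WordNetLemmatizer

wnl = WordNetLemmatizer()

# Default pos="n" treats the word as a noun.
print(wnl.lemmatize("dogs"))              # dog
# pos="v" lemmatizes verb forms instead.
print(wnl.lemmatize("running", pos="v"))  # run
# Words not found in WordNet come back unchanged.
print(wnl.lemmatize("hardrock"))          # hardrock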
Natural Language Toolkit: Taggers
(C) 2001-2023 NLTK Project
Authors: Edward Loper <edloper@gmail.com>, Steven Bird <stevenbird1@gmail.com> (minor additions)
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

NLTK Taggers

This package contains classes and interfaces for part-of-speech tagging, or simply "tagging". A "tag" is a case-sensitive string that specifies some property of a token, such as its part of speech. Tagged tokens are encoded as tuples (tag, token). For example, the following tagged token combines the word 'fly' with a noun part of speech tag ('NN'):

    >>> tagged_tok = ('fly', 'NN')

An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset:

    >>> from nltk import pos_tag, word_tokenize
    >>> pos_tag(word_tokenize("John's big idea isn't all that bad."))  # doctest: +NORMALIZE_WHITESPACE
    [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), ("n't", 'RB'),
    ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]

A Russian tagger is also available if you specify lang="rus". It uses the Russian National Corpus tagset:

    >>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus')  # doctest: +SKIP
    [('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'),
    ('бумажку', 'S'), ('.', 'NONLEX')]

This package defines several taggers, which take a list of tokens, assign a tag to each one, and return the resulting list of tagged tokens. Most of the taggers are built automatically based on a training corpus. For example, the unigram tagger tags each word w by checking what the most frequent tag for w was in a training corpus:

    >>> from nltk.corpus import brown
    >>> from nltk.tag import UnigramTagger
    >>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500])
    >>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment']
    >>> for word, tag in tagger.tag(sent):
    ...     print(word, '->', tag)
    Mitchell -> NP
    decried -> None
    the -> AT
    high -> JJ
    rate -> NN
    of -> IN
    unemployment -> None

Note that words that the tagger has not seen during training receive a tag of None.

We evaluate a tagger on data that was not seen during training:

    >>> round(tagger.accuracy(brown.tagged_sents(categories='news')[500:600]), 3)
    0.735

For more information, please consult chapter 5 of the NLTK Book.

isort:skip_file

Module comments: the taggers here currently only support English and Russian; pos_tag throws an error if `tokens` is of string type; when a tagset is given, tags are mapped to the specified tagset; note that the new Russian POS tags from the model contain suffixes, see https://github.com/nltk/nltk/issues/2151#issuecomment-430709018

pos_tag(): Use NLTK's currently recommended part of speech tagger to tag the given list of tokens.

    >>> from nltk.tag import pos_tag
    >>> from nltk.tokenize import word_tokenize
    >>> pos_tag(word_tokenize("John's big idea isn't all that bad."))  # doctest: +NORMALIZE_WHITESPACE
    [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), ("n't", 'RB'),
    ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')]
    >>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal')  # doctest: +NORMALIZE_WHITESPACE
    [('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'), ("n't", 'ADV'),
    ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')]

NB. Use pos_tag_sents() for efficient tagging of more than one sentence.

:param tokens: Sequence of tokens to be tagged
:type tokens: list(str)
:param tagset: the tagset to be used, e.g. universal, wsj, brown
:type tagset: str
:param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
:type lang: str
:return: The tagged tokens
:rtype: list(tuple(str, str))

pos_tag_sents(): Use NLTK's currently recommended part of speech tagger to tag the given list of sentences, each consisting of a list of tokens.

:param sentences: List of sentences to be tagged
:type sentences: list(list(str))
:param tagset: the tagset to be used, e.g. universal, wsj, brown
:type tagset: str
:param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian
:type lang: str
:return: The list of tagged sentences
:rtype: list(list(tuple(str, str)))
from nltk.tag.api import TaggerI from nltk.tag.util import str2tuple, tuple2str, untag from nltk.tag.sequential import ( SequentialBackoffTagger, ContextTagger, DefaultTagger, NgramTagger, UnigramTagger, BigramTagger, TrigramTagger, AffixTagger, RegexpTagger, ClassifierBasedTagger, ClassifierBasedPOSTagger, ) from nltk.tag.brill import BrillTagger from nltk.tag.brill_trainer import BrillTaggerTrainer from nltk.tag.tnt import TnT from nltk.tag.hunpos import HunposTagger from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger from nltk.tag.mapping import tagset_mapping, map_tag from nltk.tag.crf import CRFTagger from nltk.tag.perceptron import PerceptronTagger from nltk.data import load, find RUS_PICKLE = ( "taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle" ) def _get_tagger(lang=None): if lang == "rus": tagger = PerceptronTagger(False) ap_russian_model_loc = "file:" + str(find(RUS_PICKLE)) tagger.load(ap_russian_model_loc) else: tagger = PerceptronTagger() return tagger def _pos_tag(tokens, tagset=None, tagger=None, lang=None): if lang not in ["eng", "rus"]: raise NotImplementedError( "Currently, NLTK pos_tag only supports English and Russian " "(i.e. lang='eng' or lang='rus')" ) elif isinstance(tokens, str): raise TypeError("tokens: expected a list of strings, got a string") else: tagged_tokens = tagger.tag(tokens) if tagset: if lang == "eng": tagged_tokens = [ (token, map_tag("en-ptb", tagset, tag)) for (token, tag) in tagged_tokens ] elif lang == "rus": tagged_tokens = [ (token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0])) for (token, tag) in tagged_tokens ] return tagged_tokens def pos_tag(tokens, tagset=None, lang="eng"): tagger = _get_tagger(lang) return _pos_tag(tokens, tagset, tagger, lang) def pos_tag_sents(sentences, tagset=None, lang="eng"): tagger = _get_tagger(lang) return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences]
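A brief sketch of the two public helpers defined above, assuming the punkt and averaged perceptron tagger models have been downloaded via nltk.download() (outputs are only indicated in comments, not asserted):

from nltk import pos_tag, pos_tag_sents, word_tokenize

# Single sentence, Penn Treebank tags by default.
tokens = word_tokenize("John's big idea isn't all that bad.")
print(pos_tag(tokens))

# The same sentence mapped onto the coarse Universal tagset.
print(pos_tag(tokens, tagset="universal"))

# pos_tag_sents() builds the tagger once and reuses it, which is the
# efficient path when tagging many sentences.
sents = [word_tokenize(s) for s in ("Dogs bark.", "Cats sleep all day.")]
print(pos_tag_sents(sents))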
Natural Language Toolkit: Interface to the CRFSuite Tagger
(C) 2001-2023 NLTK Project
Author: Long Duong <longdt219@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

A module for POS tagging using CRFSuite.

CRFTagger: A module for POS tagging using CRFSuite (https://pypi.python.org/pypi/python-crfsuite).

    >>> from nltk.tag import CRFTagger
    >>> ct = CRFTagger()  # doctest: +SKIP
    >>> train_data = [[('University', 'Noun'), ('is', 'Verb'), ('a', 'Det'), ('good', 'Adj'), ('place', 'Noun')],
    ...               [('dog', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]]
    >>> ct.train(train_data, 'model.crf.tagger')  # doctest: +SKIP
    >>> ct.tag_sents([['dog', 'is', 'good'], ['Cat', 'eat', 'meat']])  # doctest: +SKIP
    [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]]
    >>> gold_sentences = [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]]
    >>> ct.accuracy(gold_sentences)  # doctest: +SKIP
    1.0

    Setting learned model file:

    >>> ct = CRFTagger()  # doctest: +SKIP
    >>> ct.set_model_file('model.crf.tagger')  # doctest: +SKIP
    >>> ct.accuracy(gold_sentences)  # doctest: +SKIP
    1.0

__init__(): Initialize the CRFSuite tagger.
:param feature_func: The function that extracts features for each token of a sentence. This function should take two parameters, tokens and index, and extract features at the index position from the tokens list. See the built-in _get_features function for more detail.
:param verbose: output the debugging messages during training.
:type verbose: boolean
:param training_opt: python-crfsuite training options
:type training_opt: dictionary

Set of possible training options (using the LBFGS training algorithm):
- 'feature.minfreq': The minimum frequency of features.
- 'feature.possible_states': Force to generate possible state features.
- 'feature.possible_transitions': Force to generate possible transition features.
- 'c1': Coefficient for L1 regularization.
- 'c2': Coefficient for L2 regularization.
- 'max_iterations': The maximum number of iterations for L-BFGS optimization.
- 'num_memories': The number of limited memories for approximating the inverse hessian matrix.
- 'epsilon': Epsilon for testing the convergence of the objective.
- 'period': The duration of iterations to test the stopping criterion.
- 'delta': The threshold for the stopping criterion; an L-BFGS iteration stops when the improvement of the log likelihood over the last 'period' iterations is no greater than this threshold.
- 'linesearch': The line search algorithm used in L-BFGS updates: 'MoreThuente' (More and Thuente's method), 'Backtracking' (backtracking method with regular Wolfe condition), 'StrongBacktracking' (backtracking method with strong Wolfe condition).
- 'max_linesearch': The maximum number of trials for the line search algorithm.

_get_features(): Extract basic features about this word, including: current word, is it capitalized, does it have punctuation, does it have a number, suffixes up to length 3. Note that we might include features over the previous word, next word, etc. (Inline comment markers in the code: capitalization, number, punctuation, suffix up to length 3.)
:return: a list which contains the features
:rtype: list(str)

tag_sents(): Tag a list of sentences. NB: before using this function, the user should specify the model_file either by training a new model using the train() function, or by using a pre-trained model set via the set_model_file() function.
:params sentences: list of sentences needed to tag.
:type sentences: list(list(str))
:return: list of tagged sentences.
:rtype: list(list(tuple(str, str)))
(We need the list of sentences instead of the list generator for matching the input and output.)

train(): Train the CRF tagger using CRFSuite.
:params train_data: the list of annotated sentences.
:type train_data: list(list(tuple(str, str)))
:params model_file: the model will be saved to this file.
(Now train the model; the output should be model_file. Save the model file.)

tag(): Tag a sentence using the Python CRFSuite tagger. NB: before using this function, the user should specify the model_file either by training a new model using the train() function, or by using a pre-trained model set via the set_model_file() function.
:params tokens: list of tokens needed to tag.
:type tokens: list(str)
:return: list of tagged tokens.
:rtype: list(tuple(str, str))
import re import unicodedata from nltk.tag.api import TaggerI try: import pycrfsuite except ImportError: pass class CRFTagger(TaggerI): def __init__(self, feature_func=None, verbose=False, training_opt={}): self._model_file = "" self._tagger = pycrfsuite.Tagger() if feature_func is None: self._feature_func = self._get_features else: self._feature_func = feature_func self._verbose = verbose self._training_options = training_opt self._pattern = re.compile(r"\d") def set_model_file(self, model_file): self._model_file = model_file self._tagger.open(self._model_file) def _get_features(self, tokens, idx): token = tokens[idx] feature_list = [] if not token: return feature_list if token[0].isupper(): feature_list.append("CAPITALIZATION") if re.search(self._pattern, token) is not None: feature_list.append("HAS_NUM") punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"} if all(unicodedata.category(x) in punc_cat for x in token): feature_list.append("PUNCTUATION") if len(token) > 1: feature_list.append("SUF_" + token[-1:]) if len(token) > 2: feature_list.append("SUF_" + token[-2:]) if len(token) > 3: feature_list.append("SUF_" + token[-3:]) feature_list.append("WORD_" + token) return feature_list def tag_sents(self, sents): if self._model_file == "": raise Exception( " No model file is found !! Please use train or set_model_file function" ) result = [] for tokens in sents: features = [self._feature_func(tokens, i) for i in range(len(tokens))] labels = self._tagger.tag(features) if len(labels) != len(tokens): raise Exception(" Predicted Length Not Matched, Expect Errors !") tagged_sent = list(zip(tokens, labels)) result.append(tagged_sent) return result def train(self, train_data, model_file): trainer = pycrfsuite.Trainer(verbose=self._verbose) trainer.set_params(self._training_options) for sent in train_data: tokens, labels = zip(*sent) features = [self._feature_func(tokens, i) for i in range(len(tokens))] trainer.append(features, labels) trainer.train(model_file) self.set_model_file(model_file) def tag(self, tokens): return self.tag_sents([tokens])[0]
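As a sketch of how the pieces above fit together, the snippet below trains a CRFTagger with explicit training options and a user-supplied feature function; the toy data, output file name and extra features are hypothetical, and python-crfsuite must be installed:

from nltk.tag import CRFTagger

def my_features(tokens, idx):
    # Hypothetical feature function with the same (tokens, idx) signature
    # that CRFTagger expects; adds a lowercased-word and sentence-start flag
    # on top of the word identity and capitalization cues used by default.
    token = tokens[idx]
    feats = ["WORD_" + token, "LOWER_" + token.lower()]
    if idx == 0:
        feats.append("SENT_START")
    if token and token[0].isupper():
        feats.append("CAPITALIZATION")
    return feats

train_data = [
    [("University", "Noun"), ("is", "Verb"), ("a", "Det"), ("good", "Adj"), ("place", "Noun")],
    [("dog", "Noun"), ("eat", "Verb"), ("meat", "Noun")],
]

# c1/c2 regularization and the iteration cap are passed straight to crfsuite.
ct = CRFTagger(feature_func=my_features,
               training_opt={"c1": 1.0, "c2": 1e-3, "max_iterations": 50})
ct.train(train_data, "model.crf.tagger")   # trains and loads the saved model file
print(ct.tag(["dog", "is", "good"]))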
natural language toolkit hidden markov model c 20012023 nltk project trevor cohn tacohncsse unimelb edu au philip blunsom pcblcsse unimelb edu au tiago tresoldi tiagotresoldi pro br fixes steven bird stevenbird1gmail com fixes joseph frazee jfrazeemail utexas edu fixes steven xu xxustudent unimelb edu au fixes url https www nltk org for license information see license txt hidden markov models hmms largely used to assign the correct label sequence to sequential data or assess the probability of a given label and data sequence these models are finite state machines characterised by a number of states transitions between these states and output symbols emitted while in each state the hmm is an extension to the markov chain where each state corresponds deterministically to a given event in the hmm the observation is a probabilistic function of the state hmms share the markov chain s assumption being that the probability of transition from one state to another only depends on the current state i e the series of states that led to the current state are not used they are also time invariant the hmm is a directed graph with probability weighted edges representing the probability of a transition between the source and sink states where each vertex emits an output symbol when entered the symbol or observation is nondeterministically generated for this reason knowing that a sequence of output observations was generated by a given hmm does not mean that the corresponding sequence of states and what the current state is is known this is the hidden in the hidden markov model formally a hmm can be characterised by the output observation alphabet this is the set of symbols which may be observed as output of the system the set of states the transition probabilities aij pst j st1 i these represent the probability of transition to each state from a given state the output probability matrix bik pxt ok st i these represent the probability of observing each symbol in a given state the initial state distribution this gives the probability of starting in each state to ground this discussion take a common nlp application partofspeech pos tagging an hmm is desirable for this task as the highest probability tag sequence can be calculated for a given sequence of word forms this differs from other tagging techniques which often tag each word individually seeking to optimise each individual tagging greedily without regard to the optimal combination of tags for a larger unit such as a sentence the hmm does this with the viterbi algorithm which efficiently computes the optimal path through the graph given the sequence of words forms in pos tagging the states usually have a 1 1 correspondence with the tag alphabet i e each state represents a single tag the output observation alphabet is the set of word forms the lexicon and the remaining three parameters are derived by a training regime with this information the probability of a given sentence can be easily derived by simply summing the probability of each distinct path through the model similarly the highest probability tagging sequence can be derived with the viterbi algorithm yielding a state sequence which can be mapped into a tag sequence this discussion assumes that the hmm has been trained this is probably the most difficult task with the model and requires either mle estimates of the parameters or unsupervised learning using the baumwelch algorithm a variant of em for more information please consult the source code for this module which includes extensive 
demonstration code hidden markov model class a generative model for labelling sequence data these models define the joint probability of a sequence of symbols and their labels state transitions as the product of the starting state probability the probability of each state transition and the probability of each observation being generated from each state this is described in more detail in the module documentation this implementation is based on the hmm description in chapter 8 huang acero and hon spoken language processing and includes an extension for training shallow hmm parsers or specialized hmms as in molina et al 2002 a specialized hmm modifies training data by applying a specialization function to create a new training set that is more appropriate for sequential tagging with an hmm a typical use case is chunking param symbols the set of output symbols alphabet type symbols seq of any param states a set of states representing state space type states seq of any param transitions transition probabilities prsi sj is the probability of transition from state i given the model is in statej type transitions conditionalprobdisti param outputs output probabilities prok si is the probability of emitting symbol k when entering state i type outputs conditionalprobdisti param priors initial state distribution prsi is the probability of starting in state i type priors probdisti param transform an optional function for transforming training instances defaults to the identity function type transform callable train a new hiddenmarkovmodeltagger using the given labeled and unlabeled training instances testing will be performed if test instances are provided return a hidden markov model tagger rtype hiddenmarkovmodeltagger param labeledsequence a sequence of labeled training instances i e a list of sentences represented as tuples type labeledsequence listlist param testsequence a sequence of labeled test instances type testsequence listlist param unlabeledsequence a sequence of unlabeled training instances i e a list of sentences represented as words type unlabeledsequence listlist param transform an optional function for transforming training instances defaults to the identity function see transform type transform function param estimator an optional function or class that maps a condition s frequency distribution to its probability distribution defaults to a lidstone distribution with gamma 0 1 type estimator class or function param verbose boolean flag indicating whether training should be verbose or include printed output type verbose bool param maxiterations number of baumwelch iterations to perform type maxiterations int returns the probability of the given symbol sequence if the sequence is labelled then returns the joint probability of the symbol state sequence otherwise uses the forward algorithm to find the probability over all label sequences return the probability of the sequence rtype float param sequence the sequence of symbols which must contain the text property and optionally the tag property type sequence token returns the logprobability of the given symbol sequence if the sequence is labelled then returns the joint logprobability of the symbol state sequence otherwise uses the forward algorithm to find the logprobability over all label sequences return the logprobability of the sequence rtype float param sequence the sequence of symbols which must contain the text property and optionally the tag property type sequence token tags the sequence with the highest probability state 
sequence this uses the bestpath method to find the viterbi path return a labelled sequence of symbols rtype list param unlabeledsequence the sequence of unlabeled symbols type unlabeledsequence list return the log probability of the symbol being observed in the given state rtype float the cache is a tuple p o x s where s maps symbols to integers i e it is the inverse mapping from self symbols for each symbol s in self symbols the following is true self symbolsss s o is the log output probabilities oi k log ptokentsymktagtstatei x is the log transition probabilities xi j log ptagtstatejtagt1statei p is the log prior probabilities pi log ptag0statei add new symbols to the symbol table and repopulate the output probabilities and symbol table mapping don t bother with the work if there aren t any new symbols add new columns to the output probability table without destroying the old probabilities only calculate probabilities for new symbols only create symbol mappings for new symbols returns the state sequence of the optimal most probable path through the hmm uses the viterbi algorithm to calculate this part by dynamic programming return the state sequence rtype sequence of any param unlabeledsequence the sequence of unlabeled symbols type unlabeledsequence list returns the state sequence of the optimal most probable path through the hmm uses the viterbi algorithm to calculate this part by dynamic programming this uses a simple direct method and is included for teaching purposes return the state sequence rtype sequence of any param unlabeledsequence the sequence of unlabeled symbols type unlabeledsequence list find the starting log probabilities for each state find the maximum log probabilities for reaching each state at time t find the highest probability final state traverse the backpointers b to find the state sequence randomly sample the hmm to generate a sentence of a given length this samples the prior distribution then the observation distribution and transition distribution for each subsequent observation and state this will mostly generate unintelligible garbage but can provide some amusement return the randomly created stateobservation sequence generated according to the hmm s probability distributions the subtokens have text and tag properties containing the observation and state respectively rtype list param rng random number generator type rng random or any object with a random method param length desired output length type length int sample the starting state and symbol prob dists sample the state transition and symbol prob dists returns the entropy over labellings of the given sequence this is given by ho sums prs o log prs o where the summation ranges over all state sequences s let z pro sums prs o where the summation ranges over all state sequences and o is the observation sequence as such the entropy can be reexpressed as h sums prs o log prs o z log z sums prs o log prs 0 log z sums prs o log prs0 sumt prst st1 sumt prot st the order of summation for the log terms can be flipped allowing dynamic programming to be used to calculate the entropy specifically we use the forward and backward probabilities alpha beta giving h log z sums0 alpha0s0 beta0s0 z log prs0 sumt si sj alphatsi prsj si prot1 sj betatsj z log prsj si sumt st alphatst betatst z log prot st this simply uses alpha and beta to find the probabilities of partial sequences constrained to include the given states at some point in time starting state t 0 print ps0 s state p state transitions print psd s sd s t0 s0 t1 
s1 p symbol emissions print psd s t state p returns the pointwise entropy over the possible states at each position in the chain given the observation sequence return a matrix of transition log probabilities
natural language toolkit hidden markov model c 2001 2023 nltk project trevor cohn tacohn csse unimelb edu au philip blunsom pcbl csse unimelb edu au tiago tresoldi tiago tresoldi pro br fixes steven bird stevenbird1 gmail com fixes joseph frazee jfrazee mail utexas edu fixes steven xu xxu student unimelb edu au fixes url https www nltk org for license information see license txt hidden markov models hmms largely used to assign the correct label sequence to sequential data or assess the probability of a given label and data sequence these models are finite state machines characterised by a number of states transitions between these states and output symbols emitted while in each state the hmm is an extension to the markov chain where each state corresponds deterministically to a given event in the hmm the observation is a probabilistic function of the state hmms share the markov chain s assumption being that the probability of transition from one state to another only depends on the current state i e the series of states that led to the current state are not used they are also time invariant the hmm is a directed graph with probability weighted edges representing the probability of a transition between the source and sink states where each vertex emits an output symbol when entered the symbol or observation is non deterministically generated for this reason knowing that a sequence of output observations was generated by a given hmm does not mean that the corresponding sequence of states and what the current state is is known this is the hidden in the hidden markov model formally a hmm can be characterised by the output observation alphabet this is the set of symbols which may be observed as output of the system the set of states the transition probabilities a_ ij p s_t j s_ t 1 i these represent the probability of transition to each state from a given state the output probability matrix b_i k p x_t o_k s_t i these represent the probability of observing each symbol in a given state the initial state distribution this gives the probability of starting in each state to ground this discussion take a common nlp application part of speech pos tagging an hmm is desirable for this task as the highest probability tag sequence can be calculated for a given sequence of word forms this differs from other tagging techniques which often tag each word individually seeking to optimise each individual tagging greedily without regard to the optimal combination of tags for a larger unit such as a sentence the hmm does this with the viterbi algorithm which efficiently computes the optimal path through the graph given the sequence of words forms in pos tagging the states usually have a 1 1 correspondence with the tag alphabet i e each state represents a single tag the output observation alphabet is the set of word forms the lexicon and the remaining three parameters are derived by a training regime with this
information the probability of a given sentence can be easily derived by simply summing the probability of each distinct path through the model similarly the highest probability tagging sequence can be derived with the viterbi algorithm yielding a state sequence which can be mapped into a tag sequence this discussion assumes that the hmm has been trained this is probably the most difficult task with the model and requires either mle estimates of the parameters or unsupervised learning using the baum welch algorithm a variant of em for more information please consult the source code for this module which includes extensive demonstration code index of text in a tuple index of tag in a tuple hidden markov model class a generative model for labelling sequence data these models define the joint probability of a sequence of symbols and their labels state transitions as the product of the starting state probability the probability of each state transition and the probability of each observation being generated from each state this is described in more detail in the module documentation this implementation is based on the hmm description in chapter 8 huang acero and hon spoken language processing and includes an extension for training shallow hmm parsers or specialized hmms as in molina et al 2002 a specialized hmm modifies training data by applying a specialization function to create a new training set that is more appropriate for sequential tagging with an hmm a typical use case is chunking param symbols the set of output symbols alphabet type symbols seq of any param states a set of states representing state space type states seq of any param transitions transition probabilities pr s_i s_j is the probability of transition from state i given the model is in state_j type transitions conditionalprobdisti param outputs output probabilities pr o_k s_i is the probability of emitting symbol k when entering state i type outputs conditionalprobdisti param priors initial state distribution pr s_i is the probability of starting in state i type priors probdisti param transform an optional function for transforming training instances defaults to the identity function type transform callable train a new hiddenmarkovmodeltagger using the given labeled and unlabeled training instances testing will be performed if test instances are provided return a hidden markov model tagger rtype hiddenmarkovmodeltagger param labeled_sequence a sequence of labeled training instances i e a list of sentences represented as tuples type labeled_sequence list list param test_sequence a sequence of labeled test instances type test_sequence list list param unlabeled_sequence a sequence of unlabeled training instances i e a list of sentences represented as words type unlabeled_sequence list list param transform an optional function for transforming training instances defaults to the identity function see transform type transform function param estimator an optional function or class that maps a condition s frequency distribution to its probability distribution defaults to a lidstone distribution with gamma 0 1 type estimator class or function param verbose boolean flag indicating whether training should be verbose or include printed output type verbose bool param max_iterations number of baum welch iterations to perform type max_iterations int returns the probability of the given symbol sequence if the sequence is labelled then returns the joint probability of the symbol state sequence otherwise uses the forward algorithm to find 
the probability over all label sequences return the probability of the sequence rtype float param sequence the sequence of symbols which must contain the text property and optionally the tag property type sequence token returns the log probability of the given symbol sequence if the sequence is labelled then returns the joint log probability of the symbol state sequence otherwise uses the forward algorithm to find the log probability over all label sequences return the log probability of the sequence rtype float param sequence the sequence of symbols which must contain the text property and optionally the tag property type sequence token tags the sequence with the highest probability state sequence this uses the best_path method to find the viterbi path return a labelled sequence of symbols rtype list param unlabeled_sequence the sequence of unlabeled symbols type unlabeled_sequence list return the log probability of the symbol being observed in the given state rtype float the cache is a tuple p o x s where s maps symbols to integers i e it is the inverse mapping from self _symbols for each symbol s in self _symbols the following is true self _symbols s s s o is the log output probabilities o i k log p token t sym k tag t state i x is the log transition probabilities x i j log p tag t state j tag t 1 state i p is the log prior probabilities p i log p tag 0 state i add new symbols to the symbol table and repopulate the output probabilities and symbol table mapping don t bother with the work if there aren t any new symbols add new columns to the output probability table without destroying the old probabilities only calculate probabilities for new symbols only create symbol mappings for new symbols returns the state sequence of the optimal most probable path through the hmm uses the viterbi algorithm to calculate this part by dynamic programming return the state sequence rtype sequence of any param unlabeled_sequence the sequence of unlabeled symbols type unlabeled_sequence list returns the state sequence of the optimal most probable path through the hmm uses the viterbi algorithm to calculate this part by dynamic programming this uses a simple direct method and is included for teaching purposes return the state sequence rtype sequence of any param unlabeled_sequence the sequence of unlabeled symbols type unlabeled_sequence list find the starting log probabilities for each state find the maximum log probabilities for reaching each state at time t find the highest probability final state traverse the back pointers b to find the state sequence randomly sample the hmm to generate a sentence of a given length this samples the prior distribution then the observation distribution and transition distribution for each subsequent observation and state this will mostly generate unintelligible garbage but can provide some amusement return the randomly created state observation sequence generated according to the hmm s probability distributions the subtokens have text and tag properties containing the observation and state respectively rtype list param rng random number generator type rng random or any object with a random method param length desired output length type length int sample the starting state and symbol prob dists sample the state transition and symbol prob dists returns the entropy over labellings of the given sequence this is given by h o sum_s pr s o log pr s o where the summation ranges over all state sequences s let z pr o sum_s pr s o where the summation ranges over all state sequences 
and o is the observation sequence as such the entropy can be re expressed as h sum_s pr s o log pr s o z log z sum_s pr s o log pr s 0 log z sum_s pr s o log pr s_0 sum_t pr s_t s_ t 1 sum_t pr o_t s_t the order of summation for the log terms can be flipped allowing dynamic programming to be used to calculate the entropy specifically we use the forward and backward probabilities alpha beta giving h log z sum_s0 alpha_0 s0 beta_0 s0 z log pr s0 sum_t si sj alpha_t si pr sj si pr o_t 1 sj beta_t sj z log pr sj si sum_t st alpha_t st beta_t st z log pr o_t st this simply uses alpha and beta to find the probabilities of partial sequences constrained to include the given state s at some point in time starting state t 0 print p s_0 s state p state transitions print p s_ d s s_ d s t0 s0 t1 s1 p symbol emissions print p s_ d s t state p returns the pointwise entropy over the possible states at each position in the chain given the observation sequence return a matrix of transition log probabilities return a vector with log probabilities of emitting a symbol when entering states return the forward probability matrix a t by n array of log probabilities where t is the length of the sequence and n is the number of states each entry t s gives the probability of being in state s at time t after observing the partial symbol sequence up to and including t param unlabeled_sequence the sequence of unlabeled symbols type unlabeled_sequence list return the forward log probability matrix rtype array initialization induction return the backward probability matrix a t by n array of log probabilities where t is the length of the sequence and n is the number of states each entry t s gives the probability of being in state s at time t after observing the partial symbol sequence from t t return the backward log probability matrix rtype array param unlabeled_sequence the sequence of unlabeled symbols type unlabeled_sequence list initialise the backward values 1 is an arbitrarily chosen value from rabiner tutorial inductively calculate remaining backward values tests the hiddenmarkovmodeltagger instance param test_sequence a sequence of labeled test instances type test_sequence list list param verbose boolean flag indicating whether training should be verbose or include printed output type verbose bool algorithms for learning hmm parameters from training data these include both supervised learning mle and unsupervised learning baum welch creates an hmm trainer to induce an hmm with the given states and output symbol alphabet a supervised and unsupervised training method may be used if either of the states or symbols are not given these may be derived from supervised training param states the set of state labels type states sequence of any param symbols the set of observation symbols type symbols sequence of any trains the hmm using both or either of supervised and unsupervised techniques return the trained model rtype hiddenmarkovmodeltagger param labelled_sequences the supervised training data a set of labelled sequences of observations ex word_1 tag_1 word_n tag_n type labelled_sequences list param unlabeled_sequences the unsupervised training data a set of sequences of observations ex word_1 word_n type unlabeled_sequences list param kwargs additional arguments to pass to the training methods compute forward and backward probabilities find the log probability of the sequence not found fixme not found fixme trains the hmm using the baum welch algorithm to maximise the probability of the data sequence this is a 
variant of the em algorithm and is unsupervised in that it doesn t need the state sequences for the symbols the code is based on a tutorial on hidden markov models and selected applications in speech recognition lawrence rabiner ieee 1989 return the trained model rtype hiddenmarkovmodeltagger param unlabeled_sequences the training data a set of sequences of observations type unlabeled_sequences list kwargs may include following parameters param model a hiddenmarkovmodeltagger instance used to begin the baum welch algorithm param max_iterations the maximum number of em iterations param convergence_logprob the maximum change in log probability to allow convergence create a uniform hmm which will be iteratively refined unless given an existing model update model prob dists so that they can be modified model _priors mutableprobdist model _priors self _states iterate until convergence add these sums to the global a and b values use the calculated values to update the transition and output probability values we should normalize all probabilities see p 391 huang et al let sum p be k we can divide each pi by k to make sum p 1 pi pi k log2 pi log2 pi log2 k update output and transition probabilities rabiner says the priors don t need to be updated i don t believe him fixme test for convergence supervised training maximising the joint probability of the symbol and state sequences this is done via collecting frequencies of transitions between states symbol observations while within each state and which states start a sentence these frequency distributions are then normalised into probability estimates which can be smoothed if desired return the trained model rtype hiddenmarkovmodeltagger param labelled_sequences the training data a set of labelled sequences of observations type labelled_sequences list param estimator a function taking a freqdist and a number of bins and returning a cprobdisti otherwise a mle estimate is used default to the mle estimate count occurrences of starting states transitions out of each state and output symbols observed in each state update the state and symbol lists create probability distributions with smoothing adds the logged values returning the logarithm of the addition return an example hmm described at page 381 huang et al demonstrates hmm probability calculation normalize log this word clean up the tag store cleaned up tagged token demonstrates pos tagging using supervised training demonstrates the baum welch algorithm in pos tagging it s rather slow so only use 10 samples by default demo baum welch by generating some sequences and then performing unsupervised training on them generate some random sequences train on those examples starting with the model that generated them
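Before the implementation below, here is a standalone sketch (plain numpy, not the NLTK code) of the Viterbi recursion the description above refers to, run on the same three-state bull/bear/static market model that the module's demo builds; the log2 arithmetic mirrors the convention used throughout the implementation.

import numpy as np

states = ["bull", "bear", "static"]
symbols = ["up", "down", "unchanged"]
A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]])  # A[i, j] = P(s_t = j | s_t-1 = i)
B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]])  # B[i, k] = P(o_t = k | s_t = i)
pi = np.array([0.5, 0.2, 0.3])                                     # pi[i] = P(s_0 = i)

def viterbi(observations):
    """Return the most probable state sequence for a list of observed symbols."""
    obs = [symbols.index(o) for o in observations]
    T, N = len(obs), len(states)
    V = np.zeros((T, N))           # V[t, j]: best log2-probability of any path ending in state j at time t
    back = np.zeros((T, N), int)   # back[t, j]: predecessor state on that best path
    V[0] = np.log2(pi) + np.log2(B[:, obs[0]])
    for t in range(1, T):
        for j in range(N):
            scores = V[t - 1] + np.log2(A[:, j])
            back[t, j] = np.argmax(scores)
            V[t, j] = scores[back[t, j]] + np.log2(B[j, obs[t]])
    path = [int(np.argmax(V[-1]))]
    for t in range(T - 1, 0, -1):
        path.append(back[t, path[-1]])
    return [states[i] for i in reversed(path)]

print(viterbi(["up", "up", "down"]))  # -> ['bull', 'bull', 'bear']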
import itertools import re try: import numpy as np except ImportError: pass from nltk.metrics import accuracy from nltk.probability import ( ConditionalFreqDist, ConditionalProbDist, DictionaryConditionalProbDist, DictionaryProbDist, FreqDist, LidstoneProbDist, MLEProbDist, MutableProbDist, RandomProbDist, ) from nltk.tag.api import TaggerI from nltk.util import LazyMap, unique_list _TEXT = 0 _TAG = 1 def _identity(labeled_symbols): return labeled_symbols class HiddenMarkovModelTagger(TaggerI): def __init__( self, symbols, states, transitions, outputs, priors, transform=_identity ): self._symbols = unique_list(symbols) self._states = unique_list(states) self._transitions = transitions self._outputs = outputs self._priors = priors self._cache = None self._transform = transform @classmethod def _train( cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, transform=_identity, estimator=None, **kwargs, ): if estimator is None: def estimator(fd, bins): return LidstoneProbDist(fd, 0.1, bins) labeled_sequence = LazyMap(transform, labeled_sequence) symbols = unique_list(word for sent in labeled_sequence for word, tag in sent) tag_set = unique_list(tag for sent in labeled_sequence for word, tag in sent) trainer = HiddenMarkovModelTrainer(tag_set, symbols) hmm = trainer.train_supervised(labeled_sequence, estimator=estimator) hmm = cls( hmm._symbols, hmm._states, hmm._transitions, hmm._outputs, hmm._priors, transform=transform, ) if test_sequence: hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) if unlabeled_sequence: max_iterations = kwargs.get("max_iterations", 5) hmm = trainer.train_unsupervised( unlabeled_sequence, model=hmm, max_iterations=max_iterations ) if test_sequence: hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) return hmm @classmethod def train( cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, **kwargs ): return cls._train(labeled_sequence, test_sequence, unlabeled_sequence, **kwargs) def probability(self, sequence): return 2 ** (self.log_probability(self._transform(sequence))) def log_probability(self, sequence): sequence = self._transform(sequence) T = len(sequence) if T > 0 and sequence[0][_TAG]: last_state = sequence[0][_TAG] p = self._priors.logprob(last_state) + self._output_logprob( last_state, sequence[0][_TEXT] ) for t in range(1, T): state = sequence[t][_TAG] p += self._transitions[last_state].logprob( state ) + self._output_logprob(state, sequence[t][_TEXT]) last_state = state return p else: alpha = self._forward_probability(sequence) p = logsumexp2(alpha[T - 1]) return p def tag(self, unlabeled_sequence): unlabeled_sequence = self._transform(unlabeled_sequence) return self._tag(unlabeled_sequence) def _tag(self, unlabeled_sequence): path = self._best_path(unlabeled_sequence) return list(zip(unlabeled_sequence, path)) def _output_logprob(self, state, symbol): return self._outputs[state].logprob(symbol) def _create_cache(self): if not self._cache: N = len(self._states) M = len(self._symbols) P = np.zeros(N, np.float32) X = np.zeros((N, N), np.float32) O = np.zeros((N, M), np.float32) for i in range(N): si = self._states[i] P[i] = self._priors.logprob(si) for j in range(N): X[i, j] = self._transitions[si].logprob(self._states[j]) for k in range(M): O[i, k] = self._output_logprob(si, self._symbols[k]) S = {} for k in range(M): S[self._symbols[k]] = k self._cache = (P, O, X, S) def _update_cache(self, symbols): if symbols: self._create_cache() P, O, X, S = self._cache for symbol in symbols: if symbol not in 
self._symbols: self._cache = None self._symbols.append(symbol) if not self._cache: N = len(self._states) M = len(self._symbols) Q = O.shape[1] O = np.hstack([O, np.zeros((N, M - Q), np.float32)]) for i in range(N): si = self._states[i] for k in range(Q, M): O[i, k] = self._output_logprob(si, self._symbols[k]) for k in range(Q, M): S[self._symbols[k]] = k self._cache = (P, O, X, S) def reset_cache(self): self._cache = None def best_path(self, unlabeled_sequence): unlabeled_sequence = self._transform(unlabeled_sequence) return self._best_path(unlabeled_sequence) def _best_path(self, unlabeled_sequence): T = len(unlabeled_sequence) N = len(self._states) self._create_cache() self._update_cache(unlabeled_sequence) P, O, X, S = self._cache V = np.zeros((T, N), np.float32) B = -np.ones((T, N), int) V[0] = P + O[:, S[unlabeled_sequence[0]]] for t in range(1, T): for j in range(N): vs = V[t - 1, :] + X[:, j] best = np.argmax(vs) V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]] B[t, j] = best current = np.argmax(V[T - 1, :]) sequence = [current] for t in range(T - 1, 0, -1): last = B[t, current] sequence.append(last) current = last sequence.reverse() return list(map(self._states.__getitem__, sequence)) def best_path_simple(self, unlabeled_sequence): unlabeled_sequence = self._transform(unlabeled_sequence) return self._best_path_simple(unlabeled_sequence) def _best_path_simple(self, unlabeled_sequence): T = len(unlabeled_sequence) N = len(self._states) V = np.zeros((T, N), np.float64) B = {} symbol = unlabeled_sequence[0] for i, state in enumerate(self._states): V[0, i] = self._priors.logprob(state) + self._output_logprob(state, symbol) B[0, state] = None for t in range(1, T): symbol = unlabeled_sequence[t] for j in range(N): sj = self._states[j] best = None for i in range(N): si = self._states[i] va = V[t - 1, i] + self._transitions[si].logprob(sj) if not best or va > best[0]: best = (va, si) V[t, j] = best[0] + self._output_logprob(sj, symbol) B[t, sj] = best[1] best = None for i in range(N): val = V[T - 1, i] if not best or val > best[0]: best = (val, self._states[i]) current = best[1] sequence = [current] for t in range(T - 1, 0, -1): last = B[t, current] sequence.append(last) current = last sequence.reverse() return sequence def random_sample(self, rng, length): tokens = [] state = self._sample_probdist(self._priors, rng.random(), self._states) symbol = self._sample_probdist( self._outputs[state], rng.random(), self._symbols ) tokens.append((symbol, state)) for i in range(1, length): state = self._sample_probdist( self._transitions[state], rng.random(), self._states ) symbol = self._sample_probdist( self._outputs[state], rng.random(), self._symbols ) tokens.append((symbol, state)) return tokens def _sample_probdist(self, probdist, p, samples): cum_p = 0 for sample in samples: add_p = probdist.prob(sample) if cum_p <= p <= cum_p + add_p: return sample cum_p += add_p raise Exception("Invalid probability distribution - " "does not sum to one") def entropy(self, unlabeled_sequence): unlabeled_sequence = self._transform(unlabeled_sequence) T = len(unlabeled_sequence) N = len(self._states) alpha = self._forward_probability(unlabeled_sequence) beta = self._backward_probability(unlabeled_sequence) normalisation = logsumexp2(alpha[T - 1]) entropy = normalisation for i, state in enumerate(self._states): p = 2 ** (alpha[0, i] + beta[0, i] - normalisation) entropy -= p * self._priors.logprob(state) for t0 in range(T - 1): t1 = t0 + 1 for i0, s0 in enumerate(self._states): for i1, s1 in 
enumerate(self._states): p = 2 ** ( alpha[t0, i0] + self._transitions[s0].logprob(s1) + self._outputs[s1].logprob(unlabeled_sequence[t1][_TEXT]) + beta[t1, i1] - normalisation ) entropy -= p * self._transitions[s0].logprob(s1) for t in range(T): for i, state in enumerate(self._states): p = 2 ** (alpha[t, i] + beta[t, i] - normalisation) entropy -= p * self._outputs[state].logprob( unlabeled_sequence[t][_TEXT] ) return entropy def point_entropy(self, unlabeled_sequence): unlabeled_sequence = self._transform(unlabeled_sequence) T = len(unlabeled_sequence) N = len(self._states) alpha = self._forward_probability(unlabeled_sequence) beta = self._backward_probability(unlabeled_sequence) normalisation = logsumexp2(alpha[T - 1]) entropies = np.zeros(T, np.float64) probs = np.zeros(N, np.float64) for t in range(T): for s in range(N): probs[s] = alpha[t, s] + beta[t, s] - normalisation for s in range(N): entropies[t] -= 2 ** (probs[s]) * probs[s] return entropies def _exhaustive_entropy(self, unlabeled_sequence): unlabeled_sequence = self._transform(unlabeled_sequence) T = len(unlabeled_sequence) N = len(self._states) labellings = [[state] for state in self._states] for t in range(T - 1): current = labellings labellings = [] for labelling in current: for state in self._states: labellings.append(labelling + [state]) log_probs = [] for labelling in labellings: labeled_sequence = unlabeled_sequence[:] for t, label in enumerate(labelling): labeled_sequence[t] = (labeled_sequence[t][_TEXT], label) lp = self.log_probability(labeled_sequence) log_probs.append(lp) normalisation = _log_add(*log_probs) entropy = 0 for lp in log_probs: lp -= normalisation entropy -= 2 ** (lp) * lp return entropy def _exhaustive_point_entropy(self, unlabeled_sequence): unlabeled_sequence = self._transform(unlabeled_sequence) T = len(unlabeled_sequence) N = len(self._states) labellings = [[state] for state in self._states] for t in range(T - 1): current = labellings labellings = [] for labelling in current: for state in self._states: labellings.append(labelling + [state]) log_probs = [] for labelling in labellings: labelled_sequence = unlabeled_sequence[:] for t, label in enumerate(labelling): labelled_sequence[t] = (labelled_sequence[t][_TEXT], label) lp = self.log_probability(labelled_sequence) log_probs.append(lp) normalisation = _log_add(*log_probs) probabilities = _ninf_array((T, N)) for labelling, lp in zip(labellings, log_probs): lp -= normalisation for t, label in enumerate(labelling): index = self._states.index(label) probabilities[t, index] = _log_add(probabilities[t, index], lp) entropies = np.zeros(T, np.float64) for t in range(T): for s in range(N): entropies[t] -= 2 ** (probabilities[t, s]) * probabilities[t, s] return entropies def _transitions_matrix(self): trans_iter = ( self._transitions[sj].logprob(si) for sj in self._states for si in self._states ) transitions_logprob = np.fromiter(trans_iter, dtype=np.float64) N = len(self._states) return transitions_logprob.reshape((N, N)).T def _outputs_vector(self, symbol): out_iter = (self._output_logprob(sj, symbol) for sj in self._states) return np.fromiter(out_iter, dtype=np.float64) def _forward_probability(self, unlabeled_sequence): T = len(unlabeled_sequence) N = len(self._states) alpha = _ninf_array((T, N)) transitions_logprob = self._transitions_matrix() symbol = unlabeled_sequence[0][_TEXT] for i, state in enumerate(self._states): alpha[0, i] = self._priors.logprob(state) + self._output_logprob( state, symbol ) for t in range(1, T): symbol = 
unlabeled_sequence[t][_TEXT] output_logprob = self._outputs_vector(symbol) for i in range(N): summand = alpha[t - 1] + transitions_logprob[i] alpha[t, i] = logsumexp2(summand) + output_logprob[i] return alpha def _backward_probability(self, unlabeled_sequence): T = len(unlabeled_sequence) N = len(self._states) beta = _ninf_array((T, N)) transitions_logprob = self._transitions_matrix().T beta[T - 1, :] = np.log2(1) for t in range(T - 2, -1, -1): symbol = unlabeled_sequence[t + 1][_TEXT] outputs = self._outputs_vector(symbol) for i in range(N): summand = transitions_logprob[i] + beta[t + 1] + outputs beta[t, i] = logsumexp2(summand) return beta def test(self, test_sequence, verbose=False, **kwargs): def words(sent): return [word for (word, tag) in sent] def tags(sent): return [tag for (word, tag) in sent] def flatten(seq): return list(itertools.chain(*seq)) test_sequence = self._transform(test_sequence) predicted_sequence = list(map(self._tag, map(words, test_sequence))) if verbose: for test_sent, predicted_sent in zip(test_sequence, predicted_sequence): print( "Test:", " ".join(f"{token}/{tag}" for (token, tag) in test_sent), ) print() print("Untagged:", " ".join("%s" % token for (token, tag) in test_sent)) print() print( "HMM-tagged:", " ".join(f"{token}/{tag}" for (token, tag) in predicted_sent), ) print() print( "Entropy:", self.entropy([(token, None) for (token, tag) in predicted_sent]), ) print() print("-" * 60) test_tags = flatten(map(tags, test_sequence)) predicted_tags = flatten(map(tags, predicted_sequence)) acc = accuracy(test_tags, predicted_tags) count = sum(len(sent) for sent in test_sequence) print("accuracy over %d tokens: %.2f" % (count, acc * 100)) def __repr__(self): return "<HiddenMarkovModelTagger %d states and %d output symbols>" % ( len(self._states), len(self._symbols), ) class HiddenMarkovModelTrainer: def __init__(self, states=None, symbols=None): self._states = states if states else [] self._symbols = symbols if symbols else [] def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs): assert labeled_sequences or unlabeled_sequences model = None if labeled_sequences: model = self.train_supervised(labeled_sequences, **kwargs) if unlabeled_sequences: if model: kwargs["model"] = model model = self.train_unsupervised(unlabeled_sequences, **kwargs) return model def _baum_welch_step(self, sequence, model, symbol_to_number): N = len(model._states) M = len(model._symbols) T = len(sequence) alpha = model._forward_probability(sequence) beta = model._backward_probability(sequence) lpk = logsumexp2(alpha[T - 1]) A_numer = _ninf_array((N, N)) B_numer = _ninf_array((N, M)) A_denom = _ninf_array(N) B_denom = _ninf_array(N) transitions_logprob = model._transitions_matrix().T for t in range(T): symbol = sequence[t][_TEXT] next_symbol = None if t < T - 1: next_symbol = sequence[t + 1][_TEXT] xi = symbol_to_number[symbol] next_outputs_logprob = model._outputs_vector(next_symbol) alpha_plus_beta = alpha[t] + beta[t] if t < T - 1: numer_add = ( transitions_logprob + next_outputs_logprob + beta[t + 1] + alpha[t].reshape(N, 1) ) A_numer = np.logaddexp2(A_numer, numer_add) A_denom = np.logaddexp2(A_denom, alpha_plus_beta) else: B_denom = np.logaddexp2(A_denom, alpha_plus_beta) B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta) return lpk, A_numer, A_denom, B_numer, B_denom def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs): model = kwargs.get("model") if not model: priors = RandomProbDist(self._states) transitions = 
DictionaryConditionalProbDist( {state: RandomProbDist(self._states) for state in self._states} ) outputs = DictionaryConditionalProbDist( {state: RandomProbDist(self._symbols) for state in self._states} ) model = HiddenMarkovModelTagger( self._symbols, self._states, transitions, outputs, priors ) self._states = model._states self._symbols = model._symbols N = len(self._states) M = len(self._symbols) symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)} model._transitions = DictionaryConditionalProbDist( { s: MutableProbDist(model._transitions[s], self._states) for s in self._states } ) if update_outputs: model._outputs = DictionaryConditionalProbDist( { s: MutableProbDist(model._outputs[s], self._symbols) for s in self._states } ) model.reset_cache() converged = False last_logprob = None iteration = 0 max_iterations = kwargs.get("max_iterations", 1000) epsilon = kwargs.get("convergence_logprob", 1e-6) while not converged and iteration < max_iterations: A_numer = _ninf_array((N, N)) B_numer = _ninf_array((N, M)) A_denom = _ninf_array(N) B_denom = _ninf_array(N) logprob = 0 for sequence in unlabeled_sequences: sequence = list(sequence) if not sequence: continue ( lpk, seq_A_numer, seq_A_denom, seq_B_numer, seq_B_denom, ) = self._baum_welch_step(sequence, model, symbol_numbers) for i in range(N): A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk) B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk) A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk) B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk) logprob += lpk for i in range(N): logprob_Ai = A_numer[i] - A_denom[i] logprob_Bi = B_numer[i] - B_denom[i] logprob_Ai -= logsumexp2(logprob_Ai) logprob_Bi -= logsumexp2(logprob_Bi) si = self._states[i] for j in range(N): sj = self._states[j] model._transitions[si].update(sj, logprob_Ai[j]) if update_outputs: for k in range(M): ok = self._symbols[k] model._outputs[si].update(ok, logprob_Bi[k]) if iteration > 0 and abs(logprob - last_logprob) < epsilon: converged = True print("iteration", iteration, "logprob", logprob) iteration += 1 last_logprob = logprob return model def train_supervised(self, labelled_sequences, estimator=None): if estimator is None: estimator = lambda fdist, bins: MLEProbDist(fdist) known_symbols = set(self._symbols) known_states = set(self._states) starting = FreqDist() transitions = ConditionalFreqDist() outputs = ConditionalFreqDist() for sequence in labelled_sequences: lasts = None for token in sequence: state = token[_TAG] symbol = token[_TEXT] if lasts is None: starting[state] += 1 else: transitions[lasts][state] += 1 outputs[state][symbol] += 1 lasts = state if state not in known_states: self._states.append(state) known_states.add(state) if symbol not in known_symbols: self._symbols.append(symbol) known_symbols.add(symbol) N = len(self._states) pi = estimator(starting, N) A = ConditionalProbDist(transitions, estimator, N) B = ConditionalProbDist(outputs, estimator, len(self._symbols)) return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi) def _ninf_array(shape): res = np.empty(shape, np.float64) res.fill(-np.inf) return res def logsumexp2(arr): max_ = arr.max() return np.log2(np.sum(2 ** (arr - max_))) + max_ def _log_add(*values): x = max(values) if x > -np.inf: sum_diffs = 0 for value in values: sum_diffs += 2 ** (value - x) return x + np.log2(sum_diffs) else: return x def _create_hmm_tagger(states, symbols, A, B, pi): def pd(values, samples): d = dict(zip(samples, values)) return DictionaryProbDist(d) def cpd(array, 
conditions, samples): d = {} for values, condition in zip(array, conditions): d[condition] = pd(values, samples) return DictionaryConditionalProbDist(d) A = cpd(A, states, states) B = cpd(B, states, symbols) pi = pd(pi, states) return HiddenMarkovModelTagger( symbols=symbols, states=states, transitions=A, outputs=B, priors=pi ) def _market_hmm_example(): states = ["bull", "bear", "static"] symbols = ["up", "down", "unchanged"] A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64) B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64) pi = np.array([0.5, 0.2, 0.3], np.float64) model = _create_hmm_tagger(states, symbols, A, B, pi) return model, states, symbols def demo(): print() print("HMM probability calculation demo") print() model, states, symbols = _market_hmm_example() print("Testing", model) for test in [ ["up", "up"], ["up", "down", "up"], ["down"] * 5, ["unchanged"] * 5 + ["up"], ]: sequence = [(t, None) for t in test] print("Testing with state sequence", test) print("probability =", model.probability(sequence)) print("tagging = ", model.tag([word for (word, tag) in sequence])) print("p(tagged) = ", model.probability(sequence)) print("H = ", model.entropy(sequence)) print("H_exh = ", model._exhaustive_entropy(sequence)) print("H(point) = ", model.point_entropy(sequence)) print("H_exh(point)=", model._exhaustive_point_entropy(sequence)) print() def load_pos(num_sents): from nltk.corpus import brown sentences = brown.tagged_sents(categories="news")[:num_sents] tag_re = re.compile(r"[*]|--|[^+*-]+") tag_set = set() symbols = set() cleaned_sentences = [] for sentence in sentences: for i in range(len(sentence)): word, tag = sentence[i] word = word.lower() symbols.add(word) tag = tag_re.match(tag).group() tag_set.add(tag) sentence[i] = (word, tag) cleaned_sentences += [sentence] return cleaned_sentences, list(tag_set), list(symbols) def demo_pos(): print() print("HMM POS tagging demo") print() print("Training HMM...") labelled_sequences, tag_set, symbols = load_pos(20000) trainer = HiddenMarkovModelTrainer(tag_set, symbols) hmm = trainer.train_supervised( labelled_sequences[10:], estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), ) print("Testing...") hmm.test(labelled_sequences[:10], verbose=True) def _untag(sentences): unlabeled = [] for sentence in sentences: unlabeled.append([(token[_TEXT], None) for token in sentence]) return unlabeled def demo_pos_bw( test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5 ): print() print("Baum-Welch demo for POS tagging") print() print("Training HMM (supervised, %d sentences)..." % supervised) sentences, tag_set, symbols = load_pos(test + supervised + unsupervised) symbols = set() for sentence in sentences: for token in sentence: symbols.add(token[_TEXT]) trainer = HiddenMarkovModelTrainer(tag_set, list(symbols)) hmm = trainer.train_supervised( sentences[test : test + supervised], estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), ) hmm.test(sentences[:test], verbose=verbose) print("Training (unsupervised, %d sentences)..." 
% unsupervised) unlabeled = _untag(sentences[test + supervised :]) hmm = trainer.train_unsupervised( unlabeled, model=hmm, max_iterations=max_iterations ) hmm.test(sentences[:test], verbose=verbose) def demo_bw(): print() print("Baum-Welch demo for market example") print() model, states, symbols = _market_hmm_example() training = [] import random rng = random.Random() rng.seed(0) for i in range(10): item = model.random_sample(rng, 5) training.append([(i[0], None) for i in item]) trainer = HiddenMarkovModelTrainer(states, symbols) hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000)
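A short supervised-training sketch for the tagger implemented above; it assumes the Brown corpus has been fetched (e.g. via nltk.download('brown')), and the 90/10 split is arbitrary, purely for illustration.

from nltk.corpus import brown
from nltk.tag import HiddenMarkovModelTagger

tagged_sents = brown.tagged_sents(categories="news")[:1000]
split = int(len(tagged_sents) * 0.9)
train_sents, test_sents = tagged_sents[:split], tagged_sents[split:]

# train() builds the tag/symbol alphabets and smoothed (Lidstone) estimates internally.
hmm_tagger = HiddenMarkovModelTagger.train(train_sents)

# Tag the words of a held-out sentence with the Viterbi decoder.
print(hmm_tagger.tag([word for word, tag in test_sents[0]]))

# test() prints accuracy over the held-out sentences.
hmm_tagger.test(test_sents)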
natural language toolkit interface to the hunpos postagger c 20012023 nltk project peter ljunglf peter ljunglofheatherleaf se dvid mrk nemeskey nemeskeydgmail com modifications attila zsder zsedergmail com modifications url https www nltk org for license information see license txt a module for interfacing with the hunpos opensource postagger the default encoding used by hunpos iso88591 tags a single sentence a list of words the tokens should not contain any newline characters we write a final empty line to tell hunpos that the sentence is finished we have to read and dismiss the final empty line natural language toolkit interface to the hunpos pos tagger c 2001 2023 nltk project peter ljunglöf peter ljunglof heatherleaf se dávid márk nemeskey nemeskeyd gmail com modifications attila zséder zseder gmail com modifications url https www nltk org for license information see license txt a module for interfacing with the hunpos open source pos tagger the default encoding used by hunpos iso 8859 1 a class for pos tagging with hunpos the input is the paths to a model trained on training data optionally the path to the hunpos tag binary optionally the encoding of the training data default iso 8859 1 check whether the required hunpos tag binary is available from nltk test setup_fixt import check_binary check_binary hunpos tag example from nltk tag import hunpostagger ht hunpostagger en_wsj model ht tag what is the airspeed of an unladen swallow split what wp is vbz the dt airspeed nn of in an dt unladen nn swallow vb ht close this class communicates with the hunpos tag binary via pipes when the tagger object is no longer needed the close method should be called to free system resources the class supports the context manager interface if used in a with statement the close method is invoked automatically with hunpostagger en_wsj model as ht ht tag what is the airspeed of an unladen swallow split what wp is vbz the dt airspeed nn of in an dt unladen nn swallow vb starts the hunpos tag executable and establishes a connection with it param path_to_model the model file param path_to_bin the hunpos tag binary param encoding the encoding used by the model unicode tokens passed to the tag and tag_sents methods are converted to this charset when they are sent to hunpos tag the default is iso 8859 1 latin 1 this parameter is ignored for str tokens which are sent as is the caller must ensure that tokens are encoded in the right charset closes the pipe to the hunpos executable tags a single sentence a list of words the tokens should not contain any newline characters we write a final empty line to tell hunpos that the sentence is finished we have to read and dismiss the final empty line
import os from subprocess import PIPE, Popen from nltk.internals import find_binary, find_file from nltk.tag.api import TaggerI _hunpos_url = "https://code.google.com/p/hunpos/" _hunpos_charset = "ISO-8859-1" class HunposTagger(TaggerI): def __init__( self, path_to_model, path_to_bin=None, encoding=_hunpos_charset, verbose=False ): self._closed = True hunpos_paths = [ ".", "/usr/bin", "/usr/local/bin", "/opt/local/bin", "/Applications/bin", "~/bin", "~/Applications/bin", ] hunpos_paths = list(map(os.path.expanduser, hunpos_paths)) self._hunpos_bin = find_binary( "hunpos-tag", path_to_bin, env_vars=("HUNPOS_TAGGER",), searchpath=hunpos_paths, url=_hunpos_url, verbose=verbose, ) self._hunpos_model = find_file( path_to_model, env_vars=("HUNPOS_TAGGER",), verbose=verbose ) self._encoding = encoding self._hunpos = Popen( [self._hunpos_bin, self._hunpos_model], shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE, ) self._closed = False def __del__(self): self.close() def close(self): if not self._closed: self._hunpos.communicate() self._closed = True def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def tag(self, tokens): for token in tokens: assert "\n" not in token, "Tokens should not contain newlines" if isinstance(token, str): token = token.encode(self._encoding) self._hunpos.stdin.write(token + b"\n") self._hunpos.stdin.write(b"\n") self._hunpos.stdin.flush() tagged_tokens = [] for token in tokens: tagged = self._hunpos.stdout.readline().strip().split(b"\t") tag = tagged[1] if len(tagged) > 1 else None tagged_tokens.append((token, tag)) self._hunpos.stdout.readline() return tagged_tokens
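A sketch of typical usage, assuming the hunpos-tag binary and the English en_wsj.model file referenced in the docstring above are installed locally; the paths below are illustrative, not fixed locations:

from nltk.tag import HunposTagger

# Illustrative paths; if omitted, HunposTagger also consults the HUNPOS_TAGGER
# environment variable and a few standard install locations.
with HunposTagger("en_wsj.model", path_to_bin="/usr/local/bin/hunpos-tag") as ht:
    print(ht.tag("What is the airspeed of an unladen swallow ?".split()))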
Natural Language Toolkit: Tagset Mapping
(C) 2001-2023 NLTK Project
Author: Nathan Schneider <nathan@cmu.edu>
        Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Interface for converting POS tags from various treebanks to the universal tagset of Petrov, Das, & McDonald. The tagset consists of the following 12 coarse tags:

VERB - verbs (all tenses and modes)
NOUN - nouns (common and proper)
PRON - pronouns
ADJ - adjectives
ADV - adverbs
ADP - adpositions (prepositions and postpositions)
CONJ - conjunctions
DET - determiners
NUM - cardinal numbers
PRT - particles or other function words
X - other: foreign words, typos, abbreviations
. - punctuation

See https://arxiv.org/abs/1104.2086 and https://code.google.com/p/universal-pos-tags

_MAPPINGS = defaultdict(lambda: defaultdict(dict)): the mapping between tagset t1 and t2; returns UNK if applied to an unrecognized tag. When mapping to the universal tagset, unknown inputs are mapped to X, not UNK.

tagset_mapping(source, target): retrieve the mapping dictionary between tagsets.

    >>> tagset_mapping('ru-rnc', 'universal') == {'!': '.', 'A': 'ADJ', 'C': 'CONJ', 'AD': 'ADV',
    ... 'NN': 'NOUN', 'VG': 'VERB', 'COMP': 'CONJ', 'NC': 'NUM', 'VP': 'VERB', 'P': 'ADP',
    ... 'IJ': 'X', 'V': 'VERB', 'Z': 'X', 'VI': 'VERB', 'YES_NO_SENT': 'X', 'PTCL': 'PRT'}
    True

Added the new Russian National Corpus mappings because the Russian model for nltk.pos_tag uses it.

map_tag(source, target, source_tag): maps the tag from the source tagset to the target tagset.

    >>> map_tag('en-ptb', 'universal', 'VBZ')
    'VERB'
    >>> map_tag('en-ptb', 'universal', 'VBP')
    'VERB'
    >>> map_tag('en-ptb', 'universal', '``')
    '.'

We need a systematic approach to naming.
from collections import defaultdict from os.path import join from nltk.data import load _UNIVERSAL_DATA = "taggers/universal_tagset" _UNIVERSAL_TAGS = ( "VERB", "NOUN", "PRON", "ADJ", "ADV", "ADP", "CONJ", "DET", "NUM", "PRT", "X", ".", ) _MAPPINGS = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "UNK"))) def _load_universal_map(fileid): contents = load(join(_UNIVERSAL_DATA, fileid + ".map"), format="text") _MAPPINGS[fileid]["universal"].default_factory = lambda: "X" for line in contents.splitlines(): line = line.strip() if line == "": continue fine, coarse = line.split("\t") assert coarse in _UNIVERSAL_TAGS, f"Unexpected coarse tag: {coarse}" assert ( fine not in _MAPPINGS[fileid]["universal"] ), f"Multiple entries for original tag: {fine}" _MAPPINGS[fileid]["universal"][fine] = coarse def tagset_mapping(source, target): if source not in _MAPPINGS or target not in _MAPPINGS[source]: if target == "universal": _load_universal_map(source) _MAPPINGS["ru-rnc-new"]["universal"] = { "A": "ADJ", "A-PRO": "PRON", "ADV": "ADV", "ADV-PRO": "PRON", "ANUM": "ADJ", "CONJ": "CONJ", "INTJ": "X", "NONLEX": ".", "NUM": "NUM", "PARENTH": "PRT", "PART": "PRT", "PR": "ADP", "PRAEDIC": "PRT", "PRAEDIC-PRO": "PRON", "S": "NOUN", "S-PRO": "PRON", "V": "VERB", } return _MAPPINGS[source][target] def map_tag(source, target, source_tag): if target == "universal": if source == "wsj": source = "en-ptb" if source == "brown": source = "en-brown" return tagset_mapping(source, target)[source_tag]
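A short example of the two public entry points. The fine-to-coarse tables are loaded from the universal_tagset resource in nltk_data (available, for instance, via nltk.download('universal_tagset')); the "FOO" input is just an illustration of the unknown-tag fallback:

from nltk.tag import map_tag
from nltk.tag.mapping import tagset_mapping

print(map_tag("en-ptb", "universal", "VBZ"))   # 'VERB'
print(map_tag("en-ptb", "universal", "NNS"))   # 'NOUN'
print(map_tag("en-ptb", "universal", "FOO"))   # 'X' -- unknown fine tags fall back to X

# The full fine-to-coarse dictionary for a tagset pair:
ptb_to_universal = tagset_mapping("en-ptb", "universal")
print(len(ptb_to_universal))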
this module is a port of the textblob averaged perceptron tagger matthew honnibal honnibalghgmail com long duong longdt219gmail com nltk port url https github comsloriatextblobaptagger https www nltk org 2013 matthew honnibal nltk modifications 2015 the nltk project this module is provided under the terms of the mit license an averaged perceptron as implemented by matthew honnibal see more implementation details here https explosion aiblogpartofspeechpostaggerinpython each feature gets its own weight vector so weights is a dictofdicts the accumulated values for the averaging these will be keyed by featureclas tuples the last time the feature was changed for the averaging also keyed by featureclas tuples tstamps is short for timestamps number of instances seen dotproduct the features and current weights and return the best label scores defaultdictfloat for feat value in features items if feat not in self weights or value 0 continue weights self weightsfeat for label weight in weights items scoreslabel value weight do a secondary alphabetic sort for stability bestlabel maxself classes keylambda label scoreslabel label compute the confidence conf maxself softmaxscores if returnconf true else none return bestlabel conf def updateself truth guess features average weights from all iterations for feat weights in self weights items newfeatweights for clas weight in weights items param feat clas total self totalsparam total self i self tstampsparam weight averaged roundtotal self i 3 if averaged newfeatweightsclas averaged self weightsfeat newfeatweights def saveself path load the pickled model weights self weights loadpath def encodejsonobjself return self weights classmethod def decodejsonobjcls obj return clsobj jsontags registertag class perceptrontaggertaggeri jsontag nltk tag sequential perceptrontagger start start start2 end end end2 def initself loadtrue self model averagedperceptron self tagdict self classes set if load apmodelloc file str findtaggersaveragedperceptrontagger pickle self loadapmodelloc def tagself tokens returnconffalse usetagdicttrue prev prev2 self start output context self start self normalizew for w in tokens self end for i word in enumeratetokens tag conf self tagdict getword 1 0 if usetagdict true else none none if not tag features self getfeaturesi word context prev prev2 tag conf self model predictfeatures returnconf output appendword tag conf if returnconf true else word tag prev2 prev prev tag return output def trainself sentences savelocnone nriter5 we d like to allow sentences to be either a list or an iterator the latter being especially important for a large training dataset because self maketagdictsentences runs regardless we make it populate self sentences a list with all the sentences this saves the overheard of just iterating through sentences to get the list by sentences listsentences self sentences list to be populated by self maketagdict self maketagdictsentences self model classes self classes for iter in rangenriter c 0 n 0 for sentence in self sentences words tags zipsentence prev prev2 self start context self start self normalizew for w in words self end for i word in enumeratewords guess self tagdict getword if not guess feats self getfeaturesi word context prev prev2 guess self model predictfeats self model updatetagsi guess feats prev2 prev prev guess c guess tagsi n 1 random shuffleself sentences logging infofiter iter cnpcc n we don t need the training sentences anymore and we don t want to waste space on them when we pickle the trained tagger 
self sentences none self model averageweights pickle as a binary file if saveloc is not none with opensaveloc wb as fout changed protocol from 1 to 2 to make pickling python 2 compatible pickle dumpself model weights self tagdict self classes fout 2 def loadself loc self model weights self tagdict self classes loadloc self model classes self classes def encodejsonobjself return self model weights self tagdict listself classes classmethod def decodejsonobjcls obj tagger clsloadfalse tagger model weights tagger tagdict tagger classes obj tagger classes settagger classes tagger model classes tagger classes return tagger def normalizeself word if in word and word0 return hyphen if word isdigit and lenword 4 return year if word and word0 isdigit return digits return word lower def getfeaturesself i word context prev prev2 def addname args features joinname tupleargs 1 i lenself start features defaultdictint it s useful to have a constant feature which acts sort of like a prior addbias addi suffix word3 addi pref1 word0 if word else addi1 tag prev addi2 tag prev2 addi tagi2 tag prev prev2 addi word contexti addi1 tagi word prev contexti addi1 word contexti 1 addi1 suffix contexti 13 addi2 word contexti 2 addi1 word contexti 1 addi1 suffix contexti 13 addi2 word contexti 2 return features def maketagdictself sentences counts defaultdictlambda defaultdictint for sentence in sentences self sentences appendsentence for word tag in sentence countswordtag 1 self classes addtag freqthresh 20 ambiguitythresh 0 97 for word tagfreqs in counts items tag mode maxtagfreqs items keylambda item item1 n sumtagfreqs values don t add rare words to the tag dictionary only add quite unambiguous words if n freqthresh and mode n ambiguitythresh self tagdictword tag def pcn d return n d 100 def loaddataconllformatfilename printread from file filename with openfilename rb as fin sentences sentence for line in fin readlines line line strip print line if lenline 0 sentences appendsentence sentence continue tokens line splitt word tokens1 tag tokens4 sentence appendword tag return sentences def getpretrainmodel train and test on english part of conll data wsj part of penn treebank train section 211 test section 23 tagger perceptrontagger training loaddataconllformatenglishptbtrain conll testing loaddataconllformatenglishptbtest conll printsize of training and testing sentence lentraining lentesting train and save the model tagger traintraining pickle printaccuracy tagger accuracytesting if name main getpretrainmodel pass this module is a port of the textblob averaged perceptron tagger matthew honnibal honnibal gh gmail com long duong longdt219 gmail com nltk port url https github com sloria textblob aptagger https www nltk org 2013 matthew honnibal nltk modifications 2015 the nltk project this module is provided under the terms of the mit license an averaged perceptron as implemented by matthew honnibal see more implementation details here https explosion ai blog part of speech pos tagger in python each feature gets its own weight vector so weights is a dict of dicts the accumulated values for the averaging these will be keyed by feature clas tuples the last time the feature was changed for the averaging also keyed by feature clas tuples tstamps is short for timestamps number of instances seen dot product the features and current weights and return the best label do a secondary alphabetic sort for stability compute the confidence update the feature weights average weights from all iterations save the pickled model weights 
load the pickled model weights greedy averaged perceptron tagger as implemented by matthew honnibal see more implementation details here https explosion ai blog part of speech pos tagger in python from nltk tag perceptron import perceptrontagger train the model tagger perceptrontagger load false tagger train today nn is vbz good jj day nn yes nns it prp beautiful jj tagger tag today is a beautiful day today nn is prp a prp beautiful jj day nn use the pretrain model the default constructor pretrain perceptrontagger pretrain tag the quick brown fox jumps over the lazy dog split the dt quick jj brown nn fox nn jumps vbz over in the dt lazy jj dog nn pretrain tag the red cat split the dt red jj cat nn param load load the pickled model upon instantiation tag tokenized sentences params tokens list of word type tokens list str train a model from sentences and save it at save_loc nr_iter controls the number of perceptron training iterations param sentences a list or iterator of sentences where each sentence is a list of words tags tuples param save_loc if not none saves a pickled model in this location param nr_iter number of training iterations we d like to allow sentences to be either a list or an iterator the latter being especially important for a large training dataset because self _make_tagdict sentences runs regardless we make it populate self _sentences a list with all the sentences this saves the overheard of just iterating through sentences to get the list by sentences list sentences to be populated by self _make_tagdict we don t need the training sentences anymore and we don t want to waste space on them when we pickle the trained tagger pickle as a binary file changed protocol from 1 to 2 to make pickling python 2 compatible param loc load a pickled model at location type loc str normalization used in pre processing all words are lower cased groups of digits of length 4 are represented as year other digits are represented as digits rtype str map tokens into a feature representation implemented as a hashable int dict if the features change a new model must be trained it s useful to have a constant feature which acts sort of like a prior make a tag dictionary for single tag words param sentences a list of list of word tag tuples don t add rare words to the tag dictionary only add quite unambiguous words print line train and test on english part of conll data wsj part of penn treebank train section 2 11 test section 23 train and save the model _get_pretrain_model
import logging import pickle import random from collections import defaultdict from nltk import jsontags from nltk.data import find, load from nltk.tag.api import TaggerI try: import numpy as np except ImportError: pass PICKLE = "averaged_perceptron_tagger.pickle" @jsontags.register_tag class AveragedPerceptron: json_tag = "nltk.tag.perceptron.AveragedPerceptron" def __init__(self, weights=None): self.weights = weights if weights else {} self.classes = set() self._totals = defaultdict(int) self._tstamps = defaultdict(int) self.i = 0 def _softmax(self, scores): s = np.fromiter(scores.values(), dtype=float) exps = np.exp(s) return exps / np.sum(exps) def predict(self, features, return_conf=False): scores = defaultdict(float) for feat, value in features.items(): if feat not in self.weights or value == 0: continue weights = self.weights[feat] for label, weight in weights.items(): scores[label] += value * weight best_label = max(self.classes, key=lambda label: (scores[label], label)) conf = max(self._softmax(scores)) if return_conf == True else None return best_label, conf def update(self, truth, guess, features): def upd_feat(c, f, w, v): param = (f, c) self._totals[param] += (self.i - self._tstamps[param]) * w self._tstamps[param] = self.i self.weights[f][c] = w + v self.i += 1 if truth == guess: return None for f in features: weights = self.weights.setdefault(f, {}) upd_feat(truth, f, weights.get(truth, 0.0), 1.0) upd_feat(guess, f, weights.get(guess, 0.0), -1.0) def average_weights(self): for feat, weights in self.weights.items(): new_feat_weights = {} for clas, weight in weights.items(): param = (feat, clas) total = self._totals[param] total += (self.i - self._tstamps[param]) * weight averaged = round(total / self.i, 3) if averaged: new_feat_weights[clas] = averaged self.weights[feat] = new_feat_weights def save(self, path): with open(path, "wb") as fout: return pickle.dump(dict(self.weights), fout) def load(self, path): self.weights = load(path) def encode_json_obj(self): return self.weights @classmethod def decode_json_obj(cls, obj): return cls(obj) @jsontags.register_tag class PerceptronTagger(TaggerI): json_tag = "nltk.tag.sequential.PerceptronTagger" START = ["-START-", "-START2-"] END = ["-END-", "-END2-"] def __init__(self, load=True): self.model = AveragedPerceptron() self.tagdict = {} self.classes = set() if load: AP_MODEL_LOC = "file:" + str( find("taggers/averaged_perceptron_tagger/" + PICKLE) ) self.load(AP_MODEL_LOC) def tag(self, tokens, return_conf=False, use_tagdict=True): prev, prev2 = self.START output = [] context = self.START + [self.normalize(w) for w in tokens] + self.END for i, word in enumerate(tokens): tag, conf = ( (self.tagdict.get(word), 1.0) if use_tagdict == True else (None, None) ) if not tag: features = self._get_features(i, word, context, prev, prev2) tag, conf = self.model.predict(features, return_conf) output.append((word, tag, conf) if return_conf == True else (word, tag)) prev2 = prev prev = tag return output def train(self, sentences, save_loc=None, nr_iter=5): self._sentences = list() self._make_tagdict(sentences) self.model.classes = self.classes for iter_ in range(nr_iter): c = 0 n = 0 for sentence in self._sentences: words, tags = zip(*sentence) prev, prev2 = self.START context = self.START + [self.normalize(w) for w in words] + self.END for i, word in enumerate(words): guess = self.tagdict.get(word) if not guess: feats = self._get_features(i, word, context, prev, prev2) guess, _ = self.model.predict(feats) self.model.update(tags[i], guess, feats) 
prev2 = prev prev = guess c += guess == tags[i] n += 1 random.shuffle(self._sentences) logging.info(f"Iter {iter_}: {c}/{n}={_pc(c, n)}") self._sentences = None self.model.average_weights() if save_loc is not None: with open(save_loc, "wb") as fout: pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2) def load(self, loc): self.model.weights, self.tagdict, self.classes = load(loc) self.model.classes = self.classes def encode_json_obj(self): return self.model.weights, self.tagdict, list(self.classes) @classmethod def decode_json_obj(cls, obj): tagger = cls(load=False) tagger.model.weights, tagger.tagdict, tagger.classes = obj tagger.classes = set(tagger.classes) tagger.model.classes = tagger.classes return tagger def normalize(self, word): if "-" in word and word[0] != "-": return "!HYPHEN" if word.isdigit() and len(word) == 4: return "!YEAR" if word and word[0].isdigit(): return "!DIGITS" return word.lower() def _get_features(self, i, word, context, prev, prev2): def add(name, *args): features[" ".join((name,) + tuple(args))] += 1 i += len(self.START) features = defaultdict(int) add("bias") add("i suffix", word[-3:]) add("i pref1", word[0] if word else "") add("i-1 tag", prev) add("i-2 tag", prev2) add("i tag+i-2 tag", prev, prev2) add("i word", context[i]) add("i-1 tag+i word", prev, context[i]) add("i-1 word", context[i - 1]) add("i-1 suffix", context[i - 1][-3:]) add("i-2 word", context[i - 2]) add("i+1 word", context[i + 1]) add("i+1 suffix", context[i + 1][-3:]) add("i+2 word", context[i + 2]) return features def _make_tagdict(self, sentences): counts = defaultdict(lambda: defaultdict(int)) for sentence in sentences: self._sentences.append(sentence) for word, tag in sentence: counts[word][tag] += 1 self.classes.add(tag) freq_thresh = 20 ambiguity_thresh = 0.97 for word, tag_freqs in counts.items(): tag, mode = max(tag_freqs.items(), key=lambda item: item[1]) n = sum(tag_freqs.values()) if n >= freq_thresh and (mode / n) >= ambiguity_thresh: self.tagdict[word] = tag def _pc(n, d): return (n / d) * 100 def _load_data_conll_format(filename): print("Read from file: ", filename) with open(filename, "rb") as fin: sentences = [] sentence = [] for line in fin.readlines(): line = line.strip() if len(line) == 0: sentences.append(sentence) sentence = [] continue tokens = line.split("\t") word = tokens[1] tag = tokens[4] sentence.append((word, tag)) return sentences def _get_pretrain_model(): tagger = PerceptronTagger() training = _load_data_conll_format("english_ptb_train.conll") testing = _load_data_conll_format("english_ptb_test.conll") print("Size of training and testing (sentence)", len(training), len(testing)) tagger.train(training, PICKLE) print("Accuracy : ", tagger.accuracy(testing)) if __name__ == "__main__": pass
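A sketch mirroring the tiny training example from the docstring above; load=False skips the pretrained pickle, so no model download is needed, and the toy corpus is far too small for useful accuracy:

from nltk.tag.perceptron import PerceptronTagger

tagger = PerceptronTagger(load=False)
tagger.train(
    [
        [("today", "NN"), ("is", "VBZ"), ("good", "JJ"), ("day", "NN")],
        [("yes", "NNS"), ("it", "PRP"), ("beautiful", "JJ")],
    ]
)
print(tagger.tag("today is a beautiful day".split()))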
Natural Language Toolkit: Senna POS Tagger
(C) 2001-2023 NLTK Project
Author: Rami Al-Rfou' <ralrfou@cs.stonybrook.edu>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Senna POS tagger, NER Tagger, Chunk Tagger

The input is:
- path to the directory that contains SENNA executables. If the path is incorrect, SennaTagger will automatically search for the executable file specified in the SENNA environment variable
- (optionally) the encoding of the input data (default: utf-8)

Note: Unit tests for this module can be found in test/unit/test_senna.py

    >>> from nltk.tag import SennaTagger
    >>> tagger = SennaTagger('/usr/share/senna-v3.0')  # doctest: +SKIP
    >>> tagger.tag('What is the airspeed of an unladen swallow ?'.split())  # doctest: +SKIP
    [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'NN'), ('?', '.')]

    >>> from nltk.tag import SennaChunkTagger
    >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0')  # doctest: +SKIP
    >>> chktagger.tag('What is the airspeed of an unladen swallow ?'.split())  # doctest: +SKIP
    [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), ('?', 'O')]

    >>> from nltk.tag import SennaNERTagger
    >>> nertagger = SennaNERTagger('/usr/share/senna-v3.0')  # doctest: +SKIP
    >>> nertagger.tag('Shakespeare theatre was in London .'.split())  # doctest: +SKIP
    [('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'), ('in', 'O'), ('London', 'B-LOC'), ('.', 'O')]
    >>> nertagger.tag('UN headquarters are in NY , USA .'.split())  # doctest: +SKIP
    [('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), ('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')]

tag_sents() (SennaTagger, SennaChunkTagger, SennaNERTagger): applies the tag() method over a list of sentences. This method will return, for each sentence, a list of tuples of (word, tag).

bio_to_chunks() (SennaChunkTagger): extracts the chunks in a BIO chunk-tagged sentence.

    >>> from nltk.tag import SennaChunkTagger
    >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0')  # doctest: +SKIP
    >>> sent = 'What is the airspeed of an unladen swallow ?'.split()
    >>> tagged_sent = chktagger.tag(sent)  # doctest: +SKIP
    >>> tagged_sent  # doctest: +SKIP
    [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), ('?', 'O')]
    >>> list(chktagger.bio_to_chunks(tagged_sent, chunk_type='NP'))  # doctest: +SKIP
    [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')]

    :param tagged_sent: A list of tuples of word and BIO chunk tag.
    :type tagged_sent: list(tuple)
    :param chunk_type: The chunk tag that users want to extract, e.g. 'NP' or 'VP'.
    :type chunk_type: str
    :return: An iterable of tuples of chunks that users want to extract and their corresponding indices.
    :rtype: iter(tuple(str))

(Implementation notes: append each word to the current chunk, flush the full chunk when leaving an NP, and flush the last chunk at the end of the sentence.)
from nltk.classify import Senna class SennaTagger(Senna): def __init__(self, path, encoding="utf-8"): super().__init__(path, ["pos"], encoding) def tag_sents(self, sentences): tagged_sents = super().tag_sents(sentences) for i in range(len(tagged_sents)): for j in range(len(tagged_sents[i])): annotations = tagged_sents[i][j] tagged_sents[i][j] = (annotations["word"], annotations["pos"]) return tagged_sents class SennaChunkTagger(Senna): def __init__(self, path, encoding="utf-8"): super().__init__(path, ["chk"], encoding) def tag_sents(self, sentences): tagged_sents = super().tag_sents(sentences) for i in range(len(tagged_sents)): for j in range(len(tagged_sents[i])): annotations = tagged_sents[i][j] tagged_sents[i][j] = (annotations["word"], annotations["chk"]) return tagged_sents def bio_to_chunks(self, tagged_sent, chunk_type): current_chunk = [] current_chunk_position = [] for idx, word_pos in enumerate(tagged_sent): word, pos = word_pos if "-" + chunk_type in pos: current_chunk.append(word) current_chunk_position.append(idx) else: if current_chunk: _chunk_str = " ".join(current_chunk) _chunk_pos_str = "-".join(map(str, current_chunk_position)) yield _chunk_str, _chunk_pos_str current_chunk = [] current_chunk_position = [] if current_chunk: yield " ".join(current_chunk), "-".join(map(str, current_chunk_position)) class SennaNERTagger(Senna): def __init__(self, path, encoding="utf-8"): super().__init__(path, ["ner"], encoding) def tag_sents(self, sentences): tagged_sents = super().tag_sents(sentences) for i in range(len(tagged_sents)): for j in range(len(tagged_sents[i])): annotations = tagged_sents[i][j] tagged_sents[i][j] = (annotations["word"], annotations["ner"]) return tagged_sents
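A sketch of chunk extraction with bio_to_chunks(), assuming SENNA is installed under /usr/share/senna-v3.0 as in the doctests above (the path is an assumption and will differ per system):

from nltk.tag import SennaChunkTagger

chktagger = SennaChunkTagger("/usr/share/senna-v3.0")  # illustrative install path
tagged = chktagger.tag("What is the airspeed of an unladen swallow ?".split())

# Pull out the noun phrases together with their token positions.
print(list(chktagger.bio_to_chunks(tagged, chunk_type="NP")))
# e.g. [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')]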
natural language toolkit tnt tagger c 20012023 nltk project sam huston sjh900gmail com url https www nltk org for license information see license txt implementation of tnt a statisical part of speech tagger by thorsten brants https aclanthology orga001031 pdf tnt statistical pos tagger important notes does not automatically deal with unseen words it is possible to provide an untrained pos tagger to create tags for unknown words see init function should be used with sentencedelimited input due to the nature of this tagger it works best when trained over sentence delimited input however it still produces good results if the training data and testing data are separated on all punctuation eg input for training is expected to be a list of sentences where each sentence is a list of word tag tuples input for tag function is a single sentence input for tagdata function is a list of sentences output is of a similar form function provided to process text that is unsegmented please see basicsentchop tnt uses a second order markov model to produce tags for a sequence of input specifically argmax projptiti1 ti2pwiti ptt1 tt ie the maximum projection of a set of probabilities the set of possible tags for a given word is derived from the training data it is the set of all tags that exact word has been assigned to speed up and get more precision we can use log addition to instead multiplication specifically argmax sigmalogptiti1 ti2logpwiti logptt1tt the probability of a tag for a given word is the linear interpolation of 3 markov models a zeroorder firstorder and a second order model pti ti1 ti2 l1pti l2pti ti1 l3pti ti1 ti2 a beam search is used to limit the memory usage of the algorithm the degree of the beam can be changed using n in the initialization n represents the maximum number of possible solutions to maintain while tagging it is possible to differentiate the tags which are assigned to capitalized words however this does not result in a significant gain in the accuracy of the results construct a tnt statistical tagger tagger must be trained before being used to tag input param unk instance of a pos tagger conforms to taggeri type unk taggeri param trained indication that the pos tagger is trained or not type trained bool param n beam search degree see above type n int param c capitalization flag type c bool initializer creates frequency distributions to be used for tagging lx values represent the portion of the tribiuni taggers to be used to calculate the probability n value is the number of possible solutions to maintain while tagging a good value for this is 1000 c is a boolean value which specifies to use or not use the capitalization of the word as additional information for tagging note using capitalization may not increase the accuracy of the tagger statistical tools ignore or delete me uses a set of tagged data to train the tagger if an unknown word tagger is specified it is trained on the same data param data list of lists of word tag tuples type data tuplestr ensure that local c flag is initialized before use if capitalization is requested and the word begins with a capital set local flag c to true set local flag c to false for the next word compute lambda values from the trained frequency distributions creates lambda values based upon training data note no need to explicitly reference c it is contained within the tag variable tag tag c for each tag trigram t1 t2 t3 depending on the maximum value of ft1 t2 t31 ft1 t21 ft2 t31 ft21 ft31 n1 increment l3 l2 or l1 by ft1 t2 t3 issues 
resolutions if 2 values are equal increment both lambda values by ft1 t2 t3 2 temporary lambda variables for each t1 t2 in system for each t3 given t1 t2 in system note tag actually represents tag c however no effect within this function if there has only been 1 occurrence of this tag in the data then ignore this trigram safediv provides a safe floating point division it returns 1 if the denominator is 0 if c1 is the maximum value if c2 is the maximum value if c3 is the maximum value if c3 and c2 are equal and larger than c1 if c1 and c2 are equal and larger than c3 this might be a dumb thing to do not sure yet otherwise there might be a problem eg all values 0 lambda normalisation ensures that l1l2l3 1 safe floating point division function does not allow division by 0 returns 1 if the denominator is 0 tags each sentence in a list of sentences param data list of list of words type data string return list of list of word tag tuples invokes tagsent function for each sentence compiles the results into a list of tagged sentences each tagged sentence is a list of word tag tuples tags a single sentence param data list of words type data string return word tag calls recursive function tagword to produce a list of tags associates the sequence of returned tags with the correct words in the input sequence returns a list of word tag tuples unpack and discard the c flags param sent list of words remaining in the sentence type sent word param currentstates list of possible tag combinations for the sentence so far and the log probability associated with each tag combination type currentstates tag logprob tags the first word in the sentence and recursively tags the reminder of sentence uses formula specified above to calculate the probability of a particular tag if this word marks the end of the sentence return the most probable tag otherwise there are more words to be tagged if the capitalisation is requested initialise the flag for this word if word is known compute the set of possible tags and their associated log probabilities compute the result of appending each tag to this history otherwise a new word set of possible tags is unknown since a set of possible tags and the probability of each specific tag can not be returned from most classifiers specify that any unknown words are tagged with certainty if no unknown word tagger has been specified then use the tag unk otherwise apply the unknown word tagger now have computed a set of possible newstates sort states by log prob set is now ordered greatest to least log probability del everything after n threshold this is the beam search cut compute the tags for the rest of the sentence return the best list of tags for the sentence helper function basic sentence tokenizer basic method for tokenizing input into sentences for this tagger param data list of tokens words or word tag tuples type data str or tuplestr str param raw boolean flag marking the input data as a list of words or a list of tagged words type raw bool return list of sentences sentences are a list of tokens tokens are the same as the input function takes a list of tokens and separates the tokens into lists where each list represents a sentence fragment this function can separate both tagged and raw sequences into basic sentences sentence markers are the set of this is a simple method which enhances the performance of the tnt tagger better sentence tokenization will further enhance the results printi1 tacc tpkn i1 sacc tpkn i1 tacc i1 sacc natural language toolkit tnt tagger c 2001 2023 nltk 
project sam huston sjh900 gmail com url https www nltk org for license information see license txt implementation of tnt a statisical part of speech tagger by thorsten brants https aclanthology org a00 1031 pdf tnt statistical pos tagger important notes does not automatically deal with unseen words it is possible to provide an untrained pos tagger to create tags for unknown words see __init__ function should be used with sentence delimited input due to the nature of this tagger it works best when trained over sentence delimited input however it still produces good results if the training data and testing data are separated on all punctuation eg input for training is expected to be a list of sentences where each sentence is a list of word tag tuples input for tag function is a single sentence input for tagdata function is a list of sentences output is of a similar form function provided to process text that is unsegmented please see basic_sent_chop tnt uses a second order markov model to produce tags for a sequence of input specifically argmax proj p t_i t_i 1 t_i 2 p w_i t_i p t_t 1 t_t ie the maximum projection of a set of probabilities the set of possible tags for a given word is derived from the training data it is the set of all tags that exact word has been assigned to speed up and get more precision we can use log addition to instead multiplication specifically argmax sigma log p t_i t_i 1 t_i 2 log p w_i t_i log p t_t 1 t_t the probability of a tag for a given word is the linear interpolation of 3 markov models a zero order first order and a second order model p t_i t_i 1 t_i 2 l1 p t_i l2 p t_i t_i 1 l3 p t_i t_i 1 t_i 2 a beam search is used to limit the memory usage of the algorithm the degree of the beam can be changed using n in the initialization n represents the maximum number of possible solutions to maintain while tagging it is possible to differentiate the tags which are assigned to capitalized words however this does not result in a significant gain in the accuracy of the results construct a tnt statistical tagger tagger must be trained before being used to tag input param unk instance of a pos tagger conforms to taggeri type unk taggeri param trained indication that the pos tagger is trained or not type trained bool param n beam search degree see above type n int param c capitalization flag type c bool initializer creates frequency distributions to be used for tagging _lx values represent the portion of the tri bi uni taggers to be used to calculate the probability n value is the number of possible solutions to maintain while tagging a good value for this is 1000 c is a boolean value which specifies to use or not use the capitalization of the word as additional information for tagging note using capitalization may not increase the accuracy of the tagger statistical tools ignore or delete me uses a set of tagged data to train the tagger if an unknown word tagger is specified it is trained on the same data param data list of lists of word tag tuples type data tuple str ensure that local c flag is initialized before use if capitalization is requested and the word begins with a capital set local flag c to true set local flag c to false for the next word compute lambda values from the trained frequency distributions creates lambda values based upon training data note no need to explicitly reference c it is contained within the tag variable tag tag c for each tag trigram t1 t2 t3 depending on the maximum value of f t1 t2 t3 1 f t1 t2 1 f t2 t3 1 f t2 1 f t3 1 n 1 increment l3 
l2 or l1 by f t1 t2 t3 issues resolutions if 2 values are equal increment both lambda values by f t1 t2 t3 2 temporary lambda variables for each t1 t2 in system for each t3 given t1 t2 in system note tag actually represents tag c however no effect within this function if there has only been 1 occurrence of this tag in the data then ignore this trigram safe_div provides a safe floating point division it returns 1 if the denominator is 0 if c1 is the maximum value if c2 is the maximum value if c3 is the maximum value if c3 and c2 are equal and larger than c1 if c1 and c2 are equal and larger than c3 this might be a dumb thing to do not sure yet otherwise there might be a problem eg all values 0 lambda normalisation ensures that l1 l2 l3 1 safe floating point division function does not allow division by 0 returns 1 if the denominator is 0 tags each sentence in a list of sentences param data list of list of words type data string return list of list of word tag tuples invokes tag sent function for each sentence compiles the results into a list of tagged sentences each tagged sentence is a list of word tag tuples tags a single sentence param data list of words type data string return word tag calls recursive function _tagword to produce a list of tags associates the sequence of returned tags with the correct words in the input sequence returns a list of word tag tuples unpack and discard the c flags param sent list of words remaining in the sentence type sent word param current_states list of possible tag combinations for the sentence so far and the log probability associated with each tag combination type current_states tag logprob tags the first word in the sentence and recursively tags the reminder of sentence uses formula specified above to calculate the probability of a particular tag if this word marks the end of the sentence return the most probable tag otherwise there are more words to be tagged if the capitalisation is requested initialise the flag for this word if word is known compute the set of possible tags and their associated log probabilities compute the result of appending each tag to this history otherwise a new word set of possible tags is unknown since a set of possible tags and the probability of each specific tag can not be returned from most classifiers specify that any unknown words are tagged with certainty if no unknown word tagger has been specified then use the tag unk otherwise apply the unknown word tagger now have computed a set of possible new_states sort states by log prob set is now ordered greatest to least log probability del everything after n threshold this is the beam search cut compute the tags for the rest of the sentence return the best list of tags for the sentence helper function basic sentence tokenizer basic method for tokenizing input into sentences for this tagger param data list of tokens words or word tag tuples type data str or tuple str str param raw boolean flag marking the input data as a list of words or a list of tagged words type raw bool return list of sentences sentences are a list of tokens tokens are the same as the input function takes a list of tokens and separates the tokens into lists where each list represents a sentence fragment this function can separate both tagged and raw sequences into basic sentences sentence markers are the set of this is a simple method which enhances the performance of the tnt tagger better sentence tokenization will further enhance the results print i 1 tacc tp_kn i 1 sacc tp_kn i 1 tacc i 1 sacc
from math import log from operator import itemgetter from nltk.probability import ConditionalFreqDist, FreqDist from nltk.tag.api import TaggerI class TnT(TaggerI): def __init__(self, unk=None, Trained=False, N=1000, C=False): self._uni = FreqDist() self._bi = ConditionalFreqDist() self._tri = ConditionalFreqDist() self._wd = ConditionalFreqDist() self._eos = ConditionalFreqDist() self._l1 = 0.0 self._l2 = 0.0 self._l3 = 0.0 self._N = N self._C = C self._T = Trained self._unk = unk self.unknown = 0 self.known = 0 def train(self, data): C = False if self._unk is not None and self._T == False: self._unk.train(data) for sent in data: history = [("BOS", False), ("BOS", False)] for w, t in sent: if self._C and w[0].isupper(): C = True self._wd[w][t] += 1 self._uni[(t, C)] += 1 self._bi[history[1]][(t, C)] += 1 self._tri[tuple(history)][(t, C)] += 1 history.append((t, C)) history.pop(0) C = False self._eos[t]["EOS"] += 1 self._compute_lambda() def _compute_lambda(self): tl1 = 0.0 tl2 = 0.0 tl3 = 0.0 for history in self._tri.conditions(): (h1, h2) = history for tag in self._tri[history].keys(): if self._uni[tag] == 1: continue c3 = self._safe_div( (self._tri[history][tag] - 1), (self._tri[history].N() - 1) ) c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1)) c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1)) if (c1 > c3) and (c1 > c2): tl1 += self._tri[history][tag] elif (c2 > c3) and (c2 > c1): tl2 += self._tri[history][tag] elif (c3 > c2) and (c3 > c1): tl3 += self._tri[history][tag] elif (c3 == c2) and (c3 > c1): tl2 += self._tri[history][tag] / 2.0 tl3 += self._tri[history][tag] / 2.0 elif (c2 == c1) and (c1 > c3): tl1 += self._tri[history][tag] / 2.0 tl2 += self._tri[history][tag] / 2.0 else: pass self._l1 = tl1 / (tl1 + tl2 + tl3) self._l2 = tl2 / (tl1 + tl2 + tl3) self._l3 = tl3 / (tl1 + tl2 + tl3) def _safe_div(self, v1, v2): if v2 == 0: return -1 else: return v1 / v2 def tagdata(self, data): res = [] for sent in data: res1 = self.tag(sent) res.append(res1) return res def tag(self, data): current_state = [(["BOS", "BOS"], 0.0)] sent = list(data) tags = self._tagword(sent, current_state) res = [] for i in range(len(sent)): (t, C) = tags[i + 2] res.append((sent[i], t)) return res def _tagword(self, sent, current_states): if sent == []: (h, logp) = current_states[0] return h word = sent[0] sent = sent[1:] new_states = [] C = False if self._C and word[0].isupper(): C = True if word in self._wd: self.known += 1 for (history, curr_sent_logprob) in current_states: logprobs = [] for t in self._wd[word].keys(): tC = (t, C) p_uni = self._uni.freq(tC) p_bi = self._bi[history[-1]].freq(tC) p_tri = self._tri[tuple(history[-2:])].freq(tC) p_wd = self._wd[word][t] / self._uni[tC] p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri p2 = log(p, 2) + log(p_wd, 2) new_states.append((history + [tC], curr_sent_logprob + p2)) else: self.unknown += 1 p = 1 if self._unk is None: tag = ("Unk", C) else: [(_w, t)] = list(self._unk.tag([word])) tag = (t, C) for (history, logprob) in current_states: history.append(tag) new_states = current_states new_states.sort(reverse=True, key=itemgetter(1)) if len(new_states) > self._N: new_states = new_states[: self._N] return self._tagword(sent, new_states) def basic_sent_chop(data, raw=True): new_data = [] curr_sent = [] sent_mark = [",", ".", "?", "!"] if raw: for word in data: if word in sent_mark: curr_sent.append(word) new_data.append(curr_sent) curr_sent = [] else: curr_sent.append(word) else: for (word, tag) in data: if word in 
sent_mark: curr_sent.append((word, tag)) new_data.append(curr_sent) curr_sent = [] else: curr_sent.append((word, tag)) return new_data def demo(): from nltk.corpus import brown sents = list(brown.tagged_sents()) test = list(brown.sents()) tagger = TnT() tagger.train(sents[200:1000]) tagged_data = tagger.tagdata(test[100:120]) for j in range(len(tagged_data)): s = tagged_data[j] t = sents[j + 100] for i in range(len(s)): print(s[i], "--", t[i]) print() def demo2(): from nltk.corpus import treebank d = list(treebank.tagged_sents()) t = TnT(N=1000, C=False) s = TnT(N=1000, C=True) t.train(d[(11) * 100 :]) s.train(d[(11) * 100 :]) for i in range(10): tacc = t.accuracy(d[i * 100 : ((i + 1) * 100)]) tp_un = t.unknown / (t.known + t.unknown) tp_kn = t.known / (t.known + t.unknown) t.unknown = 0 t.known = 0 print("Capitalization off:") print("Accuracy:", tacc) print("Percentage known:", tp_kn) print("Percentage unknown:", tp_un) print("Accuracy over known words:", (tacc / tp_kn)) sacc = s.accuracy(d[i * 100 : ((i + 1) * 100)]) sp_un = s.unknown / (s.known + s.unknown) sp_kn = s.known / (s.known + s.unknown) s.unknown = 0 s.known = 0 print("Capitalization on:") print("Accuracy:", sacc) print("Percentage known:", sp_kn) print("Percentage unknown:", sp_un) print("Accuracy over known words:", (sacc / sp_kn)) def demo3(): from nltk.corpus import brown, treebank d = list(treebank.tagged_sents()) e = list(brown.tagged_sents()) d = d[:1000] e = e[:1000] d10 = int(len(d) * 0.1) e10 = int(len(e) * 0.1) tknacc = 0 sknacc = 0 tallacc = 0 sallacc = 0 tknown = 0 sknown = 0 for i in range(10): t = TnT(N=1000, C=False) s = TnT(N=1000, C=False) dtest = d[(i * d10) : ((i + 1) * d10)] etest = e[(i * e10) : ((i + 1) * e10)] dtrain = d[: (i * d10)] + d[((i + 1) * d10) :] etrain = e[: (i * e10)] + e[((i + 1) * e10) :] t.train(dtrain) s.train(etrain) tacc = t.accuracy(dtest) tp_un = t.unknown / (t.known + t.unknown) tp_kn = t.known / (t.known + t.unknown) tknown += tp_kn t.unknown = 0 t.known = 0 sacc = s.accuracy(etest) sp_un = s.unknown / (s.known + s.unknown) sp_kn = s.known / (s.known + s.unknown) sknown += sp_kn s.unknown = 0 s.known = 0 tknacc += tacc / tp_kn sknacc += sacc / tp_kn tallacc += tacc sallacc += sacc print("brown: acc over words known:", 10 * tknacc) print(" : overall accuracy:", 10 * tallacc) print(" : words known:", 10 * tknown) print("treebank: acc over words known:", 10 * sknacc) print(" : overall accuracy:", 10 * sallacc) print(" : words known:", 10 * sknown)
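A minimal sketch of training the TnT tagger on a slice of the bundled treebank sample; the slice size and beam width are arbitrary, and words unseen in training fall back to the "Unk" tag unless an unknown-word tagger is supplied via the unk parameter:

from nltk.corpus import treebank
from nltk.tag.tnt import TnT

sents = treebank.tagged_sents()
tnt_tagger = TnT(N=100)          # narrower beam than the default 1000, for speed
tnt_tagger.train(sents[:200])
print(tnt_tagger.tag("the old man the boat".split()))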
Natural Language Toolkit: Tagger Utilities
(C) 2001-2023 NLTK Project
Author: Edward Loper <edloper@gmail.com>
        Steven Bird <stevenbird1@gmail.com>
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

str2tuple(s, sep): Given the string representation of a tagged token, return the corresponding tuple representation. The rightmost occurrence of *sep* in *s* will be used to divide *s* into a word string and a tag string. If *sep* does not occur in *s*, return (s, None).

    >>> from nltk.tag.util import str2tuple
    >>> str2tuple('fly/NN')
    ('fly', 'NN')

    :type s: str
    :param s: The string representation of a tagged token.
    :type sep: str
    :param sep: The separator string used to separate word strings from tags.

tuple2str(tagged_token, sep): Given the tuple representation of a tagged token, return the corresponding string representation. This representation is formed by concatenating the token's word string, followed by the separator, followed by the token's tag. (If the tag is None, then just return the bare word string.)

    >>> from nltk.tag.util import tuple2str
    >>> tagged_token = ('fly', 'NN')
    >>> tuple2str(tagged_token)
    'fly/NN'

    :type tagged_token: tuple(str, str)
    :param tagged_token: The tuple representation of a tagged token.
    :type sep: str
    :param sep: The separator string used to separate word strings from tags.

untag(tagged_sentence): Given a tagged sentence, return an untagged version of that sentence. I.e., return a list containing the first element of each tuple in *tagged_sentence*.

    >>> from nltk.tag.util import untag
    >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')])
    ['John', 'saw', 'Mary']
def str2tuple(s, sep="/"):
    loc = s.rfind(sep)
    if loc >= 0:
        return (s[:loc], s[loc + len(sep) :].upper())
    else:
        return (s, None)


def tuple2str(tagged_token, sep="/"):
    word, tag = tagged_token
    if tag is None:
        return word
    else:
        assert sep not in tag, "tag may not contain sep!"
        return f"{word}{sep}{tag}"


def untag(tagged_sentence):
    return [w for (w, t) in tagged_sentence]
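The round trip between the two representations, as a quick check of the helpers above:

from nltk.tag.util import str2tuple, tuple2str, untag

tok = str2tuple("fly/NN")
print(tok)             # ('fly', 'NN')
print(tuple2str(tok))  # 'fly/NN'
print(untag([("John", "NNP"), ("saw", "VBD"), ("Mary", "NNP")]))  # ['John', 'saw', 'Mary']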
Natural Language Toolkit: Transformation-based learning
(C) 2001-2023 NLTK Project
Author: Marcus Uneson <marcus.uneson@gmail.com>
        based on previous (NLTK2) version by Christopher Maloof, Edward Loper, Steven Bird
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

Transformation Based Learning: a general purpose package for Transformation Based Learning, currently used by nltk.tag.BrillTagger.

isort:skip_file

API: Template(...), Template.expand(...)
API: Feature(...), Feature.expand(...)
API: Rule.format(...), Rule.templatetid
from nltk.tbl.template import Template
from nltk.tbl.feature import Feature
from nltk.tbl.rule import Rule
from nltk.tbl.erroranalysis import error_list
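A small sketch of the Template API exercised by the demo module further below: a template combines one or more features, each of which inspects a set of positions relative to the current word (Pos and Word come from nltk.tag.brill, as in the demo's imports). The particular position lists here are illustrative:

from nltk.tbl import Template
from nltk.tag.brill import Pos, Word

# Candidate rules conditioned on nearby tags and/or the current word.
templates = [
    Template(Pos([-1])),                 # tag one step to the left
    Template(Pos([-2, -1])),             # tag one OR two steps to the left
    Template(Word([0]), Pos([-1])),      # current word AND the preceding tag
]
print(len(templates))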
Natural Language Toolkit: Transformation-based learning
(C) 2001-2023 NLTK Project
Author: Marcus Uneson <marcus.uneson@gmail.com>, based on previous (nltk2) version by Christopher Maloof, Edward Loper, Steven Bird
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

demo(): Run a demo with defaults. See source comments for details, or docstrings of any of the more specific demo_* functions.

demo_repr_rule_format(): Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose")).

demo_str_rule_format(): Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose")).

demo_verbose_rule_format(): Exemplify Rule.format("verbose").

demo_multiposition_feature(): The feature of a template takes a list of positions relative to the current word where the feature should be looked for, conceptually joined by logical OR. For instance, Pos([-1, 1]), given a value V, will hold whenever V is found one step to the left and/or one step to the right. For contiguous ranges, a 2-arg form giving inclusive end points can also be used: Pos(-3, -1) is the same as Pos([-3, -2, -1]).

demo_multifeature_template(): Templates can have more than a single feature.

demo_template_statistics(): Show aggregate statistics per template. Little-used templates are candidates for deletion; much-used templates may possibly be refined. Deleting unused templates is mostly about saving time and/or space: training is basically O(T) in the number of templates T, also in terms of memory usage, which often will be the limiting factor.

demo_generated_templates(): Template.expand() and Feature.expand() are class methods facilitating generation of large numbers of templates; see their documentation for details. Note: training with 500 templates can easily fill all available memory, even on relatively small corpora.

demo_learning_curve(): Plot a learning curve -- the contribution to tagging accuracy of the individual rules. Note: requires matplotlib.

demo_error_analysis(): Writes a file with context for each erroneous word after tagging testing data.

demo_serialize_tagger(): Serializes the learned tagger to a file in pickle format; reloads it and validates the process.

demo_high_accuracy_rules(): Discard rules with low accuracy. This may hurt performance a bit, but will often produce rules which are more interesting for a human to read.

postag(): Brill tagger demonstration.

:param templates: templates to be used in training (default: the brill24() template set)
:type templates: list of Template
:param tagged_data: the tagged corpus to train and test on (default: the Penn Treebank sample, treebank.tagged_sents())
:type tagged_data: list(list(tuple(str, str)))
:param num_sents: how many sentences of training and testing data to use
:type num_sents: int
:param max_rules: maximum number of rule instances to create
:type max_rules: int
:param min_score: the minimum score for a rule in order for it to be considered
:type min_score: int
:param min_acc: the minimum accuracy for a rule in order for it to be considered
:type min_acc: float
:param train: the fraction of the corpus to be used for training (1 = all)
:type train: float
:param trace: the level of diagnostic tracing output to produce (0-4)
:type trace: int
:param randomize: whether the training data should be a random subset of the corpus
:type randomize: bool
:param ruleformat: rule output format, one of "str", "repr", "verbose"
:type ruleformat: str
:param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
:type incremental_stats: bool
:param template_stats: if true, will print per-template statistics collected in training (and optionally testing)
:type template_stats: bool
:param error_output: the file where errors will be saved
:type error_output: str
:param serialize_output: the file where the learned tbl tagger will be saved
:type serialize_output: str
:param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
:type learning_curve_output: str
:param learning_curve_take: how many rules plotted
:type learning_curve_take: int
:param baseline_backoff_tagger: the backoff tagger used by the baseline unigram tagger (default: a regexp tagger)
:type baseline_backoff_tagger: tagger
:param separate_baseline_data: use a fraction of the training data exclusively for training the baseline
:type separate_baseline_data: bool
:param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get deterministic output from the baseline unigram tagger between python versions)
:type cache_baseline_tagger: str

Note on separate_baseline_data: if False (the default), the training data is reused for both the baseline and the rule learner. This is fast and fine for a demo, but is likely to generalize worse on unseen data; it also cannot be sensibly used for learning curves on training data (the baseline will be artificially high).

Inline comments in the module: defaults -- some pre-built template sets taken from typical systems or publications are available, print a list with describe_template_sets(); creating (or reloading from cache) a baseline tagger (unigram tagger), just a mechanism for getting deterministic output from the baseline between python versions; creating a Brill tagger; printing the learned rules, if learned silently; printing template statistics (optionally including comparison with the training data; note that if not separate_baseline_data, baseline accuracy will be artificially high); writing error analysis to file; serializing the tagger to a pickle file and reloading (just to see it works); train is the proportion of data used in training, the rest is reserved for testing; the REGEXP_TAGGER patterns cover cardinal numbers, articles, adjectives, nouns formed from adjectives, adverbs, plural nouns, gerunds, past tense verbs, and nouns (default).
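A minimal sketch of how such templates are put together with nltk.tbl.Template and the Pos/Word features (the calls mirror the demo_* functions in the module below; the snippet is illustrative, not part of the module itself):

from nltk.tag.brill import Pos, Word
from nltk.tbl import Template

# Single feature over several positions, joined by logical OR; the 2-arg form
# Pos(-3, -1) denotes the same contiguous range as the explicit list.
t1 = Template(Pos([-3, -2, -1]))

# A template with more than one feature (the word at 0 and the tag one or two
# steps to the left must both match).
t2 = Template(Word([0]), Pos([-2, -1]))

# Generating many templates at once; combinations=(1, 3) keeps all templates
# built from one to three of the expanded features.
wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False)
tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True)
templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3)))
print("Generated", len(templates), "templates")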
import os
import pickle
import random
import time

from nltk.corpus import treebank
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
from nltk.tag.brill import Pos, Word
from nltk.tbl import Template, error_list


def demo():
    postag()


def demo_repr_rule_format():
    postag(ruleformat="repr")


def demo_str_rule_format():
    postag(ruleformat="str")


def demo_verbose_rule_format():
    postag(ruleformat="verbose")


def demo_multiposition_feature():
    postag(templates=[Template(Pos([-3, -2, -1]))])


def demo_multifeature_template():
    postag(templates=[Template(Word([0]), Pos([-2, -1]))])


def demo_template_statistics():
    postag(incremental_stats=True, template_stats=True)


def demo_generated_templates():
    wordtpls = Word.expand([-1, 0, 1], [1, 2], excludezero=False)
    tagtpls = Pos.expand([-2, -1, 0, 1], [1, 2], excludezero=True)
    templates = list(Template.expand([wordtpls, tagtpls], combinations=(1, 3)))
    print(
        "Generated {} templates for transformation-based learning".format(
            len(templates)
        )
    )
    postag(templates=templates, incremental_stats=True, template_stats=True)


def demo_learning_curve():
    postag(
        incremental_stats=True,
        separate_baseline_data=True,
        learning_curve_output="learningcurve.png",
    )


def demo_error_analysis():
    postag(error_output="errors.txt")


def demo_serialize_tagger():
    postag(serialize_output="tagger.pcl")


def demo_high_accuracy_rules():
    postag(num_sents=3000, min_acc=0.96, min_score=10)


def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None,
):
    # Defaults. Some pre-built template sets taken from typical systems or
    # publications are available; print a list with describe_template_sets().
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import brill24, describe_template_sets

        templates = brill24()
    (training_data, baseline_data, gold_data, testing_data) = _demo_prepare_data(
        tagged_data, train, num_sents, randomize, separate_baseline_data
    )

    # Creating (or reloading from cache) a baseline tagger (unigram tagger).
    # This is just a mechanism for getting deterministic output from the
    # baseline between python versions.
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(
                baseline_data, backoff=baseline_backoff_tagger
            )
            # Pickle files must be opened in binary mode.
            with open(cache_baseline_tagger, "wb") as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print(
                "Trained baseline tagger, pickled it to {}".format(
                    cache_baseline_tagger
                )
            )
        with open(cache_baseline_tagger, "rb") as print_rules:
            baseline_tagger = pickle.load(print_rules)
        print(f"Reloaded pickled tagger from {cache_baseline_tagger}")
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print(
            "    Accuracy on test set: {:0.4f}".format(
                baseline_tagger.accuracy(gold_data)
            )
        )

    # Creating a Brill tagger.
    tbrill = time.time()
    trainer = BrillTaggerTrainer(
        baseline_tagger, templates, trace, ruleformat=ruleformat
    )
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print(f"Trained tbl tagger in {time.time() - tbrill:0.2f} seconds")
    if gold_data:
        print("    Accuracy on test set: %.4f" % brill_tagger.accuracy(gold_data))

    # Printing the learned rules, if learned silently.
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(), 1):
            print(f"{ruleno:4d} {rule.format(ruleformat):s}")

    # Printing template statistics (optionally including comparison with the
    # training data). Note: if not separate_baseline_data, then baseline
    # accuracy will be artificially high.
    if incremental_stats:
        print(
            "Incrementally tagging the test data, collecting individual rule statistics"
        )
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(
            testing_data, gold_data
        )
print(" Rule statistics collected") if not separate_baseline_data: print( "WARNING: train_stats asked for separate_baseline_data=True; the baseline " "will be artificially high" ) trainstats = brill_tagger.train_stats() if template_stats: brill_tagger.print_template_statistics(teststats) if learning_curve_output: _demo_plot( learning_curve_output, teststats, trainstats, take=learning_curve_take ) print(f"Wrote plot of learning curve to {learning_curve_output}") else: print("Tagging the test data") taggedtest = brill_tagger.tag_sents(testing_data) if template_stats: brill_tagger.print_template_statistics() if error_output is not None: with open(error_output, "w") as f: f.write("Errors for Brill Tagger %r\n\n" % serialize_output) f.write("\n".join(error_list(gold_data, taggedtest)).encode("utf-8") + "\n") print(f"Wrote tagger errors including context to {error_output}") if serialize_output is not None: taggedtest = brill_tagger.tag_sents(testing_data) with open(serialize_output, "w") as print_rules: pickle.dump(brill_tagger, print_rules) print(f"Wrote pickled tagger to {serialize_output}") with open(serialize_output) as print_rules: brill_tagger_reloaded = pickle.load(print_rules) print(f"Reloaded pickled tagger from {serialize_output}") taggedtest_reloaded = brill_tagger.tag_sents(testing_data) if taggedtest == taggedtest_reloaded: print("Reloaded tagger tried on test set, results identical") else: print("PROBLEM: Reloaded tagger gave different results on test set") def _demo_prepare_data( tagged_data, train, num_sents, randomize, separate_baseline_data ): if tagged_data is None: print("Loading tagged data from treebank... ") tagged_data = treebank.tagged_sents() if num_sents is None or len(tagged_data) <= num_sents: num_sents = len(tagged_data) if randomize: random.seed(len(tagged_data)) random.shuffle(tagged_data) cutoff = int(num_sents * train) training_data = tagged_data[:cutoff] gold_data = tagged_data[cutoff:num_sents] testing_data = [[t[0] for t in sent] for sent in gold_data] if not separate_baseline_data: baseline_data = training_data else: bl_cutoff = len(training_data) // 3 (baseline_data, training_data) = ( training_data[:bl_cutoff], training_data[bl_cutoff:], ) (trainseqs, traintokens) = corpus_size(training_data) (testseqs, testtokens) = corpus_size(testing_data) (bltrainseqs, bltraintokens) = corpus_size(baseline_data) print(f"Read testing data ({testseqs:d} sents/{testtokens:d} wds)") print(f"Read training data ({trainseqs:d} sents/{traintokens:d} wds)") print( "Read baseline data ({:d} sents/{:d} wds) {:s}".format( bltrainseqs, bltraintokens, "" if separate_baseline_data else "[reused the training set]", ) ) return (training_data, baseline_data, gold_data, testing_data) def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None): testcurve = [teststats["initialerrors"]] for rulescore in teststats["rulescores"]: testcurve.append(testcurve[-1] - rulescore) testcurve = [1 - x / teststats["tokencount"] for x in testcurve[:take]] traincurve = [trainstats["initialerrors"]] for rulescore in trainstats["rulescores"]: traincurve.append(traincurve[-1] - rulescore) traincurve = [1 - x / trainstats["tokencount"] for x in traincurve[:take]] import matplotlib.pyplot as plt r = list(range(len(testcurve))) plt.plot(r, testcurve, r, traincurve) plt.axis([None, None, None, 1.0]) plt.savefig(learning_curve_output) NN_CD_TAGGER = RegexpTagger([(r"^-?[0-9]+(\.[0-9]+)?$", "CD"), (r".*", "NN")]) REGEXP_TAGGER = RegexpTagger( [ (r"^-?[0-9]+(\.[0-9]+)?$", "CD"), 
(r"(The|the|A|a|An|an)$", "AT"), (r".*able$", "JJ"), (r".*ness$", "NN"), (r".*ly$", "RB"), (r".*s$", "NNS"), (r".*ing$", "VBG"), (r".*ed$", "VBD"), (r".*", "NN"), ] ) def corpus_size(seqs): return (len(seqs), sum(len(x) for x in seqs)) if __name__ == "__main__": demo_learning_curve()
Natural Language Toolkit: Transformation-based learning
(C) 2001-2023 NLTK Project
Author: Marcus Uneson <marcus.uneson@gmail.com>, based on previous (nltk2) version by Christopher Maloof, Edward Loper, Steven Bird
URL: <https://www.nltk.org/>
For license information, see LICENSE.TXT

error_list(train_sents, test_sents): Returns a list of errors in string format -- human-readable strings indicating the errors in the given tagging of the corpus.

:param train_sents: the correct tagging of the corpus
:type train_sents: list(tuple)
:param test_sents: the tagged corpus
:type test_sents: list(tuple)
def error_list(train_sents, test_sents): hdr = ("%25s | %s | %s\n" + "-" * 26 + "+" + "-" * 24 + "+" + "-" * 26) % ( "left context", "word/test->gold".center(22), "right context", ) errors = [hdr] for (train_sent, test_sent) in zip(train_sents, test_sents): for wordnum, (word, train_pos) in enumerate(train_sent): test_pos = test_sent[wordnum][1] if train_pos != test_pos: left = " ".join("%s/%s" % w for w in train_sent[:wordnum]) right = " ".join("%s/%s" % w for w in train_sent[wordnum + 1 :]) mid = f"{word}/{test_pos}->{train_pos}" errors.append(f"{left[-25:]:>25} | {mid.center(22)} | {right[:25]}") return errors
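To see the output format without training a tagger, error_list can be fed a tiny hand-made gold/test pair; the sentences below are made up purely for illustration.

# Gold (correct) tags vs. a hypothetical tagger's output for one sentence.
gold = [[("The", "AT"), ("dog", "NN"), ("barked", "VBD")]]
tagged = [[("The", "AT"), ("dog", "VB"), ("barked", "VBD")]]
for line in error_list(gold, tagged):
    print(line)
# The mistagged token is shown as word/test->gold ("dog/VB->NN"), flanked by
# its left and right context.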
Test suite that runs all NLTK tests.

This module, nltk.test.all, is named as the NLTK test suite in the project's setup-eggs.py file. Here we create a test suite that runs all of our doctests and return it for processing by the setuptools test harness. (The source also carries some commented-out debugging prints and a commented-out filter that would skip index.rst, which contains a time-dependent doctest.)
import doctest import os.path import unittest from glob import glob def additional_tests(): dir = os.path.dirname(__file__) paths = glob(os.path.join(dir, "*.doctest")) files = [os.path.basename(path) for path in paths] return unittest.TestSuite([doctest.DocFileSuite(file) for file in files])
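Outside of the setuptools harness, the same suite can also be run directly; a minimal sketch, assuming the *.doctest files ship alongside this module:

if __name__ == "__main__":
    # Run every collected doctest file with a standard unittest runner.
    unittest.TextTestRunner(verbosity=2).run(additional_tests())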
Most of classify.doctest requires numpy.
def setup_module(): import pytest pytest.importorskip("numpy")
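For comparison, a hand-rolled version of the same guard (illustrative only; not necessarily how NLTK wires its test fixtures) could skip the module explicitly when numpy is absent:

import pytest


def setup_module():
    # Skip every test in this module when numpy is unavailable, rather than failing.
    try:
        import numpy  # noqa: F401
    except ImportError:
        pytest.skip("most of classify.doctest requires numpy", allow_module_level=True)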