Column schema (type and observed value/length range for each column):

| column | type | values / lengths |
|---|---|---|
| id | int32 | 0 – 252k |
| repo | string | 7 – 55 chars |
| path | string | 4 – 127 chars |
| func_name | string | 1 – 88 chars |
| original_string | string | 75 – 19.8k chars |
| language | string | 1 value (python) |
| code | string | 75 – 19.8k chars |
| code_tokens | sequence | 20 – 707 items |
| docstring | string | 3 – 17.3k chars |
| docstring_tokens | sequence | 3 – 222 items |
| sha | string | 40 chars |
| url | string | 87 – 242 chars |
| docstring_summary | string | 1 value |
| parameters | string | 1 value |
| return_statement | string | 1 value |
| argument_list | string | 1 value |
| identifier | string | 1 value |
| nwo | string | 1 value |
| score | float32 | -1 |

The example rows below follow this column order:

id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | docstring_summary | parameters | return_statement | argument_list | identifier | nwo | score
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
0 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.settext | def settext(self, text, cls='current'):
"""Set the text for this element.
Arguments:
text (str): The text
cls (str): The class of the text, defaults to ``current`` (leave this unless you know what you are doing). There may be only one text content element of each class associated with the element.
"""
self.replace(TextContent, value=text, cls=cls) | python | def settext(self, text, cls='current'):
"""Set the text for this element.
Arguments:
text (str): The text
cls (str): The class of the text, defaults to ``current`` (leave this unless you know what you are doing). There may be only one text content element of each class associated with the element.
"""
self.replace(TextContent, value=text, cls=cls) | [
"def",
"settext",
"(",
"self",
",",
"text",
",",
"cls",
"=",
"'current'",
")",
":",
"self",
".",
"replace",
"(",
"TextContent",
",",
"value",
"=",
"text",
",",
"cls",
"=",
"cls",
")"
] | Set the text for this element.
Arguments:
text (str): The text
cls (str): The class of the text, defaults to ``current`` (leave this unless you know what you are doing). There may be only one text content element of each class associated with the element. | [
"Set",
"the",
"text",
"for",
"this",
"element",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1357-L1364 | -1 |
||||||
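A minimal usage sketch for the `AbstractElement.settext` method shown in the row above (the file path is a placeholder and the document is assumed to contain at least one word; neither is taken from the row itself):

```python
from pynlpl.formats import folia

# Load an existing FoLiA document (path is illustrative).
doc = folia.Document(file="/path/to/document.folia.xml")

word = next(iter(doc.words()))   # first Word element in the document
word.settext("example")          # replaces the TextContent of class 'current'
print(word.text())               # -> "example"
```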
1 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.setdocument | def setdocument(self, doc):
"""Associate a document with this element.
Arguments:
doc (:class:`Document`): A document
Each element must be associated with a FoLiA document.
"""
assert isinstance(doc, Document)
if not self.doc:
self.doc = doc
if self.id:
if self.id in doc:
raise DuplicateIDError(self.id)
else:
self.doc.index[id] = self
for e in self: #recursive for all children
if isinstance(e,AbstractElement): e.setdocument(doc) | python | def setdocument(self, doc):
"""Associate a document with this element.
Arguments:
doc (:class:`Document`): A document
Each element must be associated with a FoLiA document.
"""
assert isinstance(doc, Document)
if not self.doc:
self.doc = doc
if self.id:
if self.id in doc:
raise DuplicateIDError(self.id)
else:
self.doc.index[id] = self
for e in self: #recursive for all children
if isinstance(e,AbstractElement): e.setdocument(doc) | [
"def",
"setdocument",
"(",
"self",
",",
"doc",
")",
":",
"assert",
"isinstance",
"(",
"doc",
",",
"Document",
")",
"if",
"not",
"self",
".",
"doc",
":",
"self",
".",
"doc",
"=",
"doc",
"if",
"self",
".",
"id",
":",
"if",
"self",
".",
"id",
"in",
"doc",
":",
"raise",
"DuplicateIDError",
"(",
"self",
".",
"id",
")",
"else",
":",
"self",
".",
"doc",
".",
"index",
"[",
"id",
"]",
"=",
"self",
"for",
"e",
"in",
"self",
":",
"#recursive for all children",
"if",
"isinstance",
"(",
"e",
",",
"AbstractElement",
")",
":",
"e",
".",
"setdocument",
"(",
"doc",
")"
] | Associate a document with this element.
Arguments:
doc (:class:`Document`): A document
Each element must be associated with a FoLiA document. | [
"Associate",
"a",
"document",
"with",
"this",
"element",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1366-L1385 | -1 |
||||||
2 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.addable | def addable(Class, parent, set=None, raiseexceptions=True):
"""Tests whether a new element of this class can be added to the parent.
This method is mostly for internal use.
This will use the ``OCCURRENCES`` property, but may be overidden by subclasses for more customised behaviour.
Parameters:
parent (:class:`AbstractElement`): The element that is being added to
set (str or None): The set
raiseexceptions (bool): Raise an exception if the element can't be added?
Returns:
bool
Raises:
ValueError
"""
if not parent.__class__.accepts(Class, raiseexceptions, parent):
return False
if Class.OCCURRENCES > 0:
#check if the parent doesn't have too many already
count = parent.count(Class,None,True,[True, AbstractStructureElement]) #never descend into embedded structure annotatioton
if count >= Class.OCCURRENCES:
if raiseexceptions:
if parent.id:
extra = ' (id=' + parent.id + ')'
else:
extra = ''
raise DuplicateAnnotationError("Unable to add another object of type " + Class.__name__ + " to " + parent.__class__.__name__ + " " + extra + ". There are already " + str(count) + " instances of this class, which is the maximum.")
else:
return False
if Class.OCCURRENCES_PER_SET > 0 and set and Class.REQUIRED_ATTRIBS and Attrib.CLASS in Class.REQUIRED_ATTRIBS:
count = parent.count(Class,set,True, [True, AbstractStructureElement])
if count >= Class.OCCURRENCES_PER_SET:
if raiseexceptions:
if parent.id:
extra = ' (id=' + parent.id + ')'
else:
extra = ''
raise DuplicateAnnotationError("Unable to add another object of set " + set + " and type " + Class.__name__ + " to " + parent.__class__.__name__ + " " + extra + ". There are already " + str(count) + " instances of this class, which is the maximum for the set.")
else:
return False
return True | python | def addable(Class, parent, set=None, raiseexceptions=True):
"""Tests whether a new element of this class can be added to the parent.
This method is mostly for internal use.
This will use the ``OCCURRENCES`` property, but may be overidden by subclasses for more customised behaviour.
Parameters:
parent (:class:`AbstractElement`): The element that is being added to
set (str or None): The set
raiseexceptions (bool): Raise an exception if the element can't be added?
Returns:
bool
Raises:
ValueError
"""
if not parent.__class__.accepts(Class, raiseexceptions, parent):
return False
if Class.OCCURRENCES > 0:
#check if the parent doesn't have too many already
count = parent.count(Class,None,True,[True, AbstractStructureElement]) #never descend into embedded structure annotatioton
if count >= Class.OCCURRENCES:
if raiseexceptions:
if parent.id:
extra = ' (id=' + parent.id + ')'
else:
extra = ''
raise DuplicateAnnotationError("Unable to add another object of type " + Class.__name__ + " to " + parent.__class__.__name__ + " " + extra + ". There are already " + str(count) + " instances of this class, which is the maximum.")
else:
return False
if Class.OCCURRENCES_PER_SET > 0 and set and Class.REQUIRED_ATTRIBS and Attrib.CLASS in Class.REQUIRED_ATTRIBS:
count = parent.count(Class,set,True, [True, AbstractStructureElement])
if count >= Class.OCCURRENCES_PER_SET:
if raiseexceptions:
if parent.id:
extra = ' (id=' + parent.id + ')'
else:
extra = ''
raise DuplicateAnnotationError("Unable to add another object of set " + set + " and type " + Class.__name__ + " to " + parent.__class__.__name__ + " " + extra + ". There are already " + str(count) + " instances of this class, which is the maximum for the set.")
else:
return False
return True | [
"def",
"addable",
"(",
"Class",
",",
"parent",
",",
"set",
"=",
"None",
",",
"raiseexceptions",
"=",
"True",
")",
":",
"if",
"not",
"parent",
".",
"__class__",
".",
"accepts",
"(",
"Class",
",",
"raiseexceptions",
",",
"parent",
")",
":",
"return",
"False",
"if",
"Class",
".",
"OCCURRENCES",
">",
"0",
":",
"#check if the parent doesn't have too many already",
"count",
"=",
"parent",
".",
"count",
"(",
"Class",
",",
"None",
",",
"True",
",",
"[",
"True",
",",
"AbstractStructureElement",
"]",
")",
"#never descend into embedded structure annotatioton",
"if",
"count",
">=",
"Class",
".",
"OCCURRENCES",
":",
"if",
"raiseexceptions",
":",
"if",
"parent",
".",
"id",
":",
"extra",
"=",
"' (id='",
"+",
"parent",
".",
"id",
"+",
"')'",
"else",
":",
"extra",
"=",
"''",
"raise",
"DuplicateAnnotationError",
"(",
"\"Unable to add another object of type \"",
"+",
"Class",
".",
"__name__",
"+",
"\" to \"",
"+",
"parent",
".",
"__class__",
".",
"__name__",
"+",
"\" \"",
"+",
"extra",
"+",
"\". There are already \"",
"+",
"str",
"(",
"count",
")",
"+",
"\" instances of this class, which is the maximum.\"",
")",
"else",
":",
"return",
"False",
"if",
"Class",
".",
"OCCURRENCES_PER_SET",
">",
"0",
"and",
"set",
"and",
"Class",
".",
"REQUIRED_ATTRIBS",
"and",
"Attrib",
".",
"CLASS",
"in",
"Class",
".",
"REQUIRED_ATTRIBS",
":",
"count",
"=",
"parent",
".",
"count",
"(",
"Class",
",",
"set",
",",
"True",
",",
"[",
"True",
",",
"AbstractStructureElement",
"]",
")",
"if",
"count",
">=",
"Class",
".",
"OCCURRENCES_PER_SET",
":",
"if",
"raiseexceptions",
":",
"if",
"parent",
".",
"id",
":",
"extra",
"=",
"' (id='",
"+",
"parent",
".",
"id",
"+",
"')'",
"else",
":",
"extra",
"=",
"''",
"raise",
"DuplicateAnnotationError",
"(",
"\"Unable to add another object of set \"",
"+",
"set",
"+",
"\" and type \"",
"+",
"Class",
".",
"__name__",
"+",
"\" to \"",
"+",
"parent",
".",
"__class__",
".",
"__name__",
"+",
"\" \"",
"+",
"extra",
"+",
"\". There are already \"",
"+",
"str",
"(",
"count",
")",
"+",
"\" instances of this class, which is the maximum for the set.\"",
")",
"else",
":",
"return",
"False",
"return",
"True"
] | Tests whether a new element of this class can be added to the parent.
This method is mostly for internal use.
This will use the ``OCCURRENCES`` property, but may be overidden by subclasses for more customised behaviour.
Parameters:
parent (:class:`AbstractElement`): The element that is being added to
set (str or None): The set
raiseexceptions (bool): Raise an exception if the element can't be added?
Returns:
bool
Raises:
ValueError | [
"Tests",
"whether",
"a",
"new",
"element",
"of",
"this",
"class",
"can",
"be",
"added",
"to",
"the",
"parent",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1406-L1455 | -1 |
||||||
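The row above documents the occurrence checks performed by `addable`; a hedged sketch of the non-raising form follows (placeholder path, with `folia.PosAnnotation` chosen purely as an illustrative annotation type):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
word = next(iter(doc.words()))

# Ask whether another part-of-speech annotation may still be attached here,
# without raising DuplicateAnnotationError when the occurrence limit is hit.
if folia.PosAnnotation.addable(word, raiseexceptions=False):
    print("a PosAnnotation can still be added to this word")
else:
    print("occurrence limit reached")
```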
3 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.postappend | def postappend(self):
"""This method will be called after an element is added to another and does some checks.
It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use.
"""
#If the element was not associated with a document yet, do so now (and for all unassociated children:
if not self.doc and self.parent.doc:
self.setdocument(self.parent.doc)
if self.doc and self.doc.deepvalidation:
self.deepvalidation() | python | def postappend(self):
"""This method will be called after an element is added to another and does some checks.
It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use.
"""
#If the element was not associated with a document yet, do so now (and for all unassociated children:
if not self.doc and self.parent.doc:
self.setdocument(self.parent.doc)
if self.doc and self.doc.deepvalidation:
self.deepvalidation() | [
"def",
"postappend",
"(",
"self",
")",
":",
"#If the element was not associated with a document yet, do so now (and for all unassociated children:",
"if",
"not",
"self",
".",
"doc",
"and",
"self",
".",
"parent",
".",
"doc",
":",
"self",
".",
"setdocument",
"(",
"self",
".",
"parent",
".",
"doc",
")",
"if",
"self",
".",
"doc",
"and",
"self",
".",
"doc",
".",
"deepvalidation",
":",
"self",
".",
"deepvalidation",
"(",
")"
] | This method will be called after an element is added to another and does some checks.
It can do extra checks and if necessary raise exceptions to prevent addition. By default makes sure the right document is associated.
This method is mostly for internal use. | [
"This",
"method",
"will",
"be",
"called",
"after",
"an",
"element",
"is",
"added",
"to",
"another",
"and",
"does",
"some",
"checks",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1458-L1471 | -1 |
||||||
4 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.updatetext | def updatetext(self):
"""Recompute textual value based on the text content of the children. Only supported on elements that are a ``TEXTCONTAINER``"""
if self.TEXTCONTAINER:
s = ""
for child in self:
if isinstance(child, AbstractElement):
child.updatetext()
s += child.text()
elif isstring(child):
s += child
self.data = [s] | python | def updatetext(self):
"""Recompute textual value based on the text content of the children. Only supported on elements that are a ``TEXTCONTAINER``"""
if self.TEXTCONTAINER:
s = ""
for child in self:
if isinstance(child, AbstractElement):
child.updatetext()
s += child.text()
elif isstring(child):
s += child
self.data = [s] | [
"def",
"updatetext",
"(",
"self",
")",
":",
"if",
"self",
".",
"TEXTCONTAINER",
":",
"s",
"=",
"\"\"",
"for",
"child",
"in",
"self",
":",
"if",
"isinstance",
"(",
"child",
",",
"AbstractElement",
")",
":",
"child",
".",
"updatetext",
"(",
")",
"s",
"+=",
"child",
".",
"text",
"(",
")",
"elif",
"isstring",
"(",
"child",
")",
":",
"s",
"+=",
"child",
"self",
".",
"data",
"=",
"[",
"s",
"]"
] | Recompute textual value based on the text content of the children. Only supported on elements that are a ``TEXTCONTAINER`` | [
"Recompute",
"textual",
"value",
"based",
"on",
"the",
"text",
"content",
"of",
"the",
"children",
".",
"Only",
"supported",
"on",
"elements",
"that",
"are",
"a",
"TEXTCONTAINER"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1772-L1782 | -1 |
||||||
5 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.ancestors | def ancestors(self, Class=None):
"""Generator yielding all ancestors of this element, effectively back-tracing its path to the root element. A tuple of multiple classes may be specified.
Arguments:
*Class: The class or classes (:class:`AbstractElement` or subclasses). Not instances!
Yields:
elements (instances derived from :class:`AbstractElement`)
"""
e = self
while e:
if e.parent:
e = e.parent
if not Class or isinstance(e,Class):
yield e
elif isinstance(Class, tuple):
for C in Class:
if isinstance(e,C):
yield e
else:
break | python | def ancestors(self, Class=None):
"""Generator yielding all ancestors of this element, effectively back-tracing its path to the root element. A tuple of multiple classes may be specified.
Arguments:
*Class: The class or classes (:class:`AbstractElement` or subclasses). Not instances!
Yields:
elements (instances derived from :class:`AbstractElement`)
"""
e = self
while e:
if e.parent:
e = e.parent
if not Class or isinstance(e,Class):
yield e
elif isinstance(Class, tuple):
for C in Class:
if isinstance(e,C):
yield e
else:
break | [
"def",
"ancestors",
"(",
"self",
",",
"Class",
"=",
"None",
")",
":",
"e",
"=",
"self",
"while",
"e",
":",
"if",
"e",
".",
"parent",
":",
"e",
"=",
"e",
".",
"parent",
"if",
"not",
"Class",
"or",
"isinstance",
"(",
"e",
",",
"Class",
")",
":",
"yield",
"e",
"elif",
"isinstance",
"(",
"Class",
",",
"tuple",
")",
":",
"for",
"C",
"in",
"Class",
":",
"if",
"isinstance",
"(",
"e",
",",
"C",
")",
":",
"yield",
"e",
"else",
":",
"break"
] | Generator yielding all ancestors of this element, effectively back-tracing its path to the root element. A tuple of multiple classes may be specified.
Arguments:
*Class: The class or classes (:class:`AbstractElement` or subclasses). Not instances!
Yields:
elements (instances derived from :class:`AbstractElement`) | [
"Generator",
"yielding",
"all",
"ancestors",
"of",
"this",
"element",
"effectively",
"back",
"-",
"tracing",
"its",
"path",
"to",
"the",
"root",
"element",
".",
"A",
"tuple",
"of",
"multiple",
"classes",
"may",
"be",
"specified",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1840-L1860 | -1 |
||||||
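A short illustrative sketch of iterating `ancestors` with a class filter (placeholder path; assumes the document contains at least one word):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
word = next(iter(doc.words()))

# Walk upward from the word to the root, keeping only structural ancestors.
for ancestor in word.ancestors(folia.AbstractStructureElement):
    print(type(ancestor).__name__, ancestor.id)
```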
6 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.ancestor | def ancestor(self, *Classes):
"""Find the most immediate ancestor of the specified type, multiple classes may be specified.
Arguments:
*Classes: The possible classes (:class:`AbstractElement` or subclasses) to select from. Not instances!
Example::
paragraph = word.ancestor(folia.Paragraph)
"""
for e in self.ancestors(tuple(Classes)):
return e
raise NoSuchAnnotation | python | def ancestor(self, *Classes):
"""Find the most immediate ancestor of the specified type, multiple classes may be specified.
Arguments:
*Classes: The possible classes (:class:`AbstractElement` or subclasses) to select from. Not instances!
Example::
paragraph = word.ancestor(folia.Paragraph)
"""
for e in self.ancestors(tuple(Classes)):
return e
raise NoSuchAnnotation | [
"def",
"ancestor",
"(",
"self",
",",
"*",
"Classes",
")",
":",
"for",
"e",
"in",
"self",
".",
"ancestors",
"(",
"tuple",
"(",
"Classes",
")",
")",
":",
"return",
"e",
"raise",
"NoSuchAnnotation"
] | Find the most immediate ancestor of the specified type, multiple classes may be specified.
Arguments:
*Classes: The possible classes (:class:`AbstractElement` or subclasses) to select from. Not instances!
Example::
paragraph = word.ancestor(folia.Paragraph) | [
"Find",
"the",
"most",
"immediate",
"ancestor",
"of",
"the",
"specified",
"type",
"multiple",
"classes",
"may",
"be",
"specified",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L1862-L1874 | -1 |
||||||
7 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.json | def json(self, attribs=None, recurse=True, ignorelist=False):
"""Serialises the FoLiA element and all its contents to a Python dictionary suitable for serialisation to JSON.
Example::
import json
json.dumps(word.json())
Returns:
dict
"""
jsonnode = {}
jsonnode['type'] = self.XMLTAG
if self.id:
jsonnode['id'] = self.id
if self.set:
jsonnode['set'] = self.set
if self.cls:
jsonnode['class'] = self.cls
if self.annotator:
jsonnode['annotator'] = self.annotator
if self.annotatortype:
if self.annotatortype == AnnotatorType.AUTO:
jsonnode['annotatortype'] = "auto"
elif self.annotatortype == AnnotatorType.MANUAL:
jsonnode['annotatortype'] = "manual"
if self.confidence is not None:
jsonnode['confidence'] = self.confidence
if self.n:
jsonnode['n'] = self.n
if self.auth:
jsonnode['auth'] = self.auth
if self.datetime:
jsonnode['datetime'] = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
if recurse: #pylint: disable=too-many-nested-blocks
jsonnode['children'] = []
if self.TEXTCONTAINER:
jsonnode['text'] = self.text()
if self.PHONCONTAINER:
jsonnode['phon'] = self.phon()
for child in self:
if self.TEXTCONTAINER and isstring(child):
jsonnode['children'].append(child)
elif not self.PHONCONTAINER:
#check ignore list
ignore = False
if ignorelist:
for e in ignorelist:
if isinstance(child,e):
ignore = True
break
if not ignore:
jsonnode['children'].append(child.json(attribs,recurse,ignorelist))
if attribs:
for attrib in attribs:
jsonnode[attrib] = attribs
return jsonnode | python | def json(self, attribs=None, recurse=True, ignorelist=False):
"""Serialises the FoLiA element and all its contents to a Python dictionary suitable for serialisation to JSON.
Example::
import json
json.dumps(word.json())
Returns:
dict
"""
jsonnode = {}
jsonnode['type'] = self.XMLTAG
if self.id:
jsonnode['id'] = self.id
if self.set:
jsonnode['set'] = self.set
if self.cls:
jsonnode['class'] = self.cls
if self.annotator:
jsonnode['annotator'] = self.annotator
if self.annotatortype:
if self.annotatortype == AnnotatorType.AUTO:
jsonnode['annotatortype'] = "auto"
elif self.annotatortype == AnnotatorType.MANUAL:
jsonnode['annotatortype'] = "manual"
if self.confidence is not None:
jsonnode['confidence'] = self.confidence
if self.n:
jsonnode['n'] = self.n
if self.auth:
jsonnode['auth'] = self.auth
if self.datetime:
jsonnode['datetime'] = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
if recurse: #pylint: disable=too-many-nested-blocks
jsonnode['children'] = []
if self.TEXTCONTAINER:
jsonnode['text'] = self.text()
if self.PHONCONTAINER:
jsonnode['phon'] = self.phon()
for child in self:
if self.TEXTCONTAINER and isstring(child):
jsonnode['children'].append(child)
elif not self.PHONCONTAINER:
#check ignore list
ignore = False
if ignorelist:
for e in ignorelist:
if isinstance(child,e):
ignore = True
break
if not ignore:
jsonnode['children'].append(child.json(attribs,recurse,ignorelist))
if attribs:
for attrib in attribs:
jsonnode[attrib] = attribs
return jsonnode | [
"def",
"json",
"(",
"self",
",",
"attribs",
"=",
"None",
",",
"recurse",
"=",
"True",
",",
"ignorelist",
"=",
"False",
")",
":",
"jsonnode",
"=",
"{",
"}",
"jsonnode",
"[",
"'type'",
"]",
"=",
"self",
".",
"XMLTAG",
"if",
"self",
".",
"id",
":",
"jsonnode",
"[",
"'id'",
"]",
"=",
"self",
".",
"id",
"if",
"self",
".",
"set",
":",
"jsonnode",
"[",
"'set'",
"]",
"=",
"self",
".",
"set",
"if",
"self",
".",
"cls",
":",
"jsonnode",
"[",
"'class'",
"]",
"=",
"self",
".",
"cls",
"if",
"self",
".",
"annotator",
":",
"jsonnode",
"[",
"'annotator'",
"]",
"=",
"self",
".",
"annotator",
"if",
"self",
".",
"annotatortype",
":",
"if",
"self",
".",
"annotatortype",
"==",
"AnnotatorType",
".",
"AUTO",
":",
"jsonnode",
"[",
"'annotatortype'",
"]",
"=",
"\"auto\"",
"elif",
"self",
".",
"annotatortype",
"==",
"AnnotatorType",
".",
"MANUAL",
":",
"jsonnode",
"[",
"'annotatortype'",
"]",
"=",
"\"manual\"",
"if",
"self",
".",
"confidence",
"is",
"not",
"None",
":",
"jsonnode",
"[",
"'confidence'",
"]",
"=",
"self",
".",
"confidence",
"if",
"self",
".",
"n",
":",
"jsonnode",
"[",
"'n'",
"]",
"=",
"self",
".",
"n",
"if",
"self",
".",
"auth",
":",
"jsonnode",
"[",
"'auth'",
"]",
"=",
"self",
".",
"auth",
"if",
"self",
".",
"datetime",
":",
"jsonnode",
"[",
"'datetime'",
"]",
"=",
"self",
".",
"datetime",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"if",
"recurse",
":",
"#pylint: disable=too-many-nested-blocks",
"jsonnode",
"[",
"'children'",
"]",
"=",
"[",
"]",
"if",
"self",
".",
"TEXTCONTAINER",
":",
"jsonnode",
"[",
"'text'",
"]",
"=",
"self",
".",
"text",
"(",
")",
"if",
"self",
".",
"PHONCONTAINER",
":",
"jsonnode",
"[",
"'phon'",
"]",
"=",
"self",
".",
"phon",
"(",
")",
"for",
"child",
"in",
"self",
":",
"if",
"self",
".",
"TEXTCONTAINER",
"and",
"isstring",
"(",
"child",
")",
":",
"jsonnode",
"[",
"'children'",
"]",
".",
"append",
"(",
"child",
")",
"elif",
"not",
"self",
".",
"PHONCONTAINER",
":",
"#check ignore list",
"ignore",
"=",
"False",
"if",
"ignorelist",
":",
"for",
"e",
"in",
"ignorelist",
":",
"if",
"isinstance",
"(",
"child",
",",
"e",
")",
":",
"ignore",
"=",
"True",
"break",
"if",
"not",
"ignore",
":",
"jsonnode",
"[",
"'children'",
"]",
".",
"append",
"(",
"child",
".",
"json",
"(",
"attribs",
",",
"recurse",
",",
"ignorelist",
")",
")",
"if",
"attribs",
":",
"for",
"attrib",
"in",
"attribs",
":",
"jsonnode",
"[",
"attrib",
"]",
"=",
"attribs",
"return",
"jsonnode"
] | Serialises the FoLiA element and all its contents to a Python dictionary suitable for serialisation to JSON.
Example::
import json
json.dumps(word.json())
Returns:
dict | [
"Serialises",
"the",
"FoLiA",
"element",
"and",
"all",
"its",
"contents",
"to",
"a",
"Python",
"dictionary",
"suitable",
"for",
"serialisation",
"to",
"JSON",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2050-L2110 | -1 |
||||||
8 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.xmlstring | def xmlstring(self, pretty_print=False):
"""Serialises this FoLiA element and all its contents to XML.
Returns:
str: a string with XML representation for this element and all its children"""
s = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')
if sys.version < '3':
if isinstance(s, str):
s = unicode(s,'utf-8') #pylint: disable=undefined-variable
else:
if isinstance(s,bytes):
s = str(s,'utf-8')
s = s.replace('ns0:','') #ugly patch to get rid of namespace prefix
s = s.replace(':ns0','')
return s | python | def xmlstring(self, pretty_print=False):
"""Serialises this FoLiA element and all its contents to XML.
Returns:
str: a string with XML representation for this element and all its children"""
s = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')
if sys.version < '3':
if isinstance(s, str):
s = unicode(s,'utf-8') #pylint: disable=undefined-variable
else:
if isinstance(s,bytes):
s = str(s,'utf-8')
s = s.replace('ns0:','') #ugly patch to get rid of namespace prefix
s = s.replace(':ns0','')
return s | [
"def",
"xmlstring",
"(",
"self",
",",
"pretty_print",
"=",
"False",
")",
":",
"s",
"=",
"ElementTree",
".",
"tostring",
"(",
"self",
".",
"xml",
"(",
")",
",",
"xml_declaration",
"=",
"False",
",",
"pretty_print",
"=",
"pretty_print",
",",
"encoding",
"=",
"'utf-8'",
")",
"if",
"sys",
".",
"version",
"<",
"'3'",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"s",
"=",
"unicode",
"(",
"s",
",",
"'utf-8'",
")",
"#pylint: disable=undefined-variable",
"else",
":",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"s",
"=",
"str",
"(",
"s",
",",
"'utf-8'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'ns0:'",
",",
"''",
")",
"#ugly patch to get rid of namespace prefix",
"s",
"=",
"s",
".",
"replace",
"(",
"':ns0'",
",",
"''",
")",
"return",
"s"
] | Serialises this FoLiA element and all its contents to XML.
Returns:
str: a string with XML representation for this element and all its children | [
"Serialises",
"this",
"FoLiA",
"element",
"and",
"all",
"its",
"contents",
"to",
"XML",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2114-L2129 | -1 |
||||||
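A one-line illustrative use of `xmlstring` (same placeholder setup and assumptions as the earlier sketches):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
word = next(iter(doc.words()))
print(word.xmlstring(pretty_print=True))  # XML for this word and its children, ns0 prefix stripped
```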
9 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.select | def select(self, Class, set=None, recursive=True, ignore=True, node=None): #pylint: disable=bad-classmethod-argument,redefined-builtin
"""Select child elements of the specified class.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
recursive (bool): Select recursively? Descending into child elements? Defaults to ``True``.
ignore: A list of Classes to ignore, if set to ``True`` instead of a list, all non-authoritative elements will be skipped (this is the default behaviour and corresponds to the following elements: :class:`Alternative`, :class:`AlternativeLayer`, :class:`Suggestion`, and :class:`folia.Original`. These elements and those contained within are never *authorative*. You may also include the boolean True as a member of a list, if you want to skip additional tags along the predefined non-authoritative ones.
* ``node``: Reserved for internal usage, used in recursion.
Yields:
Elements (instances derived from :class:`AbstractElement`)
Example::
for sense in text.select(folia.Sense, 'cornetto', True, [folia.Original, folia.Suggestion, folia.Alternative] ):
..
"""
#if ignorelist is True:
# ignorelist = default_ignore
if not node:
node = self
for e in self.data: #pylint: disable=too-many-nested-blocks
if (not self.TEXTCONTAINER and not self.PHONCONTAINER) or isinstance(e, AbstractElement):
if ignore is True:
try:
if not e.auth:
continue
except AttributeError:
#not all elements have auth attribute..
pass
elif ignore: #list
doignore = False
for c in ignore:
if c is True:
try:
if not e.auth:
doignore =True
break
except AttributeError:
#not all elements have auth attribute..
pass
elif c == e.__class__ or issubclass(e.__class__,c):
doignore = True
break
if doignore:
continue
if isinstance(e, Class):
if not set is None:
try:
if e.set != set:
continue
except AttributeError:
continue
yield e
if recursive:
for e2 in e.select(Class, set, recursive, ignore, e):
if not set is None:
try:
if e2.set != set:
continue
except AttributeError:
continue
yield e2 | python | def select(self, Class, set=None, recursive=True, ignore=True, node=None): #pylint: disable=bad-classmethod-argument,redefined-builtin
"""Select child elements of the specified class.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
recursive (bool): Select recursively? Descending into child elements? Defaults to ``True``.
ignore: A list of Classes to ignore, if set to ``True`` instead of a list, all non-authoritative elements will be skipped (this is the default behaviour and corresponds to the following elements: :class:`Alternative`, :class:`AlternativeLayer`, :class:`Suggestion`, and :class:`folia.Original`. These elements and those contained within are never *authorative*. You may also include the boolean True as a member of a list, if you want to skip additional tags along the predefined non-authoritative ones.
* ``node``: Reserved for internal usage, used in recursion.
Yields:
Elements (instances derived from :class:`AbstractElement`)
Example::
for sense in text.select(folia.Sense, 'cornetto', True, [folia.Original, folia.Suggestion, folia.Alternative] ):
..
"""
#if ignorelist is True:
# ignorelist = default_ignore
if not node:
node = self
for e in self.data: #pylint: disable=too-many-nested-blocks
if (not self.TEXTCONTAINER and not self.PHONCONTAINER) or isinstance(e, AbstractElement):
if ignore is True:
try:
if not e.auth:
continue
except AttributeError:
#not all elements have auth attribute..
pass
elif ignore: #list
doignore = False
for c in ignore:
if c is True:
try:
if not e.auth:
doignore =True
break
except AttributeError:
#not all elements have auth attribute..
pass
elif c == e.__class__ or issubclass(e.__class__,c):
doignore = True
break
if doignore:
continue
if isinstance(e, Class):
if not set is None:
try:
if e.set != set:
continue
except AttributeError:
continue
yield e
if recursive:
for e2 in e.select(Class, set, recursive, ignore, e):
if not set is None:
try:
if e2.set != set:
continue
except AttributeError:
continue
yield e2 | [
"def",
"select",
"(",
"self",
",",
"Class",
",",
"set",
"=",
"None",
",",
"recursive",
"=",
"True",
",",
"ignore",
"=",
"True",
",",
"node",
"=",
"None",
")",
":",
"#pylint: disable=bad-classmethod-argument,redefined-builtin",
"#if ignorelist is True:",
"# ignorelist = default_ignore",
"if",
"not",
"node",
":",
"node",
"=",
"self",
"for",
"e",
"in",
"self",
".",
"data",
":",
"#pylint: disable=too-many-nested-blocks",
"if",
"(",
"not",
"self",
".",
"TEXTCONTAINER",
"and",
"not",
"self",
".",
"PHONCONTAINER",
")",
"or",
"isinstance",
"(",
"e",
",",
"AbstractElement",
")",
":",
"if",
"ignore",
"is",
"True",
":",
"try",
":",
"if",
"not",
"e",
".",
"auth",
":",
"continue",
"except",
"AttributeError",
":",
"#not all elements have auth attribute..",
"pass",
"elif",
"ignore",
":",
"#list",
"doignore",
"=",
"False",
"for",
"c",
"in",
"ignore",
":",
"if",
"c",
"is",
"True",
":",
"try",
":",
"if",
"not",
"e",
".",
"auth",
":",
"doignore",
"=",
"True",
"break",
"except",
"AttributeError",
":",
"#not all elements have auth attribute..",
"pass",
"elif",
"c",
"==",
"e",
".",
"__class__",
"or",
"issubclass",
"(",
"e",
".",
"__class__",
",",
"c",
")",
":",
"doignore",
"=",
"True",
"break",
"if",
"doignore",
":",
"continue",
"if",
"isinstance",
"(",
"e",
",",
"Class",
")",
":",
"if",
"not",
"set",
"is",
"None",
":",
"try",
":",
"if",
"e",
".",
"set",
"!=",
"set",
":",
"continue",
"except",
"AttributeError",
":",
"continue",
"yield",
"e",
"if",
"recursive",
":",
"for",
"e2",
"in",
"e",
".",
"select",
"(",
"Class",
",",
"set",
",",
"recursive",
",",
"ignore",
",",
"e",
")",
":",
"if",
"not",
"set",
"is",
"None",
":",
"try",
":",
"if",
"e2",
".",
"set",
"!=",
"set",
":",
"continue",
"except",
"AttributeError",
":",
"continue",
"yield",
"e2"
] | Select child elements of the specified class.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
recursive (bool): Select recursively? Descending into child elements? Defaults to ``True``.
ignore: A list of Classes to ignore, if set to ``True`` instead of a list, all non-authoritative elements will be skipped (this is the default behaviour and corresponds to the following elements: :class:`Alternative`, :class:`AlternativeLayer`, :class:`Suggestion`, and :class:`folia.Original`. These elements and those contained within are never *authorative*. You may also include the boolean True as a member of a list, if you want to skip additional tags along the predefined non-authoritative ones.
* ``node``: Reserved for internal usage, used in recursion.
Yields:
Elements (instances derived from :class:`AbstractElement`)
Example::
for sense in text.select(folia.Sense, 'cornetto', True, [folia.Original, folia.Suggestion, folia.Alternative] ):
.. | [
"Select",
"child",
"elements",
"of",
"the",
"specified",
"class",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2132-L2201 | -1 |
||||||
10 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.getmetadata | def getmetadata(self, key=None):
"""Get the metadata that applies to this element, automatically inherited from parent elements"""
if self.metadata:
d = self.doc.submetadata[self.metadata]
elif self.parent:
d = self.parent.getmetadata()
elif self.doc:
d = self.doc.metadata
else:
return None
if key:
return d[key]
else:
return d | python | def getmetadata(self, key=None):
"""Get the metadata that applies to this element, automatically inherited from parent elements"""
if self.metadata:
d = self.doc.submetadata[self.metadata]
elif self.parent:
d = self.parent.getmetadata()
elif self.doc:
d = self.doc.metadata
else:
return None
if key:
return d[key]
else:
return d | [
"def",
"getmetadata",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"self",
".",
"metadata",
":",
"d",
"=",
"self",
".",
"doc",
".",
"submetadata",
"[",
"self",
".",
"metadata",
"]",
"elif",
"self",
".",
"parent",
":",
"d",
"=",
"self",
".",
"parent",
".",
"getmetadata",
"(",
")",
"elif",
"self",
".",
"doc",
":",
"d",
"=",
"self",
".",
"doc",
".",
"metadata",
"else",
":",
"return",
"None",
"if",
"key",
":",
"return",
"d",
"[",
"key",
"]",
"else",
":",
"return",
"d"
] | Get the metadata that applies to this element, automatically inherited from parent elements | [
"Get",
"the",
"metadata",
"that",
"applies",
"to",
"this",
"element",
"automatically",
"inherited",
"from",
"parent",
"elements"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2221-L2234 | -1 |
||||||
11 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.getindex | def getindex(self, child, recursive=True, ignore=True):
"""Get the index at which an element occurs, recursive by default!
Returns:
int
"""
#breadth first search
for i, c in enumerate(self.data):
if c is child:
return i
if recursive: #pylint: disable=too-many-nested-blocks
for i, c in enumerate(self.data):
if ignore is True:
try:
if not c.auth:
continue
except AttributeError:
#not all elements have auth attribute..
pass
elif ignore: #list
doignore = False
for e in ignore:
if e is True:
try:
if not c.auth:
doignore =True
break
except AttributeError:
#not all elements have auth attribute..
pass
elif e == c.__class__ or issubclass(c.__class__,e):
doignore = True
break
if doignore:
continue
if isinstance(c, AbstractElement):
j = c.getindex(child, recursive)
if j != -1:
return i #yes, i ... not j!
return -1 | python | def getindex(self, child, recursive=True, ignore=True):
"""Get the index at which an element occurs, recursive by default!
Returns:
int
"""
#breadth first search
for i, c in enumerate(self.data):
if c is child:
return i
if recursive: #pylint: disable=too-many-nested-blocks
for i, c in enumerate(self.data):
if ignore is True:
try:
if not c.auth:
continue
except AttributeError:
#not all elements have auth attribute..
pass
elif ignore: #list
doignore = False
for e in ignore:
if e is True:
try:
if not c.auth:
doignore =True
break
except AttributeError:
#not all elements have auth attribute..
pass
elif e == c.__class__ or issubclass(c.__class__,e):
doignore = True
break
if doignore:
continue
if isinstance(c, AbstractElement):
j = c.getindex(child, recursive)
if j != -1:
return i #yes, i ... not j!
return -1 | [
"def",
"getindex",
"(",
"self",
",",
"child",
",",
"recursive",
"=",
"True",
",",
"ignore",
"=",
"True",
")",
":",
"#breadth first search",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"if",
"c",
"is",
"child",
":",
"return",
"i",
"if",
"recursive",
":",
"#pylint: disable=too-many-nested-blocks",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"self",
".",
"data",
")",
":",
"if",
"ignore",
"is",
"True",
":",
"try",
":",
"if",
"not",
"c",
".",
"auth",
":",
"continue",
"except",
"AttributeError",
":",
"#not all elements have auth attribute..",
"pass",
"elif",
"ignore",
":",
"#list",
"doignore",
"=",
"False",
"for",
"e",
"in",
"ignore",
":",
"if",
"e",
"is",
"True",
":",
"try",
":",
"if",
"not",
"c",
".",
"auth",
":",
"doignore",
"=",
"True",
"break",
"except",
"AttributeError",
":",
"#not all elements have auth attribute..",
"pass",
"elif",
"e",
"==",
"c",
".",
"__class__",
"or",
"issubclass",
"(",
"c",
".",
"__class__",
",",
"e",
")",
":",
"doignore",
"=",
"True",
"break",
"if",
"doignore",
":",
"continue",
"if",
"isinstance",
"(",
"c",
",",
"AbstractElement",
")",
":",
"j",
"=",
"c",
".",
"getindex",
"(",
"child",
",",
"recursive",
")",
"if",
"j",
"!=",
"-",
"1",
":",
"return",
"i",
"#yes, i ... not j!",
"return",
"-",
"1"
] | Get the index at which an element occurs, recursive by default!
Returns:
int | [
"Get",
"the",
"index",
"at",
"which",
"an",
"element",
"occurs",
"recursive",
"by",
"default!"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2238-L2278 | -1 |
||||||
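A hedged sketch of `getindex` (placeholder path; assumes at least one sentence containing a word):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
sentence = next(iter(doc.sentences()))
word = next(iter(sentence.select(folia.Word)))

# Index of the direct child of the sentence that is (or contains) the word; -1 if absent.
print(sentence.getindex(word))
```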
12 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.precedes | def precedes(self, other):
"""Returns a boolean indicating whether this element precedes the other element"""
try:
ancestor = next(commonancestors(AbstractElement, self, other))
except StopIteration:
raise Exception("Elements share no common ancestor")
#now we just do a depth first search and see who comes first
def callback(e):
if e is self:
return True
elif e is other:
return False
return None
result = ancestor.depthfirstsearch(callback)
if result is None:
raise Exception("Unable to find relation between elements! (shouldn't happen)")
return result | python | def precedes(self, other):
"""Returns a boolean indicating whether this element precedes the other element"""
try:
ancestor = next(commonancestors(AbstractElement, self, other))
except StopIteration:
raise Exception("Elements share no common ancestor")
#now we just do a depth first search and see who comes first
def callback(e):
if e is self:
return True
elif e is other:
return False
return None
result = ancestor.depthfirstsearch(callback)
if result is None:
raise Exception("Unable to find relation between elements! (shouldn't happen)")
return result | [
"def",
"precedes",
"(",
"self",
",",
"other",
")",
":",
"try",
":",
"ancestor",
"=",
"next",
"(",
"commonancestors",
"(",
"AbstractElement",
",",
"self",
",",
"other",
")",
")",
"except",
"StopIteration",
":",
"raise",
"Exception",
"(",
"\"Elements share no common ancestor\"",
")",
"#now we just do a depth first search and see who comes first",
"def",
"callback",
"(",
"e",
")",
":",
"if",
"e",
"is",
"self",
":",
"return",
"True",
"elif",
"e",
"is",
"other",
":",
"return",
"False",
"return",
"None",
"result",
"=",
"ancestor",
".",
"depthfirstsearch",
"(",
"callback",
")",
"if",
"result",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Unable to find relation between elements! (shouldn't happen)\"",
")",
"return",
"result"
] | Returns a boolean indicating whether this element precedes the other element | [
"Returns",
"a",
"boolean",
"indicating",
"whether",
"this",
"element",
"precedes",
"the",
"other",
"element"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2280-L2296 | -1 |
||||||
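An illustrative check of document order with `precedes` (placeholder path; assumes the document has at least two words sharing a common ancestor):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
words = list(doc.words())

first, second = words[0], words[1]
print(first.precedes(second))   # True: first comes before second in document order
print(second.precedes(first))   # False
```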
13 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.depthfirstsearch | def depthfirstsearch(self, function):
"""Generic depth first search algorithm using a callback function, continues as long as the callback function returns None"""
result = function(self)
if result is not None:
return result
for e in self:
result = e.depthfirstsearch(function)
if result is not None:
return result
return None | python | def depthfirstsearch(self, function):
"""Generic depth first search algorithm using a callback function, continues as long as the callback function returns None"""
result = function(self)
if result is not None:
return result
for e in self:
result = e.depthfirstsearch(function)
if result is not None:
return result
return None | [
"def",
"depthfirstsearch",
"(",
"self",
",",
"function",
")",
":",
"result",
"=",
"function",
"(",
"self",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"for",
"e",
"in",
"self",
":",
"result",
"=",
"e",
".",
"depthfirstsearch",
"(",
"function",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"return",
"None"
] | Generic depth first search algorithm using a callback function, continues as long as the callback function returns None | [
"Generic",
"depth",
"first",
"search",
"algorithm",
"using",
"a",
"callback",
"function",
"continues",
"as",
"long",
"as",
"the",
"callback",
"function",
"returns",
"None"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2299-L2308 | -1 |
||||||
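A small sketch of the callback contract of `depthfirstsearch` (placeholder path; the search continues as long as the callback returns None):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
sentence = next(iter(doc.sentences()))

def first_word(element):
    # Return a non-None value to stop the search, None to keep descending.
    return element if isinstance(element, folia.Word) else None

print(sentence.depthfirstsearch(first_word))  # first Word in depth-first order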
14 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.next | def next(self, Class=True, scope=True, reverse=False):
"""Returns the next element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off `'AbstractElement``, may also be a tuple of multiple classes. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all.
"""
if Class is True: Class = self.__class__
if scope is True: scope = STRUCTURESCOPE
structural = Class is not None and issubclass(Class,AbstractStructureElement)
if reverse:
order = reversed
descendindex = -1
else:
order = lambda x: x #pylint: disable=redefined-variable-type
descendindex = 0
child = self
parent = self.parent
while parent: #pylint: disable=too-many-nested-blocks
if len(parent) > 1:
returnnext = False
for e in order(parent):
if e is child:
#we found the current item, next item will be the one to return
returnnext = True
elif returnnext and e.auth and not isinstance(e,AbstractAnnotationLayer) and (not structural or (structural and (not isinstance(e,(AbstractTokenAnnotation,TextContent)) ) )):
if structural and isinstance(e,Correction):
if not list(e.select(AbstractStructureElement)): #skip-over non-structural correction
continue
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#this is not yet the element of the type we are looking for, we are going to descend again in the very leftmost (rightmost if reversed) branch only
while e.data:
e = e.data[descendindex]
if not isinstance(e, AbstractElement):
return None #we've gone too far
if e.auth and not isinstance(e,AbstractAnnotationLayer):
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#descend deeper
continue
return None
#generational iteration
child = parent
if scope is not None and child.__class__ in scope:
#you shall not pass!
break
parent = parent.parent
return None | python | def next(self, Class=True, scope=True, reverse=False):
"""Returns the next element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off `'AbstractElement``, may also be a tuple of multiple classes. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all.
"""
if Class is True: Class = self.__class__
if scope is True: scope = STRUCTURESCOPE
structural = Class is not None and issubclass(Class,AbstractStructureElement)
if reverse:
order = reversed
descendindex = -1
else:
order = lambda x: x #pylint: disable=redefined-variable-type
descendindex = 0
child = self
parent = self.parent
while parent: #pylint: disable=too-many-nested-blocks
if len(parent) > 1:
returnnext = False
for e in order(parent):
if e is child:
#we found the current item, next item will be the one to return
returnnext = True
elif returnnext and e.auth and not isinstance(e,AbstractAnnotationLayer) and (not structural or (structural and (not isinstance(e,(AbstractTokenAnnotation,TextContent)) ) )):
if structural and isinstance(e,Correction):
if not list(e.select(AbstractStructureElement)): #skip-over non-structural correction
continue
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#this is not yet the element of the type we are looking for, we are going to descend again in the very leftmost (rightmost if reversed) branch only
while e.data:
e = e.data[descendindex]
if not isinstance(e, AbstractElement):
return None #we've gone too far
if e.auth and not isinstance(e,AbstractAnnotationLayer):
if Class is None or (isinstance(Class,tuple) and (any(isinstance(e,C) for C in Class))) or isinstance(e,Class):
return e
else:
#descend deeper
continue
return None
#generational iteration
child = parent
if scope is not None and child.__class__ in scope:
#you shall not pass!
break
parent = parent.parent
return None | [
"def",
"next",
"(",
"self",
",",
"Class",
"=",
"True",
",",
"scope",
"=",
"True",
",",
"reverse",
"=",
"False",
")",
":",
"if",
"Class",
"is",
"True",
":",
"Class",
"=",
"self",
".",
"__class__",
"if",
"scope",
"is",
"True",
":",
"scope",
"=",
"STRUCTURESCOPE",
"structural",
"=",
"Class",
"is",
"not",
"None",
"and",
"issubclass",
"(",
"Class",
",",
"AbstractStructureElement",
")",
"if",
"reverse",
":",
"order",
"=",
"reversed",
"descendindex",
"=",
"-",
"1",
"else",
":",
"order",
"=",
"lambda",
"x",
":",
"x",
"#pylint: disable=redefined-variable-type",
"descendindex",
"=",
"0",
"child",
"=",
"self",
"parent",
"=",
"self",
".",
"parent",
"while",
"parent",
":",
"#pylint: disable=too-many-nested-blocks",
"if",
"len",
"(",
"parent",
")",
">",
"1",
":",
"returnnext",
"=",
"False",
"for",
"e",
"in",
"order",
"(",
"parent",
")",
":",
"if",
"e",
"is",
"child",
":",
"#we found the current item, next item will be the one to return",
"returnnext",
"=",
"True",
"elif",
"returnnext",
"and",
"e",
".",
"auth",
"and",
"not",
"isinstance",
"(",
"e",
",",
"AbstractAnnotationLayer",
")",
"and",
"(",
"not",
"structural",
"or",
"(",
"structural",
"and",
"(",
"not",
"isinstance",
"(",
"e",
",",
"(",
"AbstractTokenAnnotation",
",",
"TextContent",
")",
")",
")",
")",
")",
":",
"if",
"structural",
"and",
"isinstance",
"(",
"e",
",",
"Correction",
")",
":",
"if",
"not",
"list",
"(",
"e",
".",
"select",
"(",
"AbstractStructureElement",
")",
")",
":",
"#skip-over non-structural correction",
"continue",
"if",
"Class",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"Class",
",",
"tuple",
")",
"and",
"(",
"any",
"(",
"isinstance",
"(",
"e",
",",
"C",
")",
"for",
"C",
"in",
"Class",
")",
")",
")",
"or",
"isinstance",
"(",
"e",
",",
"Class",
")",
":",
"return",
"e",
"else",
":",
"#this is not yet the element of the type we are looking for, we are going to descend again in the very leftmost (rightmost if reversed) branch only",
"while",
"e",
".",
"data",
":",
"e",
"=",
"e",
".",
"data",
"[",
"descendindex",
"]",
"if",
"not",
"isinstance",
"(",
"e",
",",
"AbstractElement",
")",
":",
"return",
"None",
"#we've gone too far",
"if",
"e",
".",
"auth",
"and",
"not",
"isinstance",
"(",
"e",
",",
"AbstractAnnotationLayer",
")",
":",
"if",
"Class",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"Class",
",",
"tuple",
")",
"and",
"(",
"any",
"(",
"isinstance",
"(",
"e",
",",
"C",
")",
"for",
"C",
"in",
"Class",
")",
")",
")",
"or",
"isinstance",
"(",
"e",
",",
"Class",
")",
":",
"return",
"e",
"else",
":",
"#descend deeper",
"continue",
"return",
"None",
"#generational iteration",
"child",
"=",
"parent",
"if",
"scope",
"is",
"not",
"None",
"and",
"child",
".",
"__class__",
"in",
"scope",
":",
"#you shall not pass!",
"break",
"parent",
"=",
"parent",
".",
"parent",
"return",
"None"
] | Returns the next element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off `'AbstractElement``, may also be a tuple of multiple classes. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all. | [
"Returns",
"the",
"next",
"element",
"if",
"it",
"is",
"of",
"the",
"specified",
"type",
"and",
"if",
"it",
"does",
"not",
"cross",
"the",
"boundary",
"of",
"the",
"defined",
"scope",
".",
"Returns",
"None",
"if",
"no",
"next",
"element",
"is",
"found",
".",
"Non",
"-",
"authoritative",
"elements",
"are",
"never",
"returned",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2310-L2367 | -1 |
||||||
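An illustrative call of `next` with and without the default class/scope constraints (placeholder path; assumes the document contains words):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
word = next(iter(doc.words()))

following = word.next()                        # next Word, not crossing sentence/paragraph/division boundaries
anything = word.next(Class=None, scope=None)   # next element of any type, no scope restriction
print(following, anything)
```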
15 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.previous | def previous(self, Class=True, scope=True):
"""Returns the previous element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off `'AbstractElement``. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all.
"""
return self.next(Class,scope, True) | python | def previous(self, Class=True, scope=True):
"""Returns the previous element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off `'AbstractElement``. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all.
"""
return self.next(Class,scope, True) | [
"def",
"previous",
"(",
"self",
",",
"Class",
"=",
"True",
",",
"scope",
"=",
"True",
")",
":",
"return",
"self",
".",
"next",
"(",
"Class",
",",
"scope",
",",
"True",
")"
] | Returns the previous element, if it is of the specified type and if it does not cross the boundary of the defined scope. Returns None if no next element is found. Non-authoritative elements are never returned.
Arguments:
* ``Class``: The class to select; any python class subclassed off `'AbstractElement``. Set to ``True`` to constrain to the same class as that of the current instance, set to ``None`` to not constrain at all
* ``scope``: A list of classes which are never crossed looking for a next element. Set to ``True`` to constrain to a default list of structure elements (Sentence,Paragraph,Division,Event, ListItem,Caption), set to ``None`` to not constrain at all. | [
"Returns",
"the",
"previous",
"element",
"if",
"it",
"is",
"of",
"the",
"specified",
"type",
"and",
"if",
"it",
"does",
"not",
"cross",
"the",
"boundary",
"of",
"the",
"defined",
"scope",
".",
"Returns",
"None",
"if",
"no",
"next",
"element",
"is",
"found",
".",
"Non",
"-",
"authoritative",
"elements",
"are",
"never",
"returned",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2371-L2379 | -1 |
||||||
16 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractElement.remove | def remove(self, child):
"""Removes the child element"""
if not isinstance(child, AbstractElement):
raise ValueError("Expected AbstractElement, got " + str(type(child)))
if child.parent == self:
child.parent = None
self.data.remove(child)
#delete from index
if child.id and self.doc and child.id in self.doc.index:
del self.doc.index[child.id] | python | def remove(self, child):
"""Removes the child element"""
if not isinstance(child, AbstractElement):
raise ValueError("Expected AbstractElement, got " + str(type(child)))
if child.parent == self:
child.parent = None
self.data.remove(child)
#delete from index
if child.id and self.doc and child.id in self.doc.index:
del self.doc.index[child.id] | [
"def",
"remove",
"(",
"self",
",",
"child",
")",
":",
"if",
"not",
"isinstance",
"(",
"child",
",",
"AbstractElement",
")",
":",
"raise",
"ValueError",
"(",
"\"Expected AbstractElement, got \"",
"+",
"str",
"(",
"type",
"(",
"child",
")",
")",
")",
"if",
"child",
".",
"parent",
"==",
"self",
":",
"child",
".",
"parent",
"=",
"None",
"self",
".",
"data",
".",
"remove",
"(",
"child",
")",
"#delete from index",
"if",
"child",
".",
"id",
"and",
"self",
".",
"doc",
"and",
"child",
".",
"id",
"in",
"self",
".",
"doc",
".",
"index",
":",
"del",
"self",
".",
"doc",
".",
"index",
"[",
"child",
".",
"id",
"]"
] | Removes the child element | [
"Removes",
"the",
"child",
"element"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L2729-L2738 | -1 |
||||||
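A brief sketch combining `ancestor` (documented in an earlier row) with `remove` (placeholder path; assumes at least one word inside a sentence):

```python
from pynlpl.formats import folia

doc = folia.Document(file="/path/to/document.folia.xml")  # placeholder path
word = next(iter(doc.words()))

sentence = word.ancestor(folia.Sentence)  # nearest enclosing sentence
sentence.remove(word)                     # detaches the word and drops its id from doc.index
```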
17 | proycon/pynlpl | pynlpl/formats/folia.py | AllowTokenAnnotation.hasannotation | def hasannotation(self,Class,set=None):
"""Returns an integer indicating whether such as annotation exists, and if so, how many.
See :meth:`AllowTokenAnnotation.annotations`` for a description of the parameters."""
return sum( 1 for _ in self.select(Class,set,True,default_ignore_annotations)) | python | def hasannotation(self,Class,set=None):
"""Returns an integer indicating whether such as annotation exists, and if so, how many.
See :meth:`AllowTokenAnnotation.annotations`` for a description of the parameters."""
return sum( 1 for _ in self.select(Class,set,True,default_ignore_annotations)) | [
"def",
"hasannotation",
"(",
"self",
",",
"Class",
",",
"set",
"=",
"None",
")",
":",
"return",
"sum",
"(",
"1",
"for",
"_",
"in",
"self",
".",
"select",
"(",
"Class",
",",
"set",
",",
"True",
",",
"default_ignore_annotations",
")",
")"
] | Returns an integer indicating whether such an annotation exists, and if so, how many.
See :meth:`AllowTokenAnnotation.annotations` for a description of the parameters. | [
"Returns",
"an",
"integer",
"indicating",
"whether",
"such",
"as",
"annotation",
"exists",
"and",
"if",
"so",
"how",
"many",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L3046-L3050 | -1 |
||||||
18 | proycon/pynlpl | pynlpl/formats/folia.py | AllowTokenAnnotation.annotation | def annotation(self, type, set=None):
"""Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
:meth:`AllowTokenAnnotation.annotations`
:meth:`AbstractElement.select`
Raises:
:class:`NoSuchAnnotation` if no such annotation exists
"""
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
for e in self.select(type,set,True,default_ignore_annotations):
return e
raise NoSuchAnnotation() | python | def annotation(self, type, set=None):
"""Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
:meth:`AllowTokenAnnotation.annotations`
:meth:`AbstractElement.select`
Raises:
:class:`NoSuchAnnotation` if no such annotation exists
"""
"""Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found"""
for e in self.select(type,set,True,default_ignore_annotations):
return e
raise NoSuchAnnotation() | [
"def",
"annotation",
"(",
"self",
",",
"type",
",",
"set",
"=",
"None",
")",
":",
"\"\"\"Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found\"\"\"",
"for",
"e",
"in",
"self",
".",
"select",
"(",
"type",
",",
"set",
",",
"True",
",",
"default_ignore_annotations",
")",
":",
"return",
"e",
"raise",
"NoSuchAnnotation",
"(",
")"
] | Obtain a single annotation element.
A further restriction can be made based on set.
Arguments:
Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement`
Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned.
Returns:
An element (instance derived from :class:`AbstractElement`)
Example::
sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls
See also:
:meth:`AllowTokenAnnotation.annotations`
:meth:`AbstractElement.select`
Raises:
:class:`NoSuchAnnotation` if no such annotation exists | [
"Obtain",
"a",
"single",
"annotation",
"element",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L3052-L3078 | -1 |
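A minimal sketch combining ``hasannotation`` and ``annotation`` from the two records above; the file path is hypothetical and :class:`PosAnnotation` is just one possible annotation class::

    from pynlpl.formats import folia

    doc = folia.Document(file="/path/to/document.folia.xml")  # hypothetical path
    for word in doc.words():
        if word.hasannotation(folia.PosAnnotation):            # integer count, truthy when at least one match
            print(word.id, word.annotation(folia.PosAnnotation).cls)

``annotation`` raises :class:`NoSuchAnnotation` when nothing matches, so a try/except block would work equally well as the guard above.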
||||||
19 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractStructureElement.hasannotationlayer | def hasannotationlayer(self, annotationtype=None,set=None):
"""Does the specified annotation layer exist?"""
l = self.layers(annotationtype, set)
return (len(l) > 0) | python | def hasannotationlayer(self, annotationtype=None,set=None):
"""Does the specified annotation layer exist?"""
l = self.layers(annotationtype, set)
return (len(l) > 0) | [
"def",
"hasannotationlayer",
"(",
"self",
",",
"annotationtype",
"=",
"None",
",",
"set",
"=",
"None",
")",
":",
"l",
"=",
"self",
".",
"layers",
"(",
"annotationtype",
",",
"set",
")",
"return",
"(",
"len",
"(",
"l",
")",
">",
"0",
")"
] | Does the specified annotation layer exist? | [
"Does",
"the",
"specified",
"annotation",
"layer",
"exist?"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L3268-L3271 | -1 |
||||||
20 | proycon/pynlpl | pynlpl/formats/folia.py | TextContent.getreference | def getreference(self, validate=True):
"""Returns and validates the Text Content's reference. Raises UnresolvableTextContent when invalid"""
if self.offset is None: return None #nothing to test
if self.ref:
ref = self.doc[self.ref]
else:
ref = self.finddefaultreference()
if not ref:
raise UnresolvableTextContent("Default reference for textcontent not found!")
elif not ref.hastext(self.cls):
raise UnresolvableTextContent("Reference (ID " + str(ref.id) + ") has no such text (class=" + self.cls+")")
elif validate and self.text() != ref.textcontent(self.cls).text()[self.offset:self.offset+len(self.data[0])]:
raise UnresolvableTextContent("Reference (ID " + str(ref.id) + ", class=" + self.cls+") found but no text match at specified offset ("+str(self.offset)+")! Expected '" + self.text() + "', got '" + ref.textcontent(self.cls).text()[self.offset:self.offset+len(self.data[0])] +"'")
else:
#finally, we made it!
return ref | python | def getreference(self, validate=True):
"""Returns and validates the Text Content's reference. Raises UnresolvableTextContent when invalid"""
if self.offset is None: return None #nothing to test
if self.ref:
ref = self.doc[self.ref]
else:
ref = self.finddefaultreference()
if not ref:
raise UnresolvableTextContent("Default reference for textcontent not found!")
elif not ref.hastext(self.cls):
raise UnresolvableTextContent("Reference (ID " + str(ref.id) + ") has no such text (class=" + self.cls+")")
elif validate and self.text() != ref.textcontent(self.cls).text()[self.offset:self.offset+len(self.data[0])]:
raise UnresolvableTextContent("Reference (ID " + str(ref.id) + ", class=" + self.cls+") found but no text match at specified offset ("+str(self.offset)+")! Expected '" + self.text() + "', got '" + ref.textcontent(self.cls).text()[self.offset:self.offset+len(self.data[0])] +"'")
else:
#finally, we made it!
return ref | [
"def",
"getreference",
"(",
"self",
",",
"validate",
"=",
"True",
")",
":",
"if",
"self",
".",
"offset",
"is",
"None",
":",
"return",
"None",
"#nothing to test",
"if",
"self",
".",
"ref",
":",
"ref",
"=",
"self",
".",
"doc",
"[",
"self",
".",
"ref",
"]",
"else",
":",
"ref",
"=",
"self",
".",
"finddefaultreference",
"(",
")",
"if",
"not",
"ref",
":",
"raise",
"UnresolvableTextContent",
"(",
"\"Default reference for textcontent not found!\"",
")",
"elif",
"not",
"ref",
".",
"hastext",
"(",
"self",
".",
"cls",
")",
":",
"raise",
"UnresolvableTextContent",
"(",
"\"Reference (ID \"",
"+",
"str",
"(",
"ref",
".",
"id",
")",
"+",
"\") has no such text (class=\"",
"+",
"self",
".",
"cls",
"+",
"\")\"",
")",
"elif",
"validate",
"and",
"self",
".",
"text",
"(",
")",
"!=",
"ref",
".",
"textcontent",
"(",
"self",
".",
"cls",
")",
".",
"text",
"(",
")",
"[",
"self",
".",
"offset",
":",
"self",
".",
"offset",
"+",
"len",
"(",
"self",
".",
"data",
"[",
"0",
"]",
")",
"]",
":",
"raise",
"UnresolvableTextContent",
"(",
"\"Reference (ID \"",
"+",
"str",
"(",
"ref",
".",
"id",
")",
"+",
"\", class=\"",
"+",
"self",
".",
"cls",
"+",
"\") found but no text match at specified offset (\"",
"+",
"str",
"(",
"self",
".",
"offset",
")",
"+",
"\")! Expected '\"",
"+",
"self",
".",
"text",
"(",
")",
"+",
"\"', got '\"",
"+",
"ref",
".",
"textcontent",
"(",
"self",
".",
"cls",
")",
".",
"text",
"(",
")",
"[",
"self",
".",
"offset",
":",
"self",
".",
"offset",
"+",
"len",
"(",
"self",
".",
"data",
"[",
"0",
"]",
")",
"]",
"+",
"\"'\"",
")",
"else",
":",
"#finally, we made it!",
"return",
"ref"
] | Returns and validates the Text Content's reference. Raises UnresolvableTextContent when invalid | [
"Returns",
"and",
"validates",
"the",
"Text",
"Content",
"s",
"reference",
".",
"Raises",
"UnresolvableTextContent",
"when",
"invalid"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L3502-L3519 | -1 |
||||||
21 | proycon/pynlpl | pynlpl/formats/folia.py | PhonContent.getreference | def getreference(self, validate=True):
"""Return and validate the Phonetic Content's reference. Raises UnresolvableTextContent when invalid"""
if self.offset is None: return None #nothing to test
if self.ref:
ref = self.doc[self.ref]
else:
ref = self.finddefaultreference()
if not ref:
raise UnresolvableTextContent("Default reference for phonetic content not found!")
elif not ref.hasphon(self.cls):
raise UnresolvableTextContent("Reference has no such phonetic content (class=" + self.cls+")")
elif validate and self.phon() != ref.textcontent(self.cls).phon()[self.offset:self.offset+len(self.data[0])]:
raise UnresolvableTextContent("Reference (class=" + self.cls+") found but no phonetic match at specified offset ("+str(self.offset)+")! Expected '" + self.text() + "', got '" + ref.textcontent(self.cls).text()[self.offset:self.offset+len(self.data[0])] +"'")
else:
#finally, we made it!
return ref | python | def getreference(self, validate=True):
"""Return and validate the Phonetic Content's reference. Raises UnresolvableTextContent when invalid"""
if self.offset is None: return None #nothing to test
if self.ref:
ref = self.doc[self.ref]
else:
ref = self.finddefaultreference()
if not ref:
raise UnresolvableTextContent("Default reference for phonetic content not found!")
elif not ref.hasphon(self.cls):
raise UnresolvableTextContent("Reference has no such phonetic content (class=" + self.cls+")")
elif validate and self.phon() != ref.textcontent(self.cls).phon()[self.offset:self.offset+len(self.data[0])]:
raise UnresolvableTextContent("Reference (class=" + self.cls+") found but no phonetic match at specified offset ("+str(self.offset)+")! Expected '" + self.text() + "', got '" + ref.textcontent(self.cls).text()[self.offset:self.offset+len(self.data[0])] +"'")
else:
#finally, we made it!
return ref | [
"def",
"getreference",
"(",
"self",
",",
"validate",
"=",
"True",
")",
":",
"if",
"self",
".",
"offset",
"is",
"None",
":",
"return",
"None",
"#nothing to test",
"if",
"self",
".",
"ref",
":",
"ref",
"=",
"self",
".",
"doc",
"[",
"self",
".",
"ref",
"]",
"else",
":",
"ref",
"=",
"self",
".",
"finddefaultreference",
"(",
")",
"if",
"not",
"ref",
":",
"raise",
"UnresolvableTextContent",
"(",
"\"Default reference for phonetic content not found!\"",
")",
"elif",
"not",
"ref",
".",
"hasphon",
"(",
"self",
".",
"cls",
")",
":",
"raise",
"UnresolvableTextContent",
"(",
"\"Reference has no such phonetic content (class=\"",
"+",
"self",
".",
"cls",
"+",
"\")\"",
")",
"elif",
"validate",
"and",
"self",
".",
"phon",
"(",
")",
"!=",
"ref",
".",
"textcontent",
"(",
"self",
".",
"cls",
")",
".",
"phon",
"(",
")",
"[",
"self",
".",
"offset",
":",
"self",
".",
"offset",
"+",
"len",
"(",
"self",
".",
"data",
"[",
"0",
"]",
")",
"]",
":",
"raise",
"UnresolvableTextContent",
"(",
"\"Reference (class=\"",
"+",
"self",
".",
"cls",
"+",
"\") found but no phonetic match at specified offset (\"",
"+",
"str",
"(",
"self",
".",
"offset",
")",
"+",
"\")! Expected '\"",
"+",
"self",
".",
"text",
"(",
")",
"+",
"\"', got '\"",
"+",
"ref",
".",
"textcontent",
"(",
"self",
".",
"cls",
")",
".",
"text",
"(",
")",
"[",
"self",
".",
"offset",
":",
"self",
".",
"offset",
"+",
"len",
"(",
"self",
".",
"data",
"[",
"0",
"]",
")",
"]",
"+",
"\"'\"",
")",
"else",
":",
"#finally, we made it!",
"return",
"ref"
] | Return and validate the Phonetic Content's reference. Raises UnresolvableTextContent when invalid | [
"Return",
"and",
"validate",
"the",
"Phonetic",
"Content",
"s",
"reference",
".",
"Raises",
"UnresolvableTextContent",
"when",
"invalid"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L3715-L3732 | -1 |
||||||
22 | proycon/pynlpl | pynlpl/formats/folia.py | Word.findspans | def findspans(self, type,set=None):
"""Yields span annotation elements of the specified type that include this word.
Arguments:
type: The annotation type, which can be passed using any of the :class:`AnnotationType` members, or by passing the relevant :class:`AbstractSpanAnnotation` or :class:`AbstractAnnotationLayer` class.
set (str or None): Constrain by set
Example::
for chunk in word.findspans(folia.Chunk):
print(" Chunk class=", chunk.cls, " words=")
for word2 in chunk.wrefs(): #print all words in the chunk (of which the word is a part)
print(word2, end="")
print()
Yields:
Matching span annotation instances (derived from :class:`AbstractSpanAnnotation`)
"""
if issubclass(type, AbstractAnnotationLayer):
layerclass = type
else:
layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
e = self
while True:
if not e.parent: break
e = e.parent
for layer in e.select(layerclass,set,False):
if type is layerclass:
for e2 in layer.select(AbstractSpanAnnotation,set,True, (True, Word, Morpheme)):
if not isinstance(e2, AbstractSpanRole) and self in e2.wrefs():
yield e2
else:
for e2 in layer.select(type,set,True, (True, Word, Morpheme)):
if not isinstance(e2, AbstractSpanRole) and self in e2.wrefs():
yield e2 | python | def findspans(self, type,set=None):
"""Yields span annotation elements of the specified type that include this word.
Arguments:
type: The annotation type, which can be passed using any of the :class:`AnnotationType` members, or by passing the relevant :class:`AbstractSpanAnnotation` or :class:`AbstractAnnotationLayer` class.
set (str or None): Constrain by set
Example::
for chunk in word.findspans(folia.Chunk):
print(" Chunk class=", chunk.cls, " words=")
for word2 in chunk.wrefs(): #print all words in the chunk (of which the word is a part)
print(word2, end="")
print()
Yields:
Matching span annotation instances (derived from :class:`AbstractSpanAnnotation`)
"""
if issubclass(type, AbstractAnnotationLayer):
layerclass = type
else:
layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
e = self
while True:
if not e.parent: break
e = e.parent
for layer in e.select(layerclass,set,False):
if type is layerclass:
for e2 in layer.select(AbstractSpanAnnotation,set,True, (True, Word, Morpheme)):
if not isinstance(e2, AbstractSpanRole) and self in e2.wrefs():
yield e2
else:
for e2 in layer.select(type,set,True, (True, Word, Morpheme)):
if not isinstance(e2, AbstractSpanRole) and self in e2.wrefs():
yield e2 | [
"def",
"findspans",
"(",
"self",
",",
"type",
",",
"set",
"=",
"None",
")",
":",
"if",
"issubclass",
"(",
"type",
",",
"AbstractAnnotationLayer",
")",
":",
"layerclass",
"=",
"type",
"else",
":",
"layerclass",
"=",
"ANNOTATIONTYPE2LAYERCLASS",
"[",
"type",
".",
"ANNOTATIONTYPE",
"]",
"e",
"=",
"self",
"while",
"True",
":",
"if",
"not",
"e",
".",
"parent",
":",
"break",
"e",
"=",
"e",
".",
"parent",
"for",
"layer",
"in",
"e",
".",
"select",
"(",
"layerclass",
",",
"set",
",",
"False",
")",
":",
"if",
"type",
"is",
"layerclass",
":",
"for",
"e2",
"in",
"layer",
".",
"select",
"(",
"AbstractSpanAnnotation",
",",
"set",
",",
"True",
",",
"(",
"True",
",",
"Word",
",",
"Morpheme",
")",
")",
":",
"if",
"not",
"isinstance",
"(",
"e2",
",",
"AbstractSpanRole",
")",
"and",
"self",
"in",
"e2",
".",
"wrefs",
"(",
")",
":",
"yield",
"e2",
"else",
":",
"for",
"e2",
"in",
"layer",
".",
"select",
"(",
"type",
",",
"set",
",",
"True",
",",
"(",
"True",
",",
"Word",
",",
"Morpheme",
")",
")",
":",
"if",
"not",
"isinstance",
"(",
"e2",
",",
"AbstractSpanRole",
")",
"and",
"self",
"in",
"e2",
".",
"wrefs",
"(",
")",
":",
"yield",
"e2"
] | Yields span annotation elements of the specified type that include this word.
Arguments:
type: The annotation type, which can be passed using any of the :class:`AnnotationType` members, or by passing the relevant :class:`AbstractSpanAnnotation` or :class:`AbstractAnnotationLayer` class.
set (str or None): Constrain by set
Example::
for chunk in word.findspans(folia.Chunk):
print(" Chunk class=", chunk.cls, " words=")
for word2 in chunk.wrefs(): #print all words in the chunk (of which the word is a part)
print(word2, end="")
print()
Yields:
Matching span annotation instances (derived from :class:`AbstractSpanAnnotation`) | [
"Yields",
"span",
"annotation",
"elements",
"of",
"the",
"specified",
"type",
"that",
"include",
"this",
"word",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4178-L4213 | -1 |
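A runnable variant of the docstring's own chunk example for ``findspans``, assuming a hypothetical document path and that the words carry text content::

    from pynlpl.formats import folia

    doc = folia.Document(file="/path/to/document.folia.xml")  # hypothetical path
    for word in doc.words():
        for chunk in word.findspans(folia.Chunk):
            # wrefs() yields the Words covered by the chunk; text() assumes they have text content
            print("Chunk", chunk.cls, "covers:", " ".join(w.text() for w in chunk.wrefs()))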
||||||
23 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractSpanAnnotation.setspan | def setspan(self, *args):
"""Sets the span of the span element anew, erases all data inside.
Arguments:
*args: Instances of :class:`Word`, :class:`Morpheme` or :class:`Phoneme`
"""
self.data = []
for child in args:
self.append(child) | python | def setspan(self, *args):
"""Sets the span of the span element anew, erases all data inside.
Arguments:
*args: Instances of :class:`Word`, :class:`Morpheme` or :class:`Phoneme`
"""
self.data = []
for child in args:
self.append(child) | [
"def",
"setspan",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"data",
"=",
"[",
"]",
"for",
"child",
"in",
"args",
":",
"self",
".",
"append",
"(",
"child",
")"
] | Sets the span of the span element anew, erases all data inside.
Arguments:
*args: Instances of :class:`Word`, :class:`Morpheme` or :class:`Phoneme` | [
"Sets",
"the",
"span",
"of",
"the",
"span",
"element",
"anew",
"erases",
"all",
"data",
"inside",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4373-L4381 | -1 |
||||||
24 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractSpanAnnotation._helper_wrefs | def _helper_wrefs(self, targets, recurse=True):
"""Internal helper function"""
for c in self:
if isinstance(c,Word) or isinstance(c,Morpheme) or isinstance(c, Phoneme):
targets.append(c)
elif isinstance(c,WordReference):
try:
targets.append(self.doc[c.id]) #try to resolve
except KeyError:
targets.append(c) #add unresolved
elif isinstance(c, AbstractSpanAnnotation) and recurse:
#recursion
c._helper_wrefs(targets) #pylint: disable=protected-access
elif isinstance(c, Correction) and c.auth: #recurse into corrections
for e in c:
if isinstance(e, AbstractCorrectionChild) and e.auth:
for e2 in e:
if isinstance(e2, AbstractSpanAnnotation):
#recursion
e2._helper_wrefs(targets) | python | def _helper_wrefs(self, targets, recurse=True):
"""Internal helper function"""
for c in self:
if isinstance(c,Word) or isinstance(c,Morpheme) or isinstance(c, Phoneme):
targets.append(c)
elif isinstance(c,WordReference):
try:
targets.append(self.doc[c.id]) #try to resolve
except KeyError:
targets.append(c) #add unresolved
elif isinstance(c, AbstractSpanAnnotation) and recurse:
#recursion
c._helper_wrefs(targets) #pylint: disable=protected-access
elif isinstance(c, Correction) and c.auth: #recurse into corrections
for e in c:
if isinstance(e, AbstractCorrectionChild) and e.auth:
for e2 in e:
if isinstance(e2, AbstractSpanAnnotation):
#recursion
e2._helper_wrefs(targets) | [
"def",
"_helper_wrefs",
"(",
"self",
",",
"targets",
",",
"recurse",
"=",
"True",
")",
":",
"for",
"c",
"in",
"self",
":",
"if",
"isinstance",
"(",
"c",
",",
"Word",
")",
"or",
"isinstance",
"(",
"c",
",",
"Morpheme",
")",
"or",
"isinstance",
"(",
"c",
",",
"Phoneme",
")",
":",
"targets",
".",
"append",
"(",
"c",
")",
"elif",
"isinstance",
"(",
"c",
",",
"WordReference",
")",
":",
"try",
":",
"targets",
".",
"append",
"(",
"self",
".",
"doc",
"[",
"c",
".",
"id",
"]",
")",
"#try to resolve",
"except",
"KeyError",
":",
"targets",
".",
"append",
"(",
"c",
")",
"#add unresolved",
"elif",
"isinstance",
"(",
"c",
",",
"AbstractSpanAnnotation",
")",
"and",
"recurse",
":",
"#recursion",
"c",
".",
"_helper_wrefs",
"(",
"targets",
")",
"#pylint: disable=protected-access",
"elif",
"isinstance",
"(",
"c",
",",
"Correction",
")",
"and",
"c",
".",
"auth",
":",
"#recurse into corrections",
"for",
"e",
"in",
"c",
":",
"if",
"isinstance",
"(",
"e",
",",
"AbstractCorrectionChild",
")",
"and",
"e",
".",
"auth",
":",
"for",
"e2",
"in",
"e",
":",
"if",
"isinstance",
"(",
"e2",
",",
"AbstractSpanAnnotation",
")",
":",
"#recursion",
"e2",
".",
"_helper_wrefs",
"(",
"targets",
")"
] | Internal helper function | [
"Internal",
"helper",
"function"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4418-L4437 | -1 |
||||||
25 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractSpanAnnotation.wrefs | def wrefs(self, index = None, recurse=True):
"""Returns a list of word references, these can be Words but also Morphemes or Phonemes.
Arguments:
index (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
"""
targets =[]
self._helper_wrefs(targets, recurse)
if index is None:
return targets
else:
return targets[index] | python | def wrefs(self, index = None, recurse=True):
"""Returns a list of word references, these can be Words but also Morphemes or Phonemes.
Arguments:
index (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all
"""
targets =[]
self._helper_wrefs(targets, recurse)
if index is None:
return targets
else:
return targets[index] | [
"def",
"wrefs",
"(",
"self",
",",
"index",
"=",
"None",
",",
"recurse",
"=",
"True",
")",
":",
"targets",
"=",
"[",
"]",
"self",
".",
"_helper_wrefs",
"(",
"targets",
",",
"recurse",
")",
"if",
"index",
"is",
"None",
":",
"return",
"targets",
"else",
":",
"return",
"targets",
"[",
"index",
"]"
] | Returns a list of word references; these can be Words but also Morphemes or Phonemes.
Arguments:
index (int or None): If set to an integer, will retrieve and return the n'th element (starting at 0) instead of returning the list of all | [
"Returns",
"a",
"list",
"of",
"word",
"references",
"these",
"can",
"be",
"Words",
"but",
"also",
"Morphemes",
"or",
"Phonemes",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4439-L4450 | -1 |
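A sketch of ``wrefs`` on a span annotation; the path is hypothetical and :class:`Entity` merely stands in for any :class:`AbstractSpanAnnotation` subclass actually present in the document::

    from pynlpl.formats import folia

    doc = folia.Document(file="/path/to/document.folia.xml")  # hypothetical path
    for entity in doc.select(folia.Entity):                    # any span annotation type works the same way
        # wrefs(0) returns the first referenced Word/Morpheme/Phoneme, wrefs() the full list
        print(entity.cls, "first target:", entity.wrefs(0).id, "out of", len(entity.wrefs()))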
||||||
26 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractSpanAnnotation.copychildren | def copychildren(self, newdoc=None, idsuffix=""):
"""Generator creating a deep copy of the children of this element. If idsuffix is a string, if set to True, a random idsuffix will be generated including a random 32-bit hash"""
if idsuffix is True: idsuffix = ".copy." + "%08x" % random.getrandbits(32) #random 32-bit hash for each copy, same one will be reused for all children
for c in self:
if isinstance(c, Word):
yield WordReference(newdoc, id=c.id)
else:
yield c.copy(newdoc,idsuffix) | python | def copychildren(self, newdoc=None, idsuffix=""):
"""Generator creating a deep copy of the children of this element. If idsuffix is a string, if set to True, a random idsuffix will be generated including a random 32-bit hash"""
if idsuffix is True: idsuffix = ".copy." + "%08x" % random.getrandbits(32) #random 32-bit hash for each copy, same one will be reused for all children
for c in self:
if isinstance(c, Word):
yield WordReference(newdoc, id=c.id)
else:
yield c.copy(newdoc,idsuffix) | [
"def",
"copychildren",
"(",
"self",
",",
"newdoc",
"=",
"None",
",",
"idsuffix",
"=",
"\"\"",
")",
":",
"if",
"idsuffix",
"is",
"True",
":",
"idsuffix",
"=",
"\".copy.\"",
"+",
"\"%08x\"",
"%",
"random",
".",
"getrandbits",
"(",
"32",
")",
"#random 32-bit hash for each copy, same one will be reused for all children",
"for",
"c",
"in",
"self",
":",
"if",
"isinstance",
"(",
"c",
",",
"Word",
")",
":",
"yield",
"WordReference",
"(",
"newdoc",
",",
"id",
"=",
"c",
".",
"id",
")",
"else",
":",
"yield",
"c",
".",
"copy",
"(",
"newdoc",
",",
"idsuffix",
")"
] | Generator creating a deep copy of the children of this element. If idsuffix is a string, it will be appended to the IDs of the copies; if set to True, a random idsuffix will be generated including a random 32-bit hash | [
"Generator",
"creating",
"a",
"deep",
"copy",
"of",
"the",
"children",
"of",
"this",
"element",
".",
"If",
"idsuffix",
"is",
"a",
"string",
"if",
"set",
"to",
"True",
"a",
"random",
"idsuffix",
"will",
"be",
"generated",
"including",
"a",
"random",
"32",
"-",
"bit",
"hash"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4465-L4472 | -1 |
||||||
27 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractAnnotationLayer.alternatives | def alternatives(self, Class=None, set=None):
"""Generator over alternatives, either all or only of a specific annotation type, and possibly restrained also by set.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.
* ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
Generator over Alternative elements
"""
for e in self.select(AlternativeLayers,None, True, ['Original','Suggestion']): #pylint: disable=too-many-nested-blocks
if Class is None:
yield e
elif len(e) >= 1: #child elements?
for e2 in e:
try:
if isinstance(e2, Class):
try:
if set is None or e2.set == set:
yield e #not e2
break #yield an alternative only once (in case there are multiple matches)
except AttributeError:
continue
except AttributeError:
continue | python | def alternatives(self, Class=None, set=None):
"""Generator over alternatives, either all or only of a specific annotation type, and possibly restrained also by set.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.
* ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
Generator over Alternative elements
"""
for e in self.select(AlternativeLayers,None, True, ['Original','Suggestion']): #pylint: disable=too-many-nested-blocks
if Class is None:
yield e
elif len(e) >= 1: #child elements?
for e2 in e:
try:
if isinstance(e2, Class):
try:
if set is None or e2.set == set:
yield e #not e2
break #yield an alternative only once (in case there are multiple matches)
except AttributeError:
continue
except AttributeError:
continue | [
"def",
"alternatives",
"(",
"self",
",",
"Class",
"=",
"None",
",",
"set",
"=",
"None",
")",
":",
"for",
"e",
"in",
"self",
".",
"select",
"(",
"AlternativeLayers",
",",
"None",
",",
"True",
",",
"[",
"'Original'",
",",
"'Suggestion'",
"]",
")",
":",
"#pylint: disable=too-many-nested-blocks",
"if",
"Class",
"is",
"None",
":",
"yield",
"e",
"elif",
"len",
"(",
"e",
")",
">=",
"1",
":",
"#child elements?",
"for",
"e2",
"in",
"e",
":",
"try",
":",
"if",
"isinstance",
"(",
"e2",
",",
"Class",
")",
":",
"try",
":",
"if",
"set",
"is",
"None",
"or",
"e2",
".",
"set",
"==",
"set",
":",
"yield",
"e",
"#not e2",
"break",
"#yield an alternative only once (in case there are multiple matches)",
"except",
"AttributeError",
":",
"continue",
"except",
"AttributeError",
":",
"continue"
] | Generator over alternatives, either all or only of a specific annotation type, and possibly restrained also by set.
Arguments:
* ``Class`` - The Class you want to retrieve (e.g. PosAnnotation). Or set to None to select all alternatives regardless of what type they are.
* ``set`` - The set you want to retrieve (defaults to None, which selects regardless of set)
Returns:
Generator over Alternative elements | [
"Generator",
"over",
"alternatives",
"either",
"all",
"or",
"only",
"of",
"a",
"specific",
"annotation",
"type",
"and",
"possibly",
"restrained",
"also",
"by",
"set",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4574-L4599 | -1 |
||||||
28 | proycon/pynlpl | pynlpl/formats/folia.py | AbstractAnnotationLayer.findspan | def findspan(self, *words):
"""Returns the span element which spans over the specified words or morphemes.
See also:
:meth:`Word.findspans`
"""
for span in self.select(AbstractSpanAnnotation,None,True):
if tuple(span.wrefs()) == words:
return span
raise NoSuchAnnotation | python | def findspan(self, *words):
"""Returns the span element which spans over the specified words or morphemes.
See also:
:meth:`Word.findspans`
"""
for span in self.select(AbstractSpanAnnotation,None,True):
if tuple(span.wrefs()) == words:
return span
raise NoSuchAnnotation | [
"def",
"findspan",
"(",
"self",
",",
"*",
"words",
")",
":",
"for",
"span",
"in",
"self",
".",
"select",
"(",
"AbstractSpanAnnotation",
",",
"None",
",",
"True",
")",
":",
"if",
"tuple",
"(",
"span",
".",
"wrefs",
"(",
")",
")",
"==",
"words",
":",
"return",
"span",
"raise",
"NoSuchAnnotation"
] | Returns the span element which spans over the specified words or morphemes.
See also:
:meth:`Word.findspans` | [
"Returns",
"the",
"span",
"element",
"which",
"spans",
"over",
"the",
"specified",
"words",
"or",
"morphemes",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4601-L4611 | -1 |
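A sketch of ``findspan``, assuming a hypothetical document with an entities layer and at least two words; per the record above, ``NoSuchAnnotation`` is raised when no span covers exactly the given sequence::

    from pynlpl.formats import folia

    doc = folia.Document(file="/path/to/document.folia.xml")  # hypothetical path
    words = list(doc.words())
    for layer in doc.select(folia.EntitiesLayer):
        try:
            span = layer.findspan(words[0], words[1])   # span covering exactly these two words
            print("found span:", span.cls)
        except folia.NoSuchAnnotation:
            pass                                        # no span covers exactly this sequence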
||||||
29 | proycon/pynlpl | pynlpl/formats/folia.py | Correction.hasnew | def hasnew(self,allowempty=False):
"""Does the correction define new corrected annotations?"""
for e in self.select(New,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False | python | def hasnew(self,allowempty=False):
"""Does the correction define new corrected annotations?"""
for e in self.select(New,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False | [
"def",
"hasnew",
"(",
"self",
",",
"allowempty",
"=",
"False",
")",
":",
"for",
"e",
"in",
"self",
".",
"select",
"(",
"New",
",",
"None",
",",
"False",
",",
"False",
")",
":",
"if",
"not",
"allowempty",
"and",
"len",
"(",
"e",
")",
"==",
"0",
":",
"continue",
"return",
"True",
"return",
"False"
] | Does the correction define new corrected annotations? | [
"Does",
"the",
"correction",
"define",
"new",
"corrected",
"annotations?"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4982-L4987 | -1 |
||||||
30 | proycon/pynlpl | pynlpl/formats/folia.py | Correction.hasoriginal | def hasoriginal(self,allowempty=False):
"""Does the correction record the old annotations prior to correction?"""
for e in self.select(Original,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False | python | def hasoriginal(self,allowempty=False):
"""Does the correction record the old annotations prior to correction?"""
for e in self.select(Original,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False | [
"def",
"hasoriginal",
"(",
"self",
",",
"allowempty",
"=",
"False",
")",
":",
"for",
"e",
"in",
"self",
".",
"select",
"(",
"Original",
",",
"None",
",",
"False",
",",
"False",
")",
":",
"if",
"not",
"allowempty",
"and",
"len",
"(",
"e",
")",
"==",
"0",
":",
"continue",
"return",
"True",
"return",
"False"
] | Does the correction record the old annotations prior to correction? | [
"Does",
"the",
"correction",
"record",
"the",
"old",
"annotations",
"prior",
"to",
"correction?"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L4989-L4994 | -1 |
||||||
31 | proycon/pynlpl | pynlpl/formats/folia.py | Correction.hassuggestions | def hassuggestions(self,allowempty=False):
"""Does the correction propose suggestions for correction?"""
for e in self.select(Suggestion,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False | python | def hassuggestions(self,allowempty=False):
"""Does the correction propose suggestions for correction?"""
for e in self.select(Suggestion,None,False, False):
if not allowempty and len(e) == 0: continue
return True
return False | [
"def",
"hassuggestions",
"(",
"self",
",",
"allowempty",
"=",
"False",
")",
":",
"for",
"e",
"in",
"self",
".",
"select",
"(",
"Suggestion",
",",
"None",
",",
"False",
",",
"False",
")",
":",
"if",
"not",
"allowempty",
"and",
"len",
"(",
"e",
")",
"==",
"0",
":",
"continue",
"return",
"True",
"return",
"False"
] | Does the correction propose suggestions for correction? | [
"Does",
"the",
"correction",
"propose",
"suggestions",
"for",
"correction?"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5003-L5008 | -1 |
||||||
32 | proycon/pynlpl | pynlpl/formats/folia.py | Correction.new | def new(self,index = None):
"""Get the new corrected annotation.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation`
"""
if index is None:
try:
return next(self.select(New,None,False))
except StopIteration:
raise NoSuchAnnotation
else:
for e in self.select(New,None,False):
return e[index]
raise NoSuchAnnotation | python | def new(self,index = None):
"""Get the new corrected annotation.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation`
"""
if index is None:
try:
return next(self.select(New,None,False))
except StopIteration:
raise NoSuchAnnotation
else:
for e in self.select(New,None,False):
return e[index]
raise NoSuchAnnotation | [
"def",
"new",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"None",
":",
"try",
":",
"return",
"next",
"(",
"self",
".",
"select",
"(",
"New",
",",
"None",
",",
"False",
")",
")",
"except",
"StopIteration",
":",
"raise",
"NoSuchAnnotation",
"else",
":",
"for",
"e",
"in",
"self",
".",
"select",
"(",
"New",
",",
"None",
",",
"False",
")",
":",
"return",
"e",
"[",
"index",
"]",
"raise",
"NoSuchAnnotation"
] | Get the new corrected annotation.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation` | [
"Get",
"the",
"new",
"corrected",
"annotation",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5106-L5126 | -1 |
||||||
33 | proycon/pynlpl | pynlpl/formats/folia.py | Correction.original | def original(self,index=None):
"""Get the old annotation prior to correction.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation`
"""
if index is None:
try:
return next(self.select(Original,None,False, False))
except StopIteration:
raise NoSuchAnnotation
else:
for e in self.select(Original,None,False, False):
return e[index]
raise NoSuchAnnotation | python | def original(self,index=None):
"""Get the old annotation prior to correction.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation`
"""
if index is None:
try:
return next(self.select(Original,None,False, False))
except StopIteration:
raise NoSuchAnnotation
else:
for e in self.select(Original,None,False, False):
return e[index]
raise NoSuchAnnotation | [
"def",
"original",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"None",
":",
"try",
":",
"return",
"next",
"(",
"self",
".",
"select",
"(",
"Original",
",",
"None",
",",
"False",
",",
"False",
")",
")",
"except",
"StopIteration",
":",
"raise",
"NoSuchAnnotation",
"else",
":",
"for",
"e",
"in",
"self",
".",
"select",
"(",
"Original",
",",
"None",
",",
"False",
",",
"False",
")",
":",
"return",
"e",
"[",
"index",
"]",
"raise",
"NoSuchAnnotation"
] | Get the old annotation prior to correction.
This returns only one annotation if multiple exist, use `index` to select another in the sequence.
Returns:
an annotation element (:class:`AbstractElement`)
Raises:
:class:`NoSuchAnnotation` | [
"Get",
"the",
"old",
"annotation",
"prior",
"to",
"correction",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5128-L5147 | -1 |
||||||
34 | proycon/pynlpl | pynlpl/formats/folia.py | Correction.suggestions | def suggestions(self,index=None):
"""Get suggestions for correction.
Yields:
:class:`Suggestion` elements that encapsulate the suggested annotations (if index is ``None``, default)
Returns:
a :class:`Suggestion` element that encapsulates the suggested annotations (if index is set)
Raises:
:class:`IndexError`
"""
if index is None:
return self.select(Suggestion,None,False, False)
else:
for i, e in enumerate(self.select(Suggestion,None,False, False)):
if index == i:
return e
raise IndexError | python | def suggestions(self,index=None):
"""Get suggestions for correction.
Yields:
:class:`Suggestion` elements that encapsulate the suggested annotations (if index is ``None``, default)
Returns:
a :class:`Suggestion` element that encapsulates the suggested annotations (if index is set)
Raises:
:class:`IndexError`
"""
if index is None:
return self.select(Suggestion,None,False, False)
else:
for i, e in enumerate(self.select(Suggestion,None,False, False)):
if index == i:
return e
raise IndexError | [
"def",
"suggestions",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"None",
":",
"return",
"self",
".",
"select",
"(",
"Suggestion",
",",
"None",
",",
"False",
",",
"False",
")",
"else",
":",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"self",
".",
"select",
"(",
"Suggestion",
",",
"None",
",",
"False",
",",
"False",
")",
")",
":",
"if",
"index",
"==",
"i",
":",
"return",
"e",
"raise",
"IndexError"
] | Get suggestions for correction.
Yields:
:class:`Suggestion` elements that encapsulate the suggested annotations (if index is ``None``, default)
Returns:
a :class:`Suggestion` element that encapsulates the suggested annotations (if index is set)
Raises:
:class:`IndexError` | [
"Get",
"suggestions",
"for",
"correction",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5170-L5188 | -1 |
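A sketch tying together the :class:`Correction` accessors from the preceding records (``hasnew``, ``hasoriginal``, ``hassuggestions``, ``new``, ``original`` and ``suggestions``), again against a hypothetical document::

    from pynlpl.formats import folia

    doc = folia.Document(file="/path/to/document.folia.xml")  # hypothetical path
    for correction in doc.select(folia.Correction):
        if correction.hasnew():
            print("new annotation:", type(correction.new(0)).__name__)        # first element under <new>
        if correction.hasoriginal():
            print("original annotation:", type(correction.original(0)).__name__)
        if correction.hassuggestions():
            print("suggestions:", sum(1 for _ in correction.suggestions()))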
||||||
35 | proycon/pynlpl | pynlpl/formats/folia.py | Morpheme.findspans | def findspans(self, type,set=None):
"""Find span annotation of the specified type that include this word"""
if issubclass(type, AbstractAnnotationLayer):
layerclass = type
else:
layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
e = self
while True:
if not e.parent: break
e = e.parent
for layer in e.select(layerclass,set,False):
for e2 in layer:
if isinstance(e2, AbstractSpanAnnotation):
if self in e2.wrefs():
yield e2 | python | def findspans(self, type,set=None):
"""Find span annotation of the specified type that include this word"""
if issubclass(type, AbstractAnnotationLayer):
layerclass = type
else:
layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
e = self
while True:
if not e.parent: break
e = e.parent
for layer in e.select(layerclass,set,False):
for e2 in layer:
if isinstance(e2, AbstractSpanAnnotation):
if self in e2.wrefs():
yield e2 | [
"def",
"findspans",
"(",
"self",
",",
"type",
",",
"set",
"=",
"None",
")",
":",
"if",
"issubclass",
"(",
"type",
",",
"AbstractAnnotationLayer",
")",
":",
"layerclass",
"=",
"type",
"else",
":",
"layerclass",
"=",
"ANNOTATIONTYPE2LAYERCLASS",
"[",
"type",
".",
"ANNOTATIONTYPE",
"]",
"e",
"=",
"self",
"while",
"True",
":",
"if",
"not",
"e",
".",
"parent",
":",
"break",
"e",
"=",
"e",
".",
"parent",
"for",
"layer",
"in",
"e",
".",
"select",
"(",
"layerclass",
",",
"set",
",",
"False",
")",
":",
"for",
"e2",
"in",
"layer",
":",
"if",
"isinstance",
"(",
"e2",
",",
"AbstractSpanAnnotation",
")",
":",
"if",
"self",
"in",
"e2",
".",
"wrefs",
"(",
")",
":",
"yield",
"e2"
] | Find span annotations of the specified type that include this word | [
"Find",
"span",
"annotation",
"of",
"the",
"specified",
"type",
"that",
"include",
"this",
"word"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L5528-L5542 | -1 |
||||||
36 | proycon/pynlpl | pynlpl/formats/folia.py | Pattern.resolve | def resolve(self,size, distribution):
"""Resolve a variable sized pattern to all patterns of a certain fixed size"""
if not self.variablesize():
raise Exception("Can only resize patterns with * wildcards")
nrofwildcards = 0
for x in self.sequence:
if x == '*':
nrofwildcards += 1
assert (len(distribution) == nrofwildcards)
wildcardnr = 0
newsequence = []
for x in self.sequence:
if x == '*':
newsequence += [True] * distribution[wildcardnr]
wildcardnr += 1
else:
newsequence.append(x)
d = { 'matchannotation':self.matchannotation, 'matchannotationset':self.matchannotationset, 'casesensitive':self.casesensitive }
yield Pattern(*newsequence, **d ) | python | def resolve(self,size, distribution):
"""Resolve a variable sized pattern to all patterns of a certain fixed size"""
if not self.variablesize():
raise Exception("Can only resize patterns with * wildcards")
nrofwildcards = 0
for x in self.sequence:
if x == '*':
nrofwildcards += 1
assert (len(distribution) == nrofwildcards)
wildcardnr = 0
newsequence = []
for x in self.sequence:
if x == '*':
newsequence += [True] * distribution[wildcardnr]
wildcardnr += 1
else:
newsequence.append(x)
d = { 'matchannotation':self.matchannotation, 'matchannotationset':self.matchannotationset, 'casesensitive':self.casesensitive }
yield Pattern(*newsequence, **d ) | [
"def",
"resolve",
"(",
"self",
",",
"size",
",",
"distribution",
")",
":",
"if",
"not",
"self",
".",
"variablesize",
"(",
")",
":",
"raise",
"Exception",
"(",
"\"Can only resize patterns with * wildcards\"",
")",
"nrofwildcards",
"=",
"0",
"for",
"x",
"in",
"self",
".",
"sequence",
":",
"if",
"x",
"==",
"'*'",
":",
"nrofwildcards",
"+=",
"1",
"assert",
"(",
"len",
"(",
"distribution",
")",
"==",
"nrofwildcards",
")",
"wildcardnr",
"=",
"0",
"newsequence",
"=",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"sequence",
":",
"if",
"x",
"==",
"'*'",
":",
"newsequence",
"+=",
"[",
"True",
"]",
"*",
"distribution",
"[",
"wildcardnr",
"]",
"wildcardnr",
"+=",
"1",
"else",
":",
"newsequence",
".",
"append",
"(",
"x",
")",
"d",
"=",
"{",
"'matchannotation'",
":",
"self",
".",
"matchannotation",
",",
"'matchannotationset'",
":",
"self",
".",
"matchannotationset",
",",
"'casesensitive'",
":",
"self",
".",
"casesensitive",
"}",
"yield",
"Pattern",
"(",
"*",
"newsequence",
",",
"*",
"*",
"d",
")"
] | Resolve a variable sized pattern to all patterns of a certain fixed size | [
"Resolve",
"a",
"variable",
"sized",
"pattern",
"to",
"all",
"patterns",
"of",
"a",
"certain",
"fixed",
"size"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6221-L6242 | -1 |
||||||
37 | proycon/pynlpl | pynlpl/formats/folia.py | Document.load | def load(self, filename):
"""Load a FoLiA XML file.
Argument:
filename (str): The file to load
"""
#if LXE and self.mode != Mode.XPATH:
# #workaround for xml:id problem (disabled)
# #f = open(filename)
# #s = f.read().replace(' xml:id=', ' id=')
# #f.close()
# self.tree = ElementTree.parse(filename)
#else:
self.tree = xmltreefromfile(filename)
self.parsexml(self.tree.getroot())
if self.mode != Mode.XPATH:
#XML Tree is now obsolete (only needed when partially loaded for xpath queries)
self.tree = None | python | def load(self, filename):
"""Load a FoLiA XML file.
Argument:
filename (str): The file to load
"""
#if LXE and self.mode != Mode.XPATH:
# #workaround for xml:id problem (disabled)
# #f = open(filename)
# #s = f.read().replace(' xml:id=', ' id=')
# #f.close()
# self.tree = ElementTree.parse(filename)
#else:
self.tree = xmltreefromfile(filename)
self.parsexml(self.tree.getroot())
if self.mode != Mode.XPATH:
#XML Tree is now obsolete (only needed when partially loaded for xpath queries)
self.tree = None | [
"def",
"load",
"(",
"self",
",",
"filename",
")",
":",
"#if LXE and self.mode != Mode.XPATH:",
"# #workaround for xml:id problem (disabled)",
"# #f = open(filename)",
"# #s = f.read().replace(' xml:id=', ' id=')",
"# #f.close()",
"# self.tree = ElementTree.parse(filename)",
"#else:",
"self",
".",
"tree",
"=",
"xmltreefromfile",
"(",
"filename",
")",
"self",
".",
"parsexml",
"(",
"self",
".",
"tree",
".",
"getroot",
"(",
")",
")",
"if",
"self",
".",
"mode",
"!=",
"Mode",
".",
"XPATH",
":",
"#XML Tree is now obsolete (only needed when partially loaded for xpath queries)",
"self",
".",
"tree",
"=",
"None"
] | Load a FoLiA XML file.
Argument:
filename (str): The file to load | [
"Load",
"a",
"FoLiA",
"XML",
"file",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6495-L6512 | -1 |
||||||
38 | proycon/pynlpl | pynlpl/formats/folia.py | Document.items | def items(self):
"""Returns a depth-first flat list of all items in the document"""
l = []
for e in self.data:
l += e.items()
return l | python | def items(self):
"""Returns a depth-first flat list of all items in the document"""
l = []
for e in self.data:
l += e.items()
return l | [
"def",
"items",
"(",
"self",
")",
":",
"l",
"=",
"[",
"]",
"for",
"e",
"in",
"self",
".",
"data",
":",
"l",
"+=",
"e",
".",
"items",
"(",
")",
"return",
"l"
] | Returns a depth-first flat list of all items in the document | [
"Returns",
"a",
"depth",
"-",
"first",
"flat",
"list",
"of",
"all",
"items",
"in",
"the",
"document"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6514-L6519 | -1 |
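A minimal load-and-inspect sketch for ``Document.load`` and ``Document.items`` as documented in the two records above; the path is hypothetical::

    from pynlpl.formats import folia

    doc = folia.Document(file="/path/to/document.folia.xml")  # hypothetical path
    print(doc.id, "contains", len(doc.items()), "elements (depth-first)")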
||||||
39 | proycon/pynlpl | pynlpl/formats/folia.py | Document.save | def save(self, filename=None):
"""Save the document to file.
Arguments:
* filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from.
"""
if not filename:
filename = self.filename
if not filename:
raise Exception("No filename specified")
if filename[-4:].lower() == '.bz2':
f = bz2.BZ2File(filename,'wb')
f.write(self.xmlstring().encode('utf-8'))
f.close()
elif filename[-3:].lower() == '.gz':
f = gzip.GzipFile(filename,'wb') #pylint: disable=redefined-variable-type
f.write(self.xmlstring().encode('utf-8'))
f.close()
else:
f = io.open(filename,'w',encoding='utf-8')
f.write(self.xmlstring())
f.close() | python | def save(self, filename=None):
"""Save the document to file.
Arguments:
* filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from.
"""
if not filename:
filename = self.filename
if not filename:
raise Exception("No filename specified")
if filename[-4:].lower() == '.bz2':
f = bz2.BZ2File(filename,'wb')
f.write(self.xmlstring().encode('utf-8'))
f.close()
elif filename[-3:].lower() == '.gz':
f = gzip.GzipFile(filename,'wb') #pylint: disable=redefined-variable-type
f.write(self.xmlstring().encode('utf-8'))
f.close()
else:
f = io.open(filename,'w',encoding='utf-8')
f.write(self.xmlstring())
f.close() | [
"def",
"save",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"self",
".",
"filename",
"if",
"not",
"filename",
":",
"raise",
"Exception",
"(",
"\"No filename specified\"",
")",
"if",
"filename",
"[",
"-",
"4",
":",
"]",
".",
"lower",
"(",
")",
"==",
"'.bz2'",
":",
"f",
"=",
"bz2",
".",
"BZ2File",
"(",
"filename",
",",
"'wb'",
")",
"f",
".",
"write",
"(",
"self",
".",
"xmlstring",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"f",
".",
"close",
"(",
")",
"elif",
"filename",
"[",
"-",
"3",
":",
"]",
".",
"lower",
"(",
")",
"==",
"'.gz'",
":",
"f",
"=",
"gzip",
".",
"GzipFile",
"(",
"filename",
",",
"'wb'",
")",
"#pylint: disable=redefined-variable-type",
"f",
".",
"write",
"(",
"self",
".",
"xmlstring",
"(",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"f",
".",
"close",
"(",
")",
"else",
":",
"f",
"=",
"io",
".",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"f",
".",
"write",
"(",
"self",
".",
"xmlstring",
"(",
")",
")",
"f",
".",
"close",
"(",
")"
] | Save the document to file.
Arguments:
* filename (str): The filename to save to. If not set (``None``, default), saves to the same file as loaded from. | [
"Save",
"the",
"document",
"to",
"file",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6547-L6568 | -1 |
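A sketch of ``Document.save`` following the record above; both paths are hypothetical::

    from pynlpl.formats import folia

    doc = folia.Document(file="/path/to/document.folia.xml")    # hypothetical path
    doc.save("/path/to/document.copy.folia.xml.gz")              # .gz / .bz2 extensions trigger compressed output
    doc.save()                                                   # no argument: write back to the file it was loaded from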
||||||
40 | proycon/pynlpl | pynlpl/formats/folia.py | Document.xmldeclarations | def xmldeclarations(self):
"""Internal method to generate XML nodes for all declarations"""
l = []
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
attribs = {}
if set and set != 'undefined':
attribs['{' + NSFOLIA + '}set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
attribs['{' + NSFOLIA + '}' + key] = 'manual'
elif value == AnnotatorType.AUTO:
attribs['{' + NSFOLIA + '}' + key] = 'auto'
elif key == 'datetime':
attribs['{' + NSFOLIA + '}' + key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
attribs['{' + NSFOLIA + '}' + key] = value
if label:
l.append( makeelement(E,'{' + NSFOLIA + '}' + label.lower() + '-annotation', **attribs) )
else:
raise Exception("Invalid annotation type")
return l | python | def xmldeclarations(self):
"""Internal method to generate XML nodes for all declarations"""
l = []
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
attribs = {}
if set and set != 'undefined':
attribs['{' + NSFOLIA + '}set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
attribs['{' + NSFOLIA + '}' + key] = 'manual'
elif value == AnnotatorType.AUTO:
attribs['{' + NSFOLIA + '}' + key] = 'auto'
elif key == 'datetime':
attribs['{' + NSFOLIA + '}' + key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
attribs['{' + NSFOLIA + '}' + key] = value
if label:
l.append( makeelement(E,'{' + NSFOLIA + '}' + label.lower() + '-annotation', **attribs) )
else:
raise Exception("Invalid annotation type")
return l | [
"def",
"xmldeclarations",
"(",
"self",
")",
":",
"l",
"=",
"[",
"]",
"E",
"=",
"ElementMaker",
"(",
"namespace",
"=",
"\"http://ilk.uvt.nl/folia\"",
",",
"nsmap",
"=",
"{",
"None",
":",
"\"http://ilk.uvt.nl/folia\"",
",",
"'xml'",
":",
"\"http://www.w3.org/XML/1998/namespace\"",
"}",
")",
"for",
"annotationtype",
",",
"set",
"in",
"self",
".",
"annotations",
":",
"label",
"=",
"None",
"#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)",
"for",
"key",
",",
"value",
"in",
"vars",
"(",
"AnnotationType",
")",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"annotationtype",
":",
"label",
"=",
"key",
"break",
"#gather attribs",
"if",
"(",
"annotationtype",
"==",
"AnnotationType",
".",
"TEXT",
"or",
"annotationtype",
"==",
"AnnotationType",
".",
"PHON",
")",
"and",
"set",
"==",
"'undefined'",
"and",
"len",
"(",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
")",
"==",
"0",
":",
"#this is the implicit TextContent declaration, no need to output it explicitly",
"continue",
"attribs",
"=",
"{",
"}",
"if",
"set",
"and",
"set",
"!=",
"'undefined'",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}set'",
"]",
"=",
"set",
"for",
"key",
",",
"value",
"in",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'annotatortype'",
":",
"if",
"value",
"==",
"AnnotatorType",
".",
"MANUAL",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"'manual'",
"elif",
"value",
"==",
"AnnotatorType",
".",
"AUTO",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"'auto'",
"elif",
"key",
"==",
"'datetime'",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"value",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"#proper iso-formatting",
"elif",
"value",
":",
"attribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"key",
"]",
"=",
"value",
"if",
"label",
":",
"l",
".",
"append",
"(",
"makeelement",
"(",
"E",
",",
"'{'",
"+",
"NSFOLIA",
"+",
"'}'",
"+",
"label",
".",
"lower",
"(",
")",
"+",
"'-annotation'",
",",
"*",
"*",
"attribs",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid annotation type\"",
")",
"return",
"l"
] | Internal method to generate XML nodes for all declarations | [
"Internal",
"method",
"to",
"generate",
"XML",
"nodes",
"for",
"all",
"declarations"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6653-L6690 | -1 |
||||||
41 | proycon/pynlpl | pynlpl/formats/folia.py | Document.jsondeclarations | def jsondeclarations(self):
"""Return all declarations in a form ready to be serialised to JSON.
Returns:
list of dict
"""
l = []
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
jsonnode = {'annotationtype': label.lower()}
if set and set != 'undefined':
jsonnode['set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
jsonnode[key] = 'manual'
elif value == AnnotatorType.AUTO:
jsonnode[key] = 'auto'
elif key == 'datetime':
jsonnode[key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
jsonnode[key] = value
if label:
l.append( jsonnode )
else:
raise Exception("Invalid annotation type")
return l | python | def jsondeclarations(self):
"""Return all declarations in a form ready to be serialised to JSON.
Returns:
list of dict
"""
l = []
for annotationtype, set in self.annotations:
label = None
#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)
for key, value in vars(AnnotationType).items():
if value == annotationtype:
label = key
break
#gather attribs
if (annotationtype == AnnotationType.TEXT or annotationtype == AnnotationType.PHON) and set == 'undefined' and len(self.annotationdefaults[annotationtype][set]) == 0:
#this is the implicit TextContent declaration, no need to output it explicitly
continue
jsonnode = {'annotationtype': label.lower()}
if set and set != 'undefined':
jsonnode['set'] = set
for key, value in self.annotationdefaults[annotationtype][set].items():
if key == 'annotatortype':
if value == AnnotatorType.MANUAL:
jsonnode[key] = 'manual'
elif value == AnnotatorType.AUTO:
jsonnode[key] = 'auto'
elif key == 'datetime':
jsonnode[key] = value.strftime("%Y-%m-%dT%H:%M:%S") #proper iso-formatting
elif value:
jsonnode[key] = value
if label:
l.append( jsonnode )
else:
raise Exception("Invalid annotation type")
return l | [
"def",
"jsondeclarations",
"(",
"self",
")",
":",
"l",
"=",
"[",
"]",
"for",
"annotationtype",
",",
"set",
"in",
"self",
".",
"annotations",
":",
"label",
"=",
"None",
"#Find the 'label' for the declarations dynamically (aka: AnnotationType --> String)",
"for",
"key",
",",
"value",
"in",
"vars",
"(",
"AnnotationType",
")",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"annotationtype",
":",
"label",
"=",
"key",
"break",
"#gather attribs",
"if",
"(",
"annotationtype",
"==",
"AnnotationType",
".",
"TEXT",
"or",
"annotationtype",
"==",
"AnnotationType",
".",
"PHON",
")",
"and",
"set",
"==",
"'undefined'",
"and",
"len",
"(",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
")",
"==",
"0",
":",
"#this is the implicit TextContent declaration, no need to output it explicitly",
"continue",
"jsonnode",
"=",
"{",
"'annotationtype'",
":",
"label",
".",
"lower",
"(",
")",
"}",
"if",
"set",
"and",
"set",
"!=",
"'undefined'",
":",
"jsonnode",
"[",
"'set'",
"]",
"=",
"set",
"for",
"key",
",",
"value",
"in",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'annotatortype'",
":",
"if",
"value",
"==",
"AnnotatorType",
".",
"MANUAL",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"'manual'",
"elif",
"value",
"==",
"AnnotatorType",
".",
"AUTO",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"'auto'",
"elif",
"key",
"==",
"'datetime'",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"value",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%S\"",
")",
"#proper iso-formatting",
"elif",
"value",
":",
"jsonnode",
"[",
"key",
"]",
"=",
"value",
"if",
"label",
":",
"l",
".",
"append",
"(",
"jsonnode",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid annotation type\"",
")",
"return",
"l"
] | Return all declarations in a form ready to be serialised to JSON.
Returns:
list of dict | [
"Return",
"all",
"declarations",
"in",
"a",
"form",
"ready",
"to",
"be",
"serialised",
"to",
"JSON",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6692-L6731 | -1 |
||||||
42 | proycon/pynlpl | pynlpl/formats/folia.py | Document.xml | def xml(self):
"""Serialise the document to XML.
Returns:
lxml.etree.Element
See also:
:meth:`Document.xmlstring`
"""
self.pendingvalidation()
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={'xml' : "http://www.w3.org/XML/1998/namespace", 'xlink':"http://www.w3.org/1999/xlink"})
attribs = {}
attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id
#if self.version:
# attribs['version'] = self.version
#else:
attribs['version'] = FOLIAVERSION
attribs['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
metadataattribs = {}
metadataattribs['{' + NSFOLIA + '}type'] = self.metadatatype
if isinstance(self.metadata, ExternalMetaData):
metadataattribs['{' + NSFOLIA + '}src'] = self.metadata.url
e = E.FoLiA(
E.metadata(
E.annotations(
*self.xmldeclarations()
),
*self.xmlmetadata(),
**metadataattribs
)
, **attribs)
for text in self.data:
e.append(text.xml())
return e | python | def xml(self):
"""Serialise the document to XML.
Returns:
lxml.etree.Element
See also:
:meth:`Document.xmlstring`
"""
self.pendingvalidation()
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={'xml' : "http://www.w3.org/XML/1998/namespace", 'xlink':"http://www.w3.org/1999/xlink"})
attribs = {}
attribs['{http://www.w3.org/XML/1998/namespace}id'] = self.id
#if self.version:
# attribs['version'] = self.version
#else:
attribs['version'] = FOLIAVERSION
attribs['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
metadataattribs = {}
metadataattribs['{' + NSFOLIA + '}type'] = self.metadatatype
if isinstance(self.metadata, ExternalMetaData):
metadataattribs['{' + NSFOLIA + '}src'] = self.metadata.url
e = E.FoLiA(
E.metadata(
E.annotations(
*self.xmldeclarations()
),
*self.xmlmetadata(),
**metadataattribs
)
, **attribs)
for text in self.data:
e.append(text.xml())
return e | [
"def",
"xml",
"(",
"self",
")",
":",
"self",
".",
"pendingvalidation",
"(",
")",
"E",
"=",
"ElementMaker",
"(",
"namespace",
"=",
"\"http://ilk.uvt.nl/folia\"",
",",
"nsmap",
"=",
"{",
"'xml'",
":",
"\"http://www.w3.org/XML/1998/namespace\"",
",",
"'xlink'",
":",
"\"http://www.w3.org/1999/xlink\"",
"}",
")",
"attribs",
"=",
"{",
"}",
"attribs",
"[",
"'{http://www.w3.org/XML/1998/namespace}id'",
"]",
"=",
"self",
".",
"id",
"#if self.version:",
"# attribs['version'] = self.version",
"#else:",
"attribs",
"[",
"'version'",
"]",
"=",
"FOLIAVERSION",
"attribs",
"[",
"'generator'",
"]",
"=",
"'pynlpl.formats.folia-v'",
"+",
"LIBVERSION",
"metadataattribs",
"=",
"{",
"}",
"metadataattribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}type'",
"]",
"=",
"self",
".",
"metadatatype",
"if",
"isinstance",
"(",
"self",
".",
"metadata",
",",
"ExternalMetaData",
")",
":",
"metadataattribs",
"[",
"'{'",
"+",
"NSFOLIA",
"+",
"'}src'",
"]",
"=",
"self",
".",
"metadata",
".",
"url",
"e",
"=",
"E",
".",
"FoLiA",
"(",
"E",
".",
"metadata",
"(",
"E",
".",
"annotations",
"(",
"*",
"self",
".",
"xmldeclarations",
"(",
")",
")",
",",
"*",
"self",
".",
"xmlmetadata",
"(",
")",
",",
"*",
"*",
"metadataattribs",
")",
",",
"*",
"*",
"attribs",
")",
"for",
"text",
"in",
"self",
".",
"data",
":",
"e",
".",
"append",
"(",
"text",
".",
"xml",
"(",
")",
")",
"return",
"e"
] | Serialise the document to XML.
Returns:
lxml.etree.Element
See also:
:meth:`Document.xmlstring` | [
"Serialise",
"the",
"document",
"to",
"XML",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6733-L6773 | -1 |
||||||
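Since the `Document.xml` record above returns an `lxml.etree.Element` rather than a string, callers normally serialise the element themselves (the docstring also points at `Document.xmlstring` as a ready-made shortcut). A minimal sketch, assuming a loaded FoLiA document; the file names are placeholders:

```python
from lxml import etree
from pynlpl.formats import folia

doc = folia.Document(file="example.folia.xml")   # hypothetical input file
root = doc.xml()                                 # lxml Element; pendingvalidation() has already run
xmlbytes = etree.tostring(root, xml_declaration=True, encoding="utf-8", pretty_print=True)
with open("example.out.folia.xml", "wb") as f:
    f.write(xmlbytes)
```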
43 | proycon/pynlpl | pynlpl/formats/folia.py | Document.json | def json(self):
"""Serialise the document to a ``dict`` ready for serialisation to JSON.
Example::
import json
jsondoc = json.dumps(doc.json())
"""
self.pendingvalidation()
jsondoc = {'id': self.id, 'children': [], 'declarations': self.jsondeclarations() }
if self.version:
jsondoc['version'] = self.version
else:
jsondoc['version'] = FOLIAVERSION
jsondoc['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
for text in self.data:
jsondoc['children'].append(text.json())
return jsondoc | python | def json(self):
"""Serialise the document to a ``dict`` ready for serialisation to JSON.
Example::
import json
jsondoc = json.dumps(doc.json())
"""
self.pendingvalidation()
jsondoc = {'id': self.id, 'children': [], 'declarations': self.jsondeclarations() }
if self.version:
jsondoc['version'] = self.version
else:
jsondoc['version'] = FOLIAVERSION
jsondoc['generator'] = 'pynlpl.formats.folia-v' + LIBVERSION
for text in self.data:
jsondoc['children'].append(text.json())
return jsondoc | [
"def",
"json",
"(",
"self",
")",
":",
"self",
".",
"pendingvalidation",
"(",
")",
"jsondoc",
"=",
"{",
"'id'",
":",
"self",
".",
"id",
",",
"'children'",
":",
"[",
"]",
",",
"'declarations'",
":",
"self",
".",
"jsondeclarations",
"(",
")",
"}",
"if",
"self",
".",
"version",
":",
"jsondoc",
"[",
"'version'",
"]",
"=",
"self",
".",
"version",
"else",
":",
"jsondoc",
"[",
"'version'",
"]",
"=",
"FOLIAVERSION",
"jsondoc",
"[",
"'generator'",
"]",
"=",
"'pynlpl.formats.folia-v'",
"+",
"LIBVERSION",
"for",
"text",
"in",
"self",
".",
"data",
":",
"jsondoc",
"[",
"'children'",
"]",
".",
"append",
"(",
"text",
".",
"json",
"(",
")",
")",
"return",
"jsondoc"
] | Serialise the document to a ``dict`` ready for serialisation to JSON.
Example::
import json
jsondoc = json.dumps(doc.json()) | [
"Serialise",
"the",
"document",
"to",
"a",
"dict",
"ready",
"for",
"serialisation",
"to",
"JSON",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6775-L6794 | -1 |
||||||
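The `Document.json` record returns a plain `dict` (its `declarations` key is produced by the `jsondeclarations` record shown earlier), so the standard-library `json` module can dump it directly. A small usage sketch with a placeholder file name:

```python
import json
from pynlpl.formats import folia

doc = folia.Document(file="example.folia.xml")   # hypothetical input file
jsondoc = doc.json()                             # dict with id, version, generator, declarations, children
with open("example.folia.json", "w") as f:
    json.dump(jsondoc, f, indent=2)
print(jsondoc["version"], len(jsondoc["declarations"]))
```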
44 | proycon/pynlpl | pynlpl/formats/folia.py | Document.xmlmetadata | def xmlmetadata(self):
"""Internal method to serialize metadata to XML"""
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
elements = []
if self.metadatatype == "native":
if isinstance(self.metadata, NativeMetaData):
for key, value in self.metadata.items():
elements.append(E.meta(value,id=key) )
else:
if isinstance(self.metadata, ForeignData):
#in-document
m = self.metadata
while m is not None:
elements.append(m.xml())
m = m.next
for metadata_id, submetadata in self.submetadata.items():
subelements = []
attribs = {
"{http://www.w3.org/XML/1998/namespace}id": metadata_id,
"type": self.submetadatatype[metadata_id] }
if isinstance(submetadata, NativeMetaData):
for key, value in submetadata.items():
subelements.append(E.meta(value,id=key) )
elif isinstance(submetadata, ExternalMetaData):
attribs['src'] = submetadata.url
elif isinstance(submetadata, ForeignData):
#in-document
m = submetadata
while m is not None:
subelements.append(m.xml())
m = m.next
elements.append( E.submetadata(*subelements, **attribs))
return elements | python | def xmlmetadata(self):
"""Internal method to serialize metadata to XML"""
E = ElementMaker(namespace="http://ilk.uvt.nl/folia",nsmap={None: "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace"})
elements = []
if self.metadatatype == "native":
if isinstance(self.metadata, NativeMetaData):
for key, value in self.metadata.items():
elements.append(E.meta(value,id=key) )
else:
if isinstance(self.metadata, ForeignData):
#in-document
m = self.metadata
while m is not None:
elements.append(m.xml())
m = m.next
for metadata_id, submetadata in self.submetadata.items():
subelements = []
attribs = {
"{http://www.w3.org/XML/1998/namespace}id": metadata_id,
"type": self.submetadatatype[metadata_id] }
if isinstance(submetadata, NativeMetaData):
for key, value in submetadata.items():
subelements.append(E.meta(value,id=key) )
elif isinstance(submetadata, ExternalMetaData):
attribs['src'] = submetadata.url
elif isinstance(submetadata, ForeignData):
#in-document
m = submetadata
while m is not None:
subelements.append(m.xml())
m = m.next
elements.append( E.submetadata(*subelements, **attribs))
return elements | [
"def",
"xmlmetadata",
"(",
"self",
")",
":",
"E",
"=",
"ElementMaker",
"(",
"namespace",
"=",
"\"http://ilk.uvt.nl/folia\"",
",",
"nsmap",
"=",
"{",
"None",
":",
"\"http://ilk.uvt.nl/folia\"",
",",
"'xml'",
":",
"\"http://www.w3.org/XML/1998/namespace\"",
"}",
")",
"elements",
"=",
"[",
"]",
"if",
"self",
".",
"metadatatype",
"==",
"\"native\"",
":",
"if",
"isinstance",
"(",
"self",
".",
"metadata",
",",
"NativeMetaData",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"metadata",
".",
"items",
"(",
")",
":",
"elements",
".",
"append",
"(",
"E",
".",
"meta",
"(",
"value",
",",
"id",
"=",
"key",
")",
")",
"else",
":",
"if",
"isinstance",
"(",
"self",
".",
"metadata",
",",
"ForeignData",
")",
":",
"#in-document",
"m",
"=",
"self",
".",
"metadata",
"while",
"m",
"is",
"not",
"None",
":",
"elements",
".",
"append",
"(",
"m",
".",
"xml",
"(",
")",
")",
"m",
"=",
"m",
".",
"next",
"for",
"metadata_id",
",",
"submetadata",
"in",
"self",
".",
"submetadata",
".",
"items",
"(",
")",
":",
"subelements",
"=",
"[",
"]",
"attribs",
"=",
"{",
"\"{http://www.w3.org/XML/1998/namespace}id\"",
":",
"metadata_id",
",",
"\"type\"",
":",
"self",
".",
"submetadatatype",
"[",
"metadata_id",
"]",
"}",
"if",
"isinstance",
"(",
"submetadata",
",",
"NativeMetaData",
")",
":",
"for",
"key",
",",
"value",
"in",
"submetadata",
".",
"items",
"(",
")",
":",
"subelements",
".",
"append",
"(",
"E",
".",
"meta",
"(",
"value",
",",
"id",
"=",
"key",
")",
")",
"elif",
"isinstance",
"(",
"submetadata",
",",
"ExternalMetaData",
")",
":",
"attribs",
"[",
"'src'",
"]",
"=",
"submetadata",
".",
"url",
"elif",
"isinstance",
"(",
"submetadata",
",",
"ForeignData",
")",
":",
"#in-document",
"m",
"=",
"submetadata",
"while",
"m",
"is",
"not",
"None",
":",
"subelements",
".",
"append",
"(",
"m",
".",
"xml",
"(",
")",
")",
"m",
"=",
"m",
".",
"next",
"elements",
".",
"append",
"(",
"E",
".",
"submetadata",
"(",
"*",
"subelements",
",",
"*",
"*",
"attribs",
")",
")",
"return",
"elements"
] | Internal method to serialize metadata to XML | [
"Internal",
"method",
"to",
"serialize",
"metadata",
"to",
"XML"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6796-L6828 | -1 |
||||||
45 | proycon/pynlpl | pynlpl/formats/folia.py | Document.declare | def declare(self, annotationtype, set, **kwargs):
"""Declare a new annotation type to be used in the document.
Keyword arguments can be used to set defaults for any annotation of this type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Keyword Arguments:
annotator (str): Sets a default annotator
annotatortype: Should be either ``AnnotatorType.MANUAL`` or ``AnnotatorType.AUTO``, indicating whether the annotation was performed manually or by an automated process.
datetime (datetime.datetime): Sets the default datetime
alias (str): Defines an alias that may be used in the set attribute of elements instead of the full set name
Example::
doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set', annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO)
"""
if (sys.version > '3' and not isinstance(set,str)) or (sys.version < '3' and not isinstance(set,(str,unicode))):
raise ValueError("Set parameter for declare() must be a string")
if inspect.isclass(annotationtype):
annotationtype = annotationtype.ANNOTATIONTYPE
if annotationtype in self.alias_set and set in self.alias_set[annotationtype]:
raise ValueError("Set " + set + " conflicts with alias, may not be equal!")
if not (annotationtype, set) in self.annotations:
self.annotations.append( (annotationtype,set) )
if set and self.loadsetdefinitions and not set in self.setdefinitions:
if set[:7] == "http://" or set[:8] == "https://" or set[:6] == "ftp://":
self.setdefinitions[set] = SetDefinition(set,verbose=self.verbose) #will raise exception on error
if not annotationtype in self.annotationdefaults:
self.annotationdefaults[annotationtype] = {}
self.annotationdefaults[annotationtype][set] = kwargs
if 'alias' in kwargs:
if annotationtype in self.set_alias and set in self.set_alias[annotationtype] and self.set_alias[annotationtype][set] != kwargs['alias']:
raise ValueError("Redeclaring set " + set + " with another alias ('"+kwargs['alias']+"') is not allowed!")
if annotationtype in self.alias_set and kwargs['alias'] in self.alias_set[annotationtype] and self.alias_set[annotationtype][kwargs['alias']] != set:
raise ValueError("Redeclaring alias " + kwargs['alias'] + " with another set ('"+set+"') is not allowed!")
if annotationtype in self.set_alias and kwargs['alias'] in self.set_alias[annotationtype]:
raise ValueError("Alias " + kwargs['alias'] + " conflicts with set name, may not be equal!")
if annotationtype not in self.alias_set:
self.alias_set[annotationtype] = {}
if annotationtype not in self.set_alias:
self.set_alias[annotationtype] = {}
self.alias_set[annotationtype][kwargs['alias']] = set
self.set_alias[annotationtype][set] = kwargs['alias'] | python | def declare(self, annotationtype, set, **kwargs):
"""Declare a new annotation type to be used in the document.
Keyword arguments can be used to set defaults for any annotation of this type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Keyword Arguments:
annotator (str): Sets a default annotator
annotatortype: Should be either ``AnnotatorType.MANUAL`` or ``AnnotatorType.AUTO``, indicating whether the annotation was performed manually or by an automated process.
datetime (datetime.datetime): Sets the default datetime
alias (str): Defines an alias that may be used in the set attribute of elements instead of the full set name
Example::
doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set', annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO)
"""
if (sys.version > '3' and not isinstance(set,str)) or (sys.version < '3' and not isinstance(set,(str,unicode))):
raise ValueError("Set parameter for declare() must be a string")
if inspect.isclass(annotationtype):
annotationtype = annotationtype.ANNOTATIONTYPE
if annotationtype in self.alias_set and set in self.alias_set[annotationtype]:
raise ValueError("Set " + set + " conflicts with alias, may not be equal!")
if not (annotationtype, set) in self.annotations:
self.annotations.append( (annotationtype,set) )
if set and self.loadsetdefinitions and not set in self.setdefinitions:
if set[:7] == "http://" or set[:8] == "https://" or set[:6] == "ftp://":
self.setdefinitions[set] = SetDefinition(set,verbose=self.verbose) #will raise exception on error
if not annotationtype in self.annotationdefaults:
self.annotationdefaults[annotationtype] = {}
self.annotationdefaults[annotationtype][set] = kwargs
if 'alias' in kwargs:
if annotationtype in self.set_alias and set in self.set_alias[annotationtype] and self.set_alias[annotationtype][set] != kwargs['alias']:
raise ValueError("Redeclaring set " + set + " with another alias ('"+kwargs['alias']+"') is not allowed!")
if annotationtype in self.alias_set and kwargs['alias'] in self.alias_set[annotationtype] and self.alias_set[annotationtype][kwargs['alias']] != set:
raise ValueError("Redeclaring alias " + kwargs['alias'] + " with another set ('"+set+"') is not allowed!")
if annotationtype in self.set_alias and kwargs['alias'] in self.set_alias[annotationtype]:
raise ValueError("Alias " + kwargs['alias'] + " conflicts with set name, may not be equal!")
if annotationtype not in self.alias_set:
self.alias_set[annotationtype] = {}
if annotationtype not in self.set_alias:
self.set_alias[annotationtype] = {}
self.alias_set[annotationtype][kwargs['alias']] = set
self.set_alias[annotationtype][set] = kwargs['alias'] | [
"def",
"declare",
"(",
"self",
",",
"annotationtype",
",",
"set",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"sys",
".",
"version",
">",
"'3'",
"and",
"not",
"isinstance",
"(",
"set",
",",
"str",
")",
")",
"or",
"(",
"sys",
".",
"version",
"<",
"'3'",
"and",
"not",
"isinstance",
"(",
"set",
",",
"(",
"str",
",",
"unicode",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Set parameter for declare() must be a string\"",
")",
"if",
"inspect",
".",
"isclass",
"(",
"annotationtype",
")",
":",
"annotationtype",
"=",
"annotationtype",
".",
"ANNOTATIONTYPE",
"if",
"annotationtype",
"in",
"self",
".",
"alias_set",
"and",
"set",
"in",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
":",
"raise",
"ValueError",
"(",
"\"Set \"",
"+",
"set",
"+",
"\" conflicts with alias, may not be equal!\"",
")",
"if",
"not",
"(",
"annotationtype",
",",
"set",
")",
"in",
"self",
".",
"annotations",
":",
"self",
".",
"annotations",
".",
"append",
"(",
"(",
"annotationtype",
",",
"set",
")",
")",
"if",
"set",
"and",
"self",
".",
"loadsetdefinitions",
"and",
"not",
"set",
"in",
"self",
".",
"setdefinitions",
":",
"if",
"set",
"[",
":",
"7",
"]",
"==",
"\"http://\"",
"or",
"set",
"[",
":",
"8",
"]",
"==",
"\"https://\"",
"or",
"set",
"[",
":",
"6",
"]",
"==",
"\"ftp://\"",
":",
"self",
".",
"setdefinitions",
"[",
"set",
"]",
"=",
"SetDefinition",
"(",
"set",
",",
"verbose",
"=",
"self",
".",
"verbose",
")",
"#will raise exception on error",
"if",
"not",
"annotationtype",
"in",
"self",
".",
"annotationdefaults",
":",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"=",
"{",
"}",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
"=",
"kwargs",
"if",
"'alias'",
"in",
"kwargs",
":",
"if",
"annotationtype",
"in",
"self",
".",
"set_alias",
"and",
"set",
"in",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"and",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
"!=",
"kwargs",
"[",
"'alias'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Redeclaring set \"",
"+",
"set",
"+",
"\" with another alias ('\"",
"+",
"kwargs",
"[",
"'alias'",
"]",
"+",
"\"') is not allowed!\"",
")",
"if",
"annotationtype",
"in",
"self",
".",
"alias_set",
"and",
"kwargs",
"[",
"'alias'",
"]",
"in",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"and",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"[",
"kwargs",
"[",
"'alias'",
"]",
"]",
"!=",
"set",
":",
"raise",
"ValueError",
"(",
"\"Redeclaring alias \"",
"+",
"kwargs",
"[",
"'alias'",
"]",
"+",
"\" with another set ('\"",
"+",
"set",
"+",
"\"') is not allowed!\"",
")",
"if",
"annotationtype",
"in",
"self",
".",
"set_alias",
"and",
"kwargs",
"[",
"'alias'",
"]",
"in",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
":",
"raise",
"ValueError",
"(",
"\"Alias \"",
"+",
"kwargs",
"[",
"'alias'",
"]",
"+",
"\" conflicts with set name, may not be equal!\"",
")",
"if",
"annotationtype",
"not",
"in",
"self",
".",
"alias_set",
":",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"=",
"{",
"}",
"if",
"annotationtype",
"not",
"in",
"self",
".",
"set_alias",
":",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"=",
"{",
"}",
"self",
".",
"alias_set",
"[",
"annotationtype",
"]",
"[",
"kwargs",
"[",
"'alias'",
"]",
"]",
"=",
"set",
"self",
".",
"set_alias",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
"=",
"kwargs",
"[",
"'alias'",
"]"
] | Declare a new annotation type to be used in the document.
Keyword arguments can be used to set defaults for any annotation of this type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Keyword Arguments:
annotator (str): Sets a default annotator
annotatortype: Should be either ``AnnotatorType.MANUAL`` or ``AnnotatorType.AUTO``, indicating whether the annotation was performed manually or by an automated process.
datetime (datetime.datetime): Sets the default datetime
alias (str): Defines an alias that may be used in the set attribute of elements instead of the full set name
Example::
doc.declare(folia.PosAnnotation, 'http://some/path/brown-tag-set', annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO) | [
"Declare",
"a",
"new",
"annotation",
"type",
"to",
"be",
"used",
"in",
"the",
"document",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6972-L7018 | -1 |
||||||
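The docstring of the `declare` record above already carries the canonical usage example; the sketch below simply repeats it and adds the `alias` keyword that the method also accepts. The set URL and annotator name are placeholders:

```python
from pynlpl.formats import folia

doc = folia.Document(id="example")   # hypothetical new document
doc.declare(folia.PosAnnotation, "http://some/path/brown-tag-set",
            annotator="mytagger", annotatortype=folia.AnnotatorType.AUTO,
            alias="brown")           # 'brown' may now be used instead of the full set URL
# Redeclaring the same set with a different alias raises ValueError, per the checks above.
```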
46 | proycon/pynlpl | pynlpl/formats/folia.py | Document.defaultset | def defaultset(self, annotationtype):
"""Obtain the default set for the specified annotation type.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
Returns:
the set (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
"""
if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
try:
return list(self.annotationdefaults[annotationtype].keys())[0]
except KeyError:
raise NoDefaultError
except IndexError:
raise NoDefaultError | python | def defaultset(self, annotationtype):
"""Obtain the default set for the specified annotation type.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
Returns:
the set (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
"""
if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
try:
return list(self.annotationdefaults[annotationtype].keys())[0]
except KeyError:
raise NoDefaultError
except IndexError:
raise NoDefaultError | [
"def",
"defaultset",
"(",
"self",
",",
"annotationtype",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"annotationtype",
")",
"or",
"isinstance",
"(",
"annotationtype",
",",
"AbstractElement",
")",
":",
"annotationtype",
"=",
"annotationtype",
".",
"ANNOTATIONTYPE",
"try",
":",
"return",
"list",
"(",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"except",
"KeyError",
":",
"raise",
"NoDefaultError",
"except",
"IndexError",
":",
"raise",
"NoDefaultError"
] | Obtain the default set for the specified annotation type.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
Returns:
the set (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type) | [
"Obtain",
"the",
"default",
"set",
"for",
"the",
"specified",
"annotation",
"type",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7039-L7058 | -1 |
||||||
47 | proycon/pynlpl | pynlpl/formats/folia.py | Document.defaultannotator | def defaultannotator(self, annotationtype, set=None):
"""Obtain the default annotator for the specified annotation type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Returns:
the annotator (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
"""
if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
if not set: set = self.defaultset(annotationtype)
try:
return self.annotationdefaults[annotationtype][set]['annotator']
except KeyError:
raise NoDefaultError | python | def defaultannotator(self, annotationtype, set=None):
"""Obtain the default annotator for the specified annotation type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Returns:
the annotator (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type)
"""
if inspect.isclass(annotationtype) or isinstance(annotationtype,AbstractElement): annotationtype = annotationtype.ANNOTATIONTYPE
if not set: set = self.defaultset(annotationtype)
try:
return self.annotationdefaults[annotationtype][set]['annotator']
except KeyError:
raise NoDefaultError | [
"def",
"defaultannotator",
"(",
"self",
",",
"annotationtype",
",",
"set",
"=",
"None",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"annotationtype",
")",
"or",
"isinstance",
"(",
"annotationtype",
",",
"AbstractElement",
")",
":",
"annotationtype",
"=",
"annotationtype",
".",
"ANNOTATIONTYPE",
"if",
"not",
"set",
":",
"set",
"=",
"self",
".",
"defaultset",
"(",
"annotationtype",
")",
"try",
":",
"return",
"self",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"set",
"]",
"[",
"'annotator'",
"]",
"except",
"KeyError",
":",
"raise",
"NoDefaultError"
] | Obtain the default annotator for the specified annotation type and set.
Arguments:
annotationtype: The type of annotation, this is conveyed by passing the corresponding annotation class (such as :class:`PosAnnotation` for example), or a member of :class:`AnnotationType`, such as ``AnnotationType.POS``.
set (str): the set, should formally be a URL pointing to the set definition
Returns:
the annotator (str)
Raises:
:class:`NoDefaultError` if the annotation type does not exist or if there is ambiguity (multiple sets for the same type) | [
"Obtain",
"the",
"default",
"annotator",
"for",
"the",
"specified",
"annotation",
"type",
"and",
"set",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7061-L7080 | -1 |
||||||
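The `defaultset` and `defaultannotator` records above both raise `NoDefaultError` when no unambiguous default has been declared, so lookups are typically wrapped in a try/except. A sketch assuming `doc` carries the POS declaration from the previous example:

```python
from pynlpl.formats import folia

try:
    posset = doc.defaultset(folia.AnnotationType.POS)      # fails if zero or multiple POS sets are declared
    annotator = doc.defaultannotator(folia.AnnotationType.POS, posset)
    print("default POS set:", posset, "annotator:", annotator)
except folia.NoDefaultError:
    print("no unambiguous default declared for POS annotation")
```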
48 | proycon/pynlpl | pynlpl/formats/folia.py | Document.parsemetadata | def parsemetadata(self, node):
"""Internal method to parse metadata"""
if 'type' in node.attrib:
self.metadatatype = node.attrib['type']
else:
#no type specified, default to native
self.metadatatype = "native"
if 'src' in node.attrib:
self.metadata = ExternalMetaData(node.attrib['src'])
elif self.metadatatype == "native":
self.metadata = NativeMetaData()
else:
self.metadata = None #may be set below to ForeignData
for subnode in node:
if subnode.tag == '{' + NSFOLIA + '}annotations':
self.parsexmldeclarations(subnode)
elif subnode.tag == '{' + NSFOLIA + '}meta':
if self.metadatatype == "native":
if subnode.text:
self.metadata[subnode.attrib['id']] = subnode.text
else:
raise MetaDataError("Encountered a meta element but metadata type is not native!")
elif subnode.tag == '{' + NSFOLIA + '}provenance':
#forward compatibility with FoLiA 2.0; ignore provenance
print("WARNING: Ignoring provenance data. Use foliapy instead of pynlpl.formats.folia for FoLiA v2.0 compatibility!",file=sys.stderr)
pass
elif subnode.tag == '{' + NSFOLIA + '}foreign-data':
if self.metadatatype == "native":
raise MetaDataError("Encountered a foreign-data element but metadata type is native!")
elif self.metadata is not None:
#multiple foreign-data elements, chain:
e = self.metadata
while e.next is not None:
e = e.next
e.next = ForeignData(self, node=subnode)
else:
self.metadata = ForeignData(self, node=subnode)
elif subnode.tag == '{' + NSFOLIA + '}submetadata':
self.parsesubmetadata(subnode)
elif subnode.tag == '{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT': #backward-compatibility for old IMDI without foreign-key
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
self.metadatatype = "imdi"
self.metadata = ForeignData(self, node=subnode) | python | def parsemetadata(self, node):
"""Internal method to parse metadata"""
if 'type' in node.attrib:
self.metadatatype = node.attrib['type']
else:
#no type specified, default to native
self.metadatatype = "native"
if 'src' in node.attrib:
self.metadata = ExternalMetaData(node.attrib['src'])
elif self.metadatatype == "native":
self.metadata = NativeMetaData()
else:
self.metadata = None #may be set below to ForeignData
for subnode in node:
if subnode.tag == '{' + NSFOLIA + '}annotations':
self.parsexmldeclarations(subnode)
elif subnode.tag == '{' + NSFOLIA + '}meta':
if self.metadatatype == "native":
if subnode.text:
self.metadata[subnode.attrib['id']] = subnode.text
else:
raise MetaDataError("Encountered a meta element but metadata type is not native!")
elif subnode.tag == '{' + NSFOLIA + '}provenance':
#forward compatibility with FoLiA 2.0; ignore provenance
print("WARNING: Ignoring provenance data. Use foliapy instead of pynlpl.formats.folia for FoLiA v2.0 compatibility!",file=sys.stderr)
pass
elif subnode.tag == '{' + NSFOLIA + '}foreign-data':
if self.metadatatype == "native":
raise MetaDataError("Encountered a foreign-data element but metadata type is native!")
elif self.metadata is not None:
#multiple foreign-data elements, chain:
e = self.metadata
while e.next is not None:
e = e.next
e.next = ForeignData(self, node=subnode)
else:
self.metadata = ForeignData(self, node=subnode)
elif subnode.tag == '{' + NSFOLIA + '}submetadata':
self.parsesubmetadata(subnode)
elif subnode.tag == '{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT': #backward-compatibility for old IMDI without foreign-key
E = ElementMaker(namespace=NSFOLIA,nsmap={None: NSFOLIA, 'xml' : "http://www.w3.org/XML/1998/namespace"})
self.metadatatype = "imdi"
self.metadata = ForeignData(self, node=subnode) | [
"def",
"parsemetadata",
"(",
"self",
",",
"node",
")",
":",
"if",
"'type'",
"in",
"node",
".",
"attrib",
":",
"self",
".",
"metadatatype",
"=",
"node",
".",
"attrib",
"[",
"'type'",
"]",
"else",
":",
"#no type specified, default to native",
"self",
".",
"metadatatype",
"=",
"\"native\"",
"if",
"'src'",
"in",
"node",
".",
"attrib",
":",
"self",
".",
"metadata",
"=",
"ExternalMetaData",
"(",
"node",
".",
"attrib",
"[",
"'src'",
"]",
")",
"elif",
"self",
".",
"metadatatype",
"==",
"\"native\"",
":",
"self",
".",
"metadata",
"=",
"NativeMetaData",
"(",
")",
"else",
":",
"self",
".",
"metadata",
"=",
"None",
"#may be set below to ForeignData",
"for",
"subnode",
"in",
"node",
":",
"if",
"subnode",
".",
"tag",
"==",
"'{'",
"+",
"NSFOLIA",
"+",
"'}annotations'",
":",
"self",
".",
"parsexmldeclarations",
"(",
"subnode",
")",
"elif",
"subnode",
".",
"tag",
"==",
"'{'",
"+",
"NSFOLIA",
"+",
"'}meta'",
":",
"if",
"self",
".",
"metadatatype",
"==",
"\"native\"",
":",
"if",
"subnode",
".",
"text",
":",
"self",
".",
"metadata",
"[",
"subnode",
".",
"attrib",
"[",
"'id'",
"]",
"]",
"=",
"subnode",
".",
"text",
"else",
":",
"raise",
"MetaDataError",
"(",
"\"Encountered a meta element but metadata type is not native!\"",
")",
"elif",
"subnode",
".",
"tag",
"==",
"'{'",
"+",
"NSFOLIA",
"+",
"'}provenance'",
":",
"#forward compatibility with FoLiA 2.0; ignore provenance",
"print",
"(",
"\"WARNING: Ignoring provenance data. Use foliapy instead of pynlpl.formats.folia for FoLiA v2.0 compatibility!\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"pass",
"elif",
"subnode",
".",
"tag",
"==",
"'{'",
"+",
"NSFOLIA",
"+",
"'}foreign-data'",
":",
"if",
"self",
".",
"metadatatype",
"==",
"\"native\"",
":",
"raise",
"MetaDataError",
"(",
"\"Encountered a foreign-data element but metadata type is native!\"",
")",
"elif",
"self",
".",
"metadata",
"is",
"not",
"None",
":",
"#multiple foreign-data elements, chain:",
"e",
"=",
"self",
".",
"metadata",
"while",
"e",
".",
"next",
"is",
"not",
"None",
":",
"e",
"=",
"e",
".",
"next",
"e",
".",
"next",
"=",
"ForeignData",
"(",
"self",
",",
"node",
"=",
"subnode",
")",
"else",
":",
"self",
".",
"metadata",
"=",
"ForeignData",
"(",
"self",
",",
"node",
"=",
"subnode",
")",
"elif",
"subnode",
".",
"tag",
"==",
"'{'",
"+",
"NSFOLIA",
"+",
"'}submetadata'",
":",
"self",
".",
"parsesubmetadata",
"(",
"subnode",
")",
"elif",
"subnode",
".",
"tag",
"==",
"'{http://www.mpi.nl/IMDI/Schema/IMDI}METATRANSCRIPT'",
":",
"#backward-compatibility for old IMDI without foreign-key",
"E",
"=",
"ElementMaker",
"(",
"namespace",
"=",
"NSFOLIA",
",",
"nsmap",
"=",
"{",
"None",
":",
"NSFOLIA",
",",
"'xml'",
":",
"\"http://www.w3.org/XML/1998/namespace\"",
"}",
")",
"self",
".",
"metadatatype",
"=",
"\"imdi\"",
"self",
".",
"metadata",
"=",
"ForeignData",
"(",
"self",
",",
"node",
"=",
"subnode",
")"
] | Internal method to parse metadata | [
"Internal",
"method",
"to",
"parse",
"metadata"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7216-L7261 | -1 |
||||||
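`parsemetadata` above is the reading-side counterpart of the `xmlmetadata` record: with the default "native" metadata type the document's metadata behaves as a plain key/value mapping (`NativeMetaData`). The sketch below assumes a freshly created document starts with that default; the keys are illustrative only:

```python
from pynlpl.formats import folia

doc = folia.Document(id="example")          # assumed to start with native (key/value) metadata
doc.metadata["title"] = "A small example"   # serialised as <meta id="title"> by xmlmetadata()
doc.metadata["language"] = "nld"
for key, value in doc.metadata.items():
    print(key, "=", value)
```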
49 | proycon/pynlpl | pynlpl/formats/folia.py | Document.pendingvalidation | def pendingvalidation(self, warnonly=None):
"""Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
"""
if self.debug: print("[PyNLPl FoLiA DEBUG] Processing pending validations (if any)",file=stderr)
if warnonly is None and self and self.version:
warnonly = (checkversion(self.version, '1.5.0') < 0) #warn only for documents older than FoLiA v1.5
if self.textvalidation:
while self.offsetvalidationbuffer:
structureelement, textclass = self.offsetvalidationbuffer.pop()
if self.debug: print("[PyNLPl FoLiA DEBUG] Performing offset validation on " + repr(structureelement) + " textclass " + textclass,file=stderr)
#validate offsets
tc = structureelement.textcontent(textclass)
if tc.offset is not None:
try:
tc.getreference(validate=True)
except UnresolvableTextContent:
msg = "Text for " + structureelement.__class__.__name__ + ", ID " + str(structureelement.id) + ", textclass " + textclass + ", has incorrect offset " + str(tc.offset) + " or invalid reference"
print("TEXT VALIDATION ERROR: " + msg,file=sys.stderr)
if not warnonly:
raise | python | def pendingvalidation(self, warnonly=None):
"""Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
"""
if self.debug: print("[PyNLPl FoLiA DEBUG] Processing pending validations (if any)",file=stderr)
if warnonly is None and self and self.version:
warnonly = (checkversion(self.version, '1.5.0') < 0) #warn only for documents older than FoLiA v1.5
if self.textvalidation:
while self.offsetvalidationbuffer:
structureelement, textclass = self.offsetvalidationbuffer.pop()
if self.debug: print("[PyNLPl FoLiA DEBUG] Performing offset validation on " + repr(structureelement) + " textclass " + textclass,file=stderr)
#validate offsets
tc = structureelement.textcontent(textclass)
if tc.offset is not None:
try:
tc.getreference(validate=True)
except UnresolvableTextContent:
msg = "Text for " + structureelement.__class__.__name__ + ", ID " + str(structureelement.id) + ", textclass " + textclass + ", has incorrect offset " + str(tc.offset) + " or invalid reference"
print("TEXT VALIDATION ERROR: " + msg,file=sys.stderr)
if not warnonly:
raise | [
"def",
"pendingvalidation",
"(",
"self",
",",
"warnonly",
"=",
"None",
")",
":",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"\"[PyNLPl FoLiA DEBUG] Processing pending validations (if any)\"",
",",
"file",
"=",
"stderr",
")",
"if",
"warnonly",
"is",
"None",
"and",
"self",
"and",
"self",
".",
"version",
":",
"warnonly",
"=",
"(",
"checkversion",
"(",
"self",
".",
"version",
",",
"'1.5.0'",
")",
"<",
"0",
")",
"#warn only for documents older than FoLiA v1.5",
"if",
"self",
".",
"textvalidation",
":",
"while",
"self",
".",
"offsetvalidationbuffer",
":",
"structureelement",
",",
"textclass",
"=",
"self",
".",
"offsetvalidationbuffer",
".",
"pop",
"(",
")",
"if",
"self",
".",
"debug",
":",
"print",
"(",
"\"[PyNLPl FoLiA DEBUG] Performing offset validation on \"",
"+",
"repr",
"(",
"structureelement",
")",
"+",
"\" textclass \"",
"+",
"textclass",
",",
"file",
"=",
"stderr",
")",
"#validate offsets",
"tc",
"=",
"structureelement",
".",
"textcontent",
"(",
"textclass",
")",
"if",
"tc",
".",
"offset",
"is",
"not",
"None",
":",
"try",
":",
"tc",
".",
"getreference",
"(",
"validate",
"=",
"True",
")",
"except",
"UnresolvableTextContent",
":",
"msg",
"=",
"\"Text for \"",
"+",
"structureelement",
".",
"__class__",
".",
"__name__",
"+",
"\", ID \"",
"+",
"str",
"(",
"structureelement",
".",
"id",
")",
"+",
"\", textclass \"",
"+",
"textclass",
"+",
"\", has incorrect offset \"",
"+",
"str",
"(",
"tc",
".",
"offset",
")",
"+",
"\" or invalid reference\"",
"print",
"(",
"\"TEXT VALIDATION ERROR: \"",
"+",
"msg",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"not",
"warnonly",
":",
"raise"
] | Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool | [
"Perform",
"any",
"pending",
"validations"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7396-L7424 | -1 |
||||||
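As the `xml()` and `json()` records earlier show, `pendingvalidation` runs automatically at serialisation time, but it can also be called directly to surface offset problems early. A sketch assuming `textvalidation` is accepted as a `Document` constructor option (that option is not shown in this record):

```python
from pynlpl.formats import folia

doc = folia.Document(file="example.folia.xml", textvalidation=True)  # 'textvalidation' kwarg assumed
# Raise on incorrect text offsets instead of merely warning; warnonly=None would
# decide based on the document's FoLiA version (warn-only before v1.5).
doc.pendingvalidation(warnonly=False)
```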
50 | proycon/pynlpl | pynlpl/formats/folia.py | Document.paragraphs | def paragraphs(self, index = None):
"""Return a generator of all paragraphs found in the document.
If an index is specified, return the n'th paragraph only (starting at 0)"""
if index is None:
return self.select(Paragraph)
else:
if index < 0:
index = sum(t.count(Paragraph) for t in self.data) + index
for t in self.data:
for i,e in enumerate(t.select(Paragraph)) :
if i == index:
return e
raise IndexError | python | def paragraphs(self, index = None):
"""Return a generator of all paragraphs found in the document.
If an index is specified, return the n'th paragraph only (starting at 0)"""
if index is None:
return self.select(Paragraph)
else:
if index < 0:
index = sum(t.count(Paragraph) for t in self.data) + index
for t in self.data:
for i,e in enumerate(t.select(Paragraph)) :
if i == index:
return e
raise IndexError | [
"def",
"paragraphs",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"None",
":",
"return",
"self",
".",
"select",
"(",
"Paragraph",
")",
"else",
":",
"if",
"index",
"<",
"0",
":",
"index",
"=",
"sum",
"(",
"t",
".",
"count",
"(",
"Paragraph",
")",
"for",
"t",
"in",
"self",
".",
"data",
")",
"+",
"index",
"for",
"t",
"in",
"self",
".",
"data",
":",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"t",
".",
"select",
"(",
"Paragraph",
")",
")",
":",
"if",
"i",
"==",
"index",
":",
"return",
"e",
"raise",
"IndexError"
] | Return a generator of all paragraphs found in the document.
If an index is specified, return the n'th paragraph only (starting at 0) | [
"Return",
"a",
"generator",
"of",
"all",
"paragraphs",
"found",
"in",
"the",
"document",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7445-L7458 | -1 |
||||||
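A usage sketch for the `paragraphs` record above: called without an index it yields a generator, with an index it returns a single paragraph, and negative indices count from the end (the `sum(...) + index` line). `doc` is assumed to be a loaded document:

```python
for i, paragraph in enumerate(doc.paragraphs()):   # generator over all paragraphs
    print(i, paragraph.id)

first = doc.paragraphs(0)    # first paragraph
last = doc.paragraphs(-1)    # negative index counts from the end
```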
51 | proycon/pynlpl | pynlpl/formats/folia.py | Document.sentences | def sentences(self, index = None):
"""Return a generator of all sentence found in the document. Except for sentences in quotes.
If an index is specified, return the n'th sentence only (starting at 0)"""
if index is None:
return self.select(Sentence,None,True,[Quote])
else:
if index < 0:
index = sum(t.count(Sentence,None,True,[Quote]) for t in self.data) + index
for t in self.data:
for i,e in enumerate(t.select(Sentence,None,True,[Quote])) :
if i == index:
return e
raise IndexError | python | def sentences(self, index = None):
"""Return a generator of all sentence found in the document. Except for sentences in quotes.
If an index is specified, return the n'th sentence only (starting at 0)"""
if index is None:
return self.select(Sentence,None,True,[Quote])
else:
if index < 0:
index = sum(t.count(Sentence,None,True,[Quote]) for t in self.data) + index
for t in self.data:
for i,e in enumerate(t.select(Sentence,None,True,[Quote])) :
if i == index:
return e
raise IndexError | [
"def",
"sentences",
"(",
"self",
",",
"index",
"=",
"None",
")",
":",
"if",
"index",
"is",
"None",
":",
"return",
"self",
".",
"select",
"(",
"Sentence",
",",
"None",
",",
"True",
",",
"[",
"Quote",
"]",
")",
"else",
":",
"if",
"index",
"<",
"0",
":",
"index",
"=",
"sum",
"(",
"t",
".",
"count",
"(",
"Sentence",
",",
"None",
",",
"True",
",",
"[",
"Quote",
"]",
")",
"for",
"t",
"in",
"self",
".",
"data",
")",
"+",
"index",
"for",
"t",
"in",
"self",
".",
"data",
":",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"t",
".",
"select",
"(",
"Sentence",
",",
"None",
",",
"True",
",",
"[",
"Quote",
"]",
")",
")",
":",
"if",
"i",
"==",
"index",
":",
"return",
"e",
"raise",
"IndexError"
] | Return a generator of all sentences found in the document. Except for sentences in quotes.
If an index is specified, return the n'th sentence only (starting at 0) | [
"Return",
"a",
"generator",
"of",
"all",
"sentence",
"found",
"in",
"the",
"document",
".",
"Except",
"for",
"sentences",
"in",
"quotes",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L7460-L7473 | -1 |
||||||
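`sentences` above mirrors `paragraphs` but skips sentences nested inside `Quote` elements (the `[Quote]` ignore list). A minimal sketch, again assuming a loaded `doc`:

```python
count = 0
for sentence in doc.sentences():   # excludes sentences embedded in quotes
    count += 1
print("sentences outside quotes:", count)

second = doc.sentences(1)          # indexing starts at 0, so this is the second sentence
```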
52 | proycon/pynlpl | pynlpl/fsa.py | NFA._states | def _states(self, state, processedstates=[]): #pylint: disable=dangerous-default-value
"""Iterate over all states in no particular order"""
processedstates.append(state)
for nextstate in state.epsilon:
if not nextstate in processedstates:
self._states(nextstate, processedstates)
for _, nextstate in state.transitions:
if not nextstate in processedstates:
self._states(nextstate, processedstates)
return processedstates | python | def _states(self, state, processedstates=[]): #pylint: disable=dangerous-default-value
"""Iterate over all states in no particular order"""
processedstates.append(state)
for nextstate in state.epsilon:
if not nextstate in processedstates:
self._states(nextstate, processedstates)
for _, nextstate in state.transitions:
if not nextstate in processedstates:
self._states(nextstate, processedstates)
return processedstates | [
"def",
"_states",
"(",
"self",
",",
"state",
",",
"processedstates",
"=",
"[",
"]",
")",
":",
"#pylint: disable=dangerous-default-value",
"processedstates",
".",
"append",
"(",
"state",
")",
"for",
"nextstate",
"in",
"state",
".",
"epsilon",
":",
"if",
"not",
"nextstate",
"in",
"processedstates",
":",
"self",
".",
"_states",
"(",
"nextstate",
",",
"processedstates",
")",
"for",
"_",
",",
"nextstate",
"in",
"state",
".",
"transitions",
":",
"if",
"not",
"nextstate",
"in",
"processedstates",
":",
"self",
".",
"_states",
"(",
"nextstate",
",",
"processedstates",
")",
"return",
"processedstates"
] | Iterate over all states in no particular order | [
"Iterate",
"over",
"all",
"states",
"in",
"no",
"particular",
"order"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/fsa.py#L97-L109 | -1 |
||||||
53 | proycon/pynlpl | pynlpl/common.py | log | def log(msg, **kwargs):
"""Generic log method. Will prepend timestamp.
Keyword arguments:
system - Name of the system/module
indent - Integer denoting the desired level of indentation
streams - List of streams to output to
stream - Stream to output to (singleton version of streams)
"""
if 'debug' in kwargs:
if 'currentdebug' in kwargs:
if kwargs['currentdebug'] < kwargs['debug']:
return False
else:
return False #no currentdebug passed, assuming no debug mode and thus skipping message
s = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "] "
if 'system' in kwargs:
s += "[" + system + "] "
if 'indent' in kwargs:
s += ("\t" * int(kwargs['indent']))
s += u(msg)
if s[-1] != '\n':
s += '\n'
if 'streams' in kwargs:
streams = kwargs['streams']
elif 'stream' in kwargs:
streams = [kwargs['stream']]
else:
streams = [stderr]
for stream in streams:
stream.write(s)
return s | python | def log(msg, **kwargs):
"""Generic log method. Will prepend timestamp.
Keyword arguments:
system - Name of the system/module
indent - Integer denoting the desired level of indentation
streams - List of streams to output to
stream - Stream to output to (singleton version of streams)
"""
if 'debug' in kwargs:
if 'currentdebug' in kwargs:
if kwargs['currentdebug'] < kwargs['debug']:
return False
else:
return False #no currentdebug passed, assuming no debug mode and thus skipping message
s = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "] "
if 'system' in kwargs:
s += "[" + system + "] "
if 'indent' in kwargs:
s += ("\t" * int(kwargs['indent']))
s += u(msg)
if s[-1] != '\n':
s += '\n'
if 'streams' in kwargs:
streams = kwargs['streams']
elif 'stream' in kwargs:
streams = [kwargs['stream']]
else:
streams = [stderr]
for stream in streams:
stream.write(s)
return s | [
"def",
"log",
"(",
"msg",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'debug'",
"in",
"kwargs",
":",
"if",
"'currentdebug'",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"'currentdebug'",
"]",
"<",
"kwargs",
"[",
"'debug'",
"]",
":",
"return",
"False",
"else",
":",
"return",
"False",
"#no currentdebug passed, assuming no debug mode and thus skipping message",
"s",
"=",
"\"[\"",
"+",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"+",
"\"] \"",
"if",
"'system'",
"in",
"kwargs",
":",
"s",
"+=",
"\"[\"",
"+",
"system",
"+",
"\"] \"",
"if",
"'indent'",
"in",
"kwargs",
":",
"s",
"+=",
"(",
"\"\\t\"",
"*",
"int",
"(",
"kwargs",
"[",
"'indent'",
"]",
")",
")",
"s",
"+=",
"u",
"(",
"msg",
")",
"if",
"s",
"[",
"-",
"1",
"]",
"!=",
"'\\n'",
":",
"s",
"+=",
"'\\n'",
"if",
"'streams'",
"in",
"kwargs",
":",
"streams",
"=",
"kwargs",
"[",
"'streams'",
"]",
"elif",
"'stream'",
"in",
"kwargs",
":",
"streams",
"=",
"[",
"kwargs",
"[",
"'stream'",
"]",
"]",
"else",
":",
"streams",
"=",
"[",
"stderr",
"]",
"for",
"stream",
"in",
"streams",
":",
"stream",
".",
"write",
"(",
"s",
")",
"return",
"s"
] | Generic log method. Will prepend timestamp.
Keyword arguments:
system - Name of the system/module
indent - Integer denoting the desired level of indentation
streams - List of streams to output to
stream - Stream to output to (singleton version of streams) | [
"Generic",
"log",
"method",
".",
"Will",
"prepend",
"timestamp",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/common.py#L98-L136 | -1 |
||||||
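A usage sketch for the `log` helper above: it prepends a timestamp, writes to standard error unless `stream`/`streams` is given, and only filters on `debug` when a `currentdebug` value is passed alongside it (the `system` keyword is avoided here because the body references a bare `system` name that is not defined). The messages are arbitrary:

```python
import sys
from pynlpl.common import log

log("Starting preprocessing")                      # timestamped, written to stderr by default
log("Tokenising file 3 of 10", indent=1)           # indented with one tab
log("Cache miss", debug=2, currentdebug=3, stream=sys.stdout)  # emitted because currentdebug >= debug
```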
54 | proycon/pynlpl | pynlpl/clients/cornetto.py | CornettoClient.get_syn_ids_by_lemma | def get_syn_ids_by_lemma(self, lemma):
"""Returns a list of synset IDs based on a lemma"""
if not isinstance(lemma,unicode):
lemma = unicode(lemma,'utf-8')
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_lemma: db_opt: %s" % path )
query_opt = "dict_search"
if self.debug:
printf( "cornettodb/views/query_remote_syn_lemma: query_opt: %s" % query_opt )
qdict = {}
qdict[ "action" ] = "queryList"
qdict[ "word" ] = lemma.encode('utf-8')
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
printf( "content:\n%s" % content )
# printf( "content is of type: %s" % type( content ) )
dict_list = []
dict_list = eval( content ) # string to list
synsets = []
items = len( dict_list )
if self.debug:
printf( "items: %d" % items )
# syn dict: like lu dict, but without pos: part-of-speech
for dict in dict_list:
if self.debug:
printf( dict )
seq_nr = dict[ "seq_nr" ] # sense number
value = dict[ "value" ] # lexical unit identifier
form = dict[ "form" ] # lemma
label = dict[ "label" ] # label to be shown
if self.debug:
printf( "seq_nr: %s" % seq_nr )
printf( "value: %s" % value )
printf( "form: %s" % form )
printf( "label: %s" % label )
if value != "":
synsets.append( value )
return synsets | python | def get_syn_ids_by_lemma(self, lemma):
"""Returns a list of synset IDs based on a lemma"""
if not isinstance(lemma,unicode):
lemma = unicode(lemma,'utf-8')
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_lemma: db_opt: %s" % path )
query_opt = "dict_search"
if self.debug:
printf( "cornettodb/views/query_remote_syn_lemma: query_opt: %s" % query_opt )
qdict = {}
qdict[ "action" ] = "queryList"
qdict[ "word" ] = lemma.encode('utf-8')
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
printf( "content:\n%s" % content )
# printf( "content is of type: %s" % type( content ) )
dict_list = []
dict_list = eval( content ) # string to list
synsets = []
items = len( dict_list )
if self.debug:
printf( "items: %d" % items )
# syn dict: like lu dict, but without pos: part-of-speech
for dict in dict_list:
if self.debug:
printf( dict )
seq_nr = dict[ "seq_nr" ] # sense number
value = dict[ "value" ] # lexical unit identifier
form = dict[ "form" ] # lemma
label = dict[ "label" ] # label to be shown
if self.debug:
printf( "seq_nr: %s" % seq_nr )
printf( "value: %s" % value )
printf( "form: %s" % form )
printf( "label: %s" % label )
if value != "":
synsets.append( value )
return synsets | [
"def",
"get_syn_ids_by_lemma",
"(",
"self",
",",
"lemma",
")",
":",
"if",
"not",
"isinstance",
"(",
"lemma",
",",
"unicode",
")",
":",
"lemma",
"=",
"unicode",
"(",
"lemma",
",",
"'utf-8'",
")",
"http",
",",
"resp",
",",
"content",
"=",
"self",
".",
"connect",
"(",
")",
"params",
"=",
"\"\"",
"fragment",
"=",
"\"\"",
"path",
"=",
"\"cdb_syn\"",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"cornettodb/views/query_remote_syn_lemma: db_opt: %s\"",
"%",
"path",
")",
"query_opt",
"=",
"\"dict_search\"",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"cornettodb/views/query_remote_syn_lemma: query_opt: %s\"",
"%",
"query_opt",
")",
"qdict",
"=",
"{",
"}",
"qdict",
"[",
"\"action\"",
"]",
"=",
"\"queryList\"",
"qdict",
"[",
"\"word\"",
"]",
"=",
"lemma",
".",
"encode",
"(",
"'utf-8'",
")",
"query",
"=",
"urllib",
".",
"urlencode",
"(",
"qdict",
")",
"db_url_tuple",
"=",
"(",
"self",
".",
"scheme",
",",
"self",
".",
"host",
"+",
"':'",
"+",
"str",
"(",
"self",
".",
"port",
")",
",",
"path",
",",
"params",
",",
"query",
",",
"fragment",
")",
"db_url",
"=",
"urlparse",
".",
"urlunparse",
"(",
"db_url_tuple",
")",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"db_url: %s\"",
"%",
"db_url",
")",
"resp",
",",
"content",
"=",
"http",
".",
"request",
"(",
"db_url",
",",
"\"GET\"",
")",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"resp:\\n%s\"",
"%",
"resp",
")",
"printf",
"(",
"\"content:\\n%s\"",
"%",
"content",
")",
"# printf( \"content is of type: %s\" % type( content ) )",
"dict_list",
"=",
"[",
"]",
"dict_list",
"=",
"eval",
"(",
"content",
")",
"# string to list",
"synsets",
"=",
"[",
"]",
"items",
"=",
"len",
"(",
"dict_list",
")",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"items: %d\"",
"%",
"items",
")",
"# syn dict: like lu dict, but without pos: part-of-speech",
"for",
"dict",
"in",
"dict_list",
":",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"dict",
")",
"seq_nr",
"=",
"dict",
"[",
"\"seq_nr\"",
"]",
"# sense number",
"value",
"=",
"dict",
"[",
"\"value\"",
"]",
"# lexical unit identifier",
"form",
"=",
"dict",
"[",
"\"form\"",
"]",
"# lemma",
"label",
"=",
"dict",
"[",
"\"label\"",
"]",
"# label to be shown",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"seq_nr: %s\"",
"%",
"seq_nr",
")",
"printf",
"(",
"\"value: %s\"",
"%",
"value",
")",
"printf",
"(",
"\"form: %s\"",
"%",
"form",
")",
"printf",
"(",
"\"label: %s\"",
"%",
"label",
")",
"if",
"value",
"!=",
"\"\"",
":",
"synsets",
".",
"append",
"(",
"value",
")",
"return",
"synsets"
] | Returns a list of synset IDs based on a lemma | [
"Returns",
"a",
"list",
"of",
"synset",
"IDs",
"based",
"on",
"a",
"lemma"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/cornetto.py#L96-L160 | -1 |
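A brief usage sketch for get_syn_ids_by_lemma above; the constructor call, its default connection settings, and the example lemma are assumptions (a reachable Cornetto database server is required and is not part of this record).
# Sketch only: constructor defaults and server availability are assumptions.
from pynlpl.clients.cornetto import CornettoClient

client = CornettoClient()                       # connection settings assumed to default sensibly
syn_ids = client.get_syn_ids_by_lemma("fiets")  # Dutch lemma, purely illustrative
for syn_id in syn_ids:
    print(syn_id)                               # lexical-unit identifiers returned by the server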
||||||
55 | proycon/pynlpl | pynlpl/clients/cornetto.py | CornettoClient.get_synset_xml | def get_synset_xml(self,syn_id):
"""
call cdb_syn with synset identifier -> returns the synset xml;
"""
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )
# output_opt: plain, html, xml
# 'xml' is actually xhtml (with markup), but it is not valid xml!
# 'plain' is actually valid xml (without markup)
output_opt = "plain"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )
action = "runQuery"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )
qdict = {}
qdict[ "action" ] = action
qdict[ "query" ] = syn_id
qdict[ "outtype" ] = output_opt
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
# printf( "content:\n%s" % content )
# printf( "content is of type: %s" % type( content ) ) #<type 'str'>
xml_data = eval( content )
return etree.fromstring( xml_data ) | python | def get_synset_xml(self,syn_id):
"""
call cdb_syn with synset identifier -> returns the synset xml;
"""
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )
# output_opt: plain, html, xml
# 'xml' is actually xhtml (with markup), but it is not valid xml!
# 'plain' is actually valid xml (without markup)
output_opt = "plain"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )
action = "runQuery"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )
qdict = {}
qdict[ "action" ] = action
qdict[ "query" ] = syn_id
qdict[ "outtype" ] = output_opt
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
# printf( "content:\n%s" % content )
# printf( "content is of type: %s" % type( content ) ) #<type 'str'>
xml_data = eval( content )
return etree.fromstring( xml_data ) | [
"def",
"get_synset_xml",
"(",
"self",
",",
"syn_id",
")",
":",
"http",
",",
"resp",
",",
"content",
"=",
"self",
".",
"connect",
"(",
")",
"params",
"=",
"\"\"",
"fragment",
"=",
"\"\"",
"path",
"=",
"\"cdb_syn\"",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"cornettodb/views/query_remote_syn_id: db_opt: %s\"",
"%",
"path",
")",
"# output_opt: plain, html, xml",
"# 'xml' is actually xhtml (with markup), but it is not valid xml!",
"# 'plain' is actually valid xml (without markup)",
"output_opt",
"=",
"\"plain\"",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"cornettodb/views/query_remote_syn_id: output_opt: %s\"",
"%",
"output_opt",
")",
"action",
"=",
"\"runQuery\"",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"cornettodb/views/query_remote_syn_id: action: %s\"",
"%",
"action",
")",
"printf",
"(",
"\"cornettodb/views/query_remote_syn_id: query: %s\"",
"%",
"syn_id",
")",
"qdict",
"=",
"{",
"}",
"qdict",
"[",
"\"action\"",
"]",
"=",
"action",
"qdict",
"[",
"\"query\"",
"]",
"=",
"syn_id",
"qdict",
"[",
"\"outtype\"",
"]",
"=",
"output_opt",
"query",
"=",
"urllib",
".",
"urlencode",
"(",
"qdict",
")",
"db_url_tuple",
"=",
"(",
"self",
".",
"scheme",
",",
"self",
".",
"host",
"+",
"':'",
"+",
"str",
"(",
"self",
".",
"port",
")",
",",
"path",
",",
"params",
",",
"query",
",",
"fragment",
")",
"db_url",
"=",
"urlparse",
".",
"urlunparse",
"(",
"db_url_tuple",
")",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"db_url: %s\"",
"%",
"db_url",
")",
"resp",
",",
"content",
"=",
"http",
".",
"request",
"(",
"db_url",
",",
"\"GET\"",
")",
"if",
"self",
".",
"debug",
":",
"printf",
"(",
"\"resp:\\n%s\"",
"%",
"resp",
")",
"# printf( \"content:\\n%s\" % content )",
"# printf( \"content is of type: %s\" % type( content ) ) #<type 'str'>",
"xml_data",
"=",
"eval",
"(",
"content",
")",
"return",
"etree",
".",
"fromstring",
"(",
"xml_data",
")"
] | call cdb_syn with synset identifier -> returns the synset xml; | [
"call",
"cdb_syn",
"with",
"synset",
"identifier",
"-",
">",
"returns",
"the",
"synset",
"xml",
";"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/cornetto.py#L227-L272 | -1 |
||||||
56 | proycon/pynlpl | pynlpl/formats/dutchsemcor.py | WSDSystemOutput.senses | def senses(self, bestonly=False):
"""Returns a list of all predicted senses"""
l = []
for word_id, senses,distance in self:
for sense, confidence in senses:
if not sense in l: l.append(sense)
if bestonly:
break
return l | python | def senses(self, bestonly=False):
"""Returns a list of all predicted senses"""
l = []
for word_id, senses,distance in self:
for sense, confidence in senses:
if not sense in l: l.append(sense)
if bestonly:
break
return l | [
"def",
"senses",
"(",
"self",
",",
"bestonly",
"=",
"False",
")",
":",
"l",
"=",
"[",
"]",
"for",
"word_id",
",",
"senses",
",",
"distance",
"in",
"self",
":",
"for",
"sense",
",",
"confidence",
"in",
"senses",
":",
"if",
"not",
"sense",
"in",
"l",
":",
"l",
".",
"append",
"(",
"sense",
")",
"if",
"bestonly",
":",
"break",
"return",
"l"
] | Returns a list of all predicted senses | [
"Returns",
"a",
"list",
"of",
"all",
"predicted",
"senses"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/dutchsemcor.py#L139-L147 | -1 |
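A sketch of how senses() aggregates predictions; how a WSDSystemOutput object is loaded is not shown in this record, so the constructor call and filename below are assumptions.
from pynlpl.formats.dutchsemcor import WSDSystemOutput

# Iterating the object is assumed to yield (word_id, [(sense, confidence), ...], distance).
output = WSDSystemOutput("system.output")    # filename/constructor are assumptions
all_senses = output.senses()                 # every distinct sense predicted anywhere
best_only = output.senses(bestonly=True)     # only the first (best-ranked) sense per word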
||||||
57 | proycon/pynlpl | pynlpl/clients/frogclient.py | FrogClient.align | def align(self,inputwords, outputwords):
"""For each inputword, provides the index of the outputword"""
alignment = []
cursor = 0
for inputword in inputwords:
if len(outputwords) > cursor and outputwords[cursor] == inputword:
alignment.append(cursor)
cursor += 1
elif len(outputwords) > cursor+1 and outputwords[cursor+1] == inputword:
alignment.append(cursor+1)
cursor += 2
else:
alignment.append(None)
cursor += 1
return alignment | python | def align(self,inputwords, outputwords):
"""For each inputword, provides the index of the outputword"""
alignment = []
cursor = 0
for inputword in inputwords:
if len(outputwords) > cursor and outputwords[cursor] == inputword:
alignment.append(cursor)
cursor += 1
elif len(outputwords) > cursor+1 and outputwords[cursor+1] == inputword:
alignment.append(cursor+1)
cursor += 2
else:
alignment.append(None)
cursor += 1
return alignment | [
"def",
"align",
"(",
"self",
",",
"inputwords",
",",
"outputwords",
")",
":",
"alignment",
"=",
"[",
"]",
"cursor",
"=",
"0",
"for",
"inputword",
"in",
"inputwords",
":",
"if",
"len",
"(",
"outputwords",
")",
">",
"cursor",
"and",
"outputwords",
"[",
"cursor",
"]",
"==",
"inputword",
":",
"alignment",
".",
"append",
"(",
"cursor",
")",
"cursor",
"+=",
"1",
"elif",
"len",
"(",
"outputwords",
")",
">",
"cursor",
"+",
"1",
"and",
"outputwords",
"[",
"cursor",
"+",
"1",
"]",
"==",
"inputword",
":",
"alignment",
".",
"append",
"(",
"cursor",
"+",
"1",
")",
"cursor",
"+=",
"2",
"else",
":",
"alignment",
".",
"append",
"(",
"None",
")",
"cursor",
"+=",
"1",
"return",
"alignment"
] | For each inputword, provides the index of the outputword | [
"For",
"each",
"inputword",
"provides",
"the",
"index",
"of",
"the",
"outputword"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/clients/frogclient.py#L115-L129 | -1 |
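The alignment above can be illustrated on plain lists; because the method needs a FrogClient instance (which normally connects to a running Frog server), the call itself is shown only as a comment with the value it would produce.
inputwords  = ["dit", "is", "een", "test"]
outputwords = ["dit", "is", "een", "kleine", "test"]
# client.align(inputwords, outputwords) walks both lists with a cursor that may
# skip at most one extra output token per step, so here it would return
#   [0, 1, 2, 4]
# ("test" is matched one position later because "kleine" was inserted);
# an input word with no match within that window gets None instead of an index.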
||||||
58 | proycon/pynlpl | pynlpl/textprocessors.py | tokenize | def tokenize(text, regexps=TOKENIZERRULES):
"""Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
.
"""
for i,regexp in list(enumerate(regexps)):
if isstring(regexp):
regexps[i] = re.compile(regexp)
tokens = []
begin = 0
for i, c in enumerate(text):
if begin > i:
continue
elif i == begin:
m = False
for regexp in regexps:
m = regexp.findall(text[i:i+300])
if m:
tokens.append(m[0])
begin = i + len(m[0])
break
if m: continue
if c in string.punctuation or c in WHITESPACE:
prev = text[i-1] if i > 0 else ""
next = text[i+1] if i < len(text)-1 else ""
if (c == '.' or c == ',') and prev.isdigit() and next.isdigit():
#punctuation in between numbers, keep as one token
pass
elif (c == "'" or c == "`") and prev.isalpha() and next.isalpha():
#quote in between chars, keep...
pass
elif c not in WHITESPACE and next == c: #group clusters of identical punctuation together
continue
elif c == '\r' and prev == '\n':
#ignore
begin = i+1
continue
else:
token = text[begin:i]
if token: tokens.append(token)
if c not in WHITESPACE:
tokens.append(c) #anything but spaces and newlines (i.e. punctuation) counts as a token too
begin = i + 1 #set the begin cursor
if begin <= len(text) - 1:
token = text[begin:]
tokens.append(token)
return tokens | python | def tokenize(text, regexps=TOKENIZERRULES):
"""Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
.
"""
for i,regexp in list(enumerate(regexps)):
if isstring(regexp):
regexps[i] = re.compile(regexp)
tokens = []
begin = 0
for i, c in enumerate(text):
if begin > i:
continue
elif i == begin:
m = False
for regexp in regexps:
m = regexp.findall(text[i:i+300])
if m:
tokens.append(m[0])
begin = i + len(m[0])
break
if m: continue
if c in string.punctuation or c in WHITESPACE:
prev = text[i-1] if i > 0 else ""
next = text[i+1] if i < len(text)-1 else ""
if (c == '.' or c == ',') and prev.isdigit() and next.isdigit():
#punctuation in between numbers, keep as one token
pass
elif (c == "'" or c == "`") and prev.isalpha() and next.isalpha():
#quote in between chars, keep...
pass
elif c not in WHITESPACE and next == c: #group clusters of identical punctuation together
continue
elif c == '\r' and prev == '\n':
#ignore
begin = i+1
continue
else:
token = text[begin:i]
if token: tokens.append(token)
if c not in WHITESPACE:
tokens.append(c) #anything but spaces and newlines (i.e. punctuation) counts as a token too
begin = i + 1 #set the begin cursor
if begin <= len(text) - 1:
token = text[begin:]
tokens.append(token)
return tokens | [
"def",
"tokenize",
"(",
"text",
",",
"regexps",
"=",
"TOKENIZERRULES",
")",
":",
"for",
"i",
",",
"regexp",
"in",
"list",
"(",
"enumerate",
"(",
"regexps",
")",
")",
":",
"if",
"isstring",
"(",
"regexp",
")",
":",
"regexps",
"[",
"i",
"]",
"=",
"re",
".",
"compile",
"(",
"regexp",
")",
"tokens",
"=",
"[",
"]",
"begin",
"=",
"0",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"text",
")",
":",
"if",
"begin",
">",
"i",
":",
"continue",
"elif",
"i",
"==",
"begin",
":",
"m",
"=",
"False",
"for",
"regexp",
"in",
"regexps",
":",
"m",
"=",
"regexp",
".",
"findall",
"(",
"text",
"[",
"i",
":",
"i",
"+",
"300",
"]",
")",
"if",
"m",
":",
"tokens",
".",
"append",
"(",
"m",
"[",
"0",
"]",
")",
"begin",
"=",
"i",
"+",
"len",
"(",
"m",
"[",
"0",
"]",
")",
"break",
"if",
"m",
":",
"continue",
"if",
"c",
"in",
"string",
".",
"punctuation",
"or",
"c",
"in",
"WHITESPACE",
":",
"prev",
"=",
"text",
"[",
"i",
"-",
"1",
"]",
"if",
"i",
">",
"0",
"else",
"\"\"",
"next",
"=",
"text",
"[",
"i",
"+",
"1",
"]",
"if",
"i",
"<",
"len",
"(",
"text",
")",
"-",
"1",
"else",
"\"\"",
"if",
"(",
"c",
"==",
"'.'",
"or",
"c",
"==",
"','",
")",
"and",
"prev",
".",
"isdigit",
"(",
")",
"and",
"next",
".",
"isdigit",
"(",
")",
":",
"#punctuation in between numbers, keep as one token",
"pass",
"elif",
"(",
"c",
"==",
"\"'\"",
"or",
"c",
"==",
"\"`\"",
")",
"and",
"prev",
".",
"isalpha",
"(",
")",
"and",
"next",
".",
"isalpha",
"(",
")",
":",
"#quote in between chars, keep...",
"pass",
"elif",
"c",
"not",
"in",
"WHITESPACE",
"and",
"next",
"==",
"c",
":",
"#group clusters of identical punctuation together",
"continue",
"elif",
"c",
"==",
"'\\r'",
"and",
"prev",
"==",
"'\\n'",
":",
"#ignore",
"begin",
"=",
"i",
"+",
"1",
"continue",
"else",
":",
"token",
"=",
"text",
"[",
"begin",
":",
"i",
"]",
"if",
"token",
":",
"tokens",
".",
"append",
"(",
"token",
")",
"if",
"c",
"not",
"in",
"WHITESPACE",
":",
"tokens",
".",
"append",
"(",
"c",
")",
"#anything but spaces and newlines (i.e. punctuation) counts as a token too",
"begin",
"=",
"i",
"+",
"1",
"#set the begin cursor",
"if",
"begin",
"<=",
"len",
"(",
"text",
")",
"-",
"1",
":",
"token",
"=",
"text",
"[",
"begin",
":",
"]",
"tokens",
".",
"append",
"(",
"token",
")",
"return",
"tokens"
] | Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
. | [
"Tokenizes",
"a",
"string",
"and",
"returns",
"a",
"list",
"of",
"tokens"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L317-L386 | -1 |
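Beyond the docstring example, the digit handling above keeps decimal numbers in one token; a short sketch, with the second output marked as expected on the assumption that none of the default TOKENIZERRULES regular expressions match this input.
from pynlpl.textprocessors import tokenize

print(tokenize("This is a test."))
# ['This', 'is', 'a', 'test', '.']   (as in the docstring above)
print(tokenize("It costs 3.50 euro."))
# expected: ['It', 'costs', '3.50', 'euro', '.']  (the '.' between digits stays inside the token)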
||||||
59 | proycon/pynlpl | pynlpl/textprocessors.py | strip_accents | def strip_accents(s, encoding= 'utf-8'):
"""Strip characters with diacritics and return a flat ascii representation"""
if sys.version < '3':
if isinstance(s,unicode):
return unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore')
else:
return unicodedata.normalize('NFKD', unicode(s,encoding)).encode('ASCII', 'ignore')
else:
if isinstance(s,bytes): s = str(s,encoding)
return str(unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore'),'ascii') | python | def strip_accents(s, encoding= 'utf-8'):
"""Strip characters with diacritics and return a flat ascii representation"""
if sys.version < '3':
if isinstance(s,unicode):
return unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore')
else:
return unicodedata.normalize('NFKD', unicode(s,encoding)).encode('ASCII', 'ignore')
else:
if isinstance(s,bytes): s = str(s,encoding)
return str(unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore'),'ascii') | [
"def",
"strip_accents",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"sys",
".",
"version",
"<",
"'3'",
":",
"if",
"isinstance",
"(",
"s",
",",
"unicode",
")",
":",
"return",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"s",
")",
".",
"encode",
"(",
"'ASCII'",
",",
"'ignore'",
")",
"else",
":",
"return",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"unicode",
"(",
"s",
",",
"encoding",
")",
")",
".",
"encode",
"(",
"'ASCII'",
",",
"'ignore'",
")",
"else",
":",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"s",
"=",
"str",
"(",
"s",
",",
"encoding",
")",
"return",
"str",
"(",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"s",
")",
".",
"encode",
"(",
"'ASCII'",
",",
"'ignore'",
")",
",",
"'ascii'",
")"
] | Strip characters with diacritics and return a flat ascii representation | [
"Strip",
"characters",
"with",
"diacritics",
"and",
"return",
"a",
"flat",
"ascii",
"representation"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L415-L424 | -1 |
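A small sketch of the function above (Python 3 branch): the string is NFKD-decomposed and the combining marks are dropped.
from pynlpl.textprocessors import strip_accents

print(strip_accents("café"))         # 'cafe'
print(strip_accents("naïve zeeën"))  # 'naive zeeen'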
||||||
60 | proycon/pynlpl | pynlpl/textprocessors.py | swap | def swap(tokens, maxdist=2):
"""Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations."""
assert maxdist >= 2
tokens = list(tokens)
if maxdist > len(tokens):
maxdist = len(tokens)
l = len(tokens)
for i in range(0,l - 1):
for permutation in permutations(tokens[i:i+maxdist]):
if permutation != tuple(tokens[i:i+maxdist]):
newtokens = tokens[:i]
newtokens += permutation
newtokens += tokens[i+maxdist:]
yield newtokens
if maxdist == len(tokens):
break | python | def swap(tokens, maxdist=2):
"""Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations."""
assert maxdist >= 2
tokens = list(tokens)
if maxdist > len(tokens):
maxdist = len(tokens)
l = len(tokens)
for i in range(0,l - 1):
for permutation in permutations(tokens[i:i+maxdist]):
if permutation != tuple(tokens[i:i+maxdist]):
newtokens = tokens[:i]
newtokens += permutation
newtokens += tokens[i+maxdist:]
yield newtokens
if maxdist == len(tokens):
break | [
"def",
"swap",
"(",
"tokens",
",",
"maxdist",
"=",
"2",
")",
":",
"assert",
"maxdist",
">=",
"2",
"tokens",
"=",
"list",
"(",
"tokens",
")",
"if",
"maxdist",
">",
"len",
"(",
"tokens",
")",
":",
"maxdist",
"=",
"len",
"(",
"tokens",
")",
"l",
"=",
"len",
"(",
"tokens",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"l",
"-",
"1",
")",
":",
"for",
"permutation",
"in",
"permutations",
"(",
"tokens",
"[",
"i",
":",
"i",
"+",
"maxdist",
"]",
")",
":",
"if",
"permutation",
"!=",
"tuple",
"(",
"tokens",
"[",
"i",
":",
"i",
"+",
"maxdist",
"]",
")",
":",
"newtokens",
"=",
"tokens",
"[",
":",
"i",
"]",
"newtokens",
"+=",
"permutation",
"newtokens",
"+=",
"tokens",
"[",
"i",
"+",
"maxdist",
":",
"]",
"yield",
"newtokens",
"if",
"maxdist",
"==",
"len",
"(",
"tokens",
")",
":",
"break"
] | Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations. | [
"Perform",
"a",
"swap",
"operation",
"on",
"a",
"sequence",
"of",
"tokens",
"exhaustively",
"swapping",
"all",
"tokens",
"up",
"to",
"the",
"maximum",
"specified",
"distance",
".",
"This",
"is",
"a",
"subset",
"of",
"all",
"permutations",
"."
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L426-L441 | -1 |
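swap() is a generator; with the default maxdist=2 it yields exactly the adjacent-pair exchanges, as in this sketch.
from pynlpl.textprocessors import swap

for variant in swap(["a", "b", "c"]):
    print(variant)
# ['b', 'a', 'c']
# ['a', 'c', 'b']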
||||||
61 | proycon/pynlpl | pynlpl/textprocessors.py | find_keyword_in_context | def find_keyword_in_context(tokens, keyword, contextsize=1):
"""Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list"""
if isinstance(keyword,tuple) and isinstance(keyword,list):
l = len(keyword)
else:
keyword = (keyword,)
l = 1
n = l + contextsize*2
focuspos = contextsize + 1
for ngram in Windower(tokens,n,None,None):
if ngram[focuspos:focuspos+l] == keyword:
yield ngram[:focuspos], ngram[focuspos:focuspos+l],ngram[focuspos+l+1:] | python | def find_keyword_in_context(tokens, keyword, contextsize=1):
"""Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list"""
if isinstance(keyword,tuple) and isinstance(keyword,list):
l = len(keyword)
else:
keyword = (keyword,)
l = 1
n = l + contextsize*2
focuspos = contextsize + 1
for ngram in Windower(tokens,n,None,None):
if ngram[focuspos:focuspos+l] == keyword:
yield ngram[:focuspos], ngram[focuspos:focuspos+l],ngram[focuspos+l+1:] | [
"def",
"find_keyword_in_context",
"(",
"tokens",
",",
"keyword",
",",
"contextsize",
"=",
"1",
")",
":",
"if",
"isinstance",
"(",
"keyword",
",",
"tuple",
")",
"and",
"isinstance",
"(",
"keyword",
",",
"list",
")",
":",
"l",
"=",
"len",
"(",
"keyword",
")",
"else",
":",
"keyword",
"=",
"(",
"keyword",
",",
")",
"l",
"=",
"1",
"n",
"=",
"l",
"+",
"contextsize",
"*",
"2",
"focuspos",
"=",
"contextsize",
"+",
"1",
"for",
"ngram",
"in",
"Windower",
"(",
"tokens",
",",
"n",
",",
"None",
",",
"None",
")",
":",
"if",
"ngram",
"[",
"focuspos",
":",
"focuspos",
"+",
"l",
"]",
"==",
"keyword",
":",
"yield",
"ngram",
"[",
":",
"focuspos",
"]",
",",
"ngram",
"[",
"focuspos",
":",
"focuspos",
"+",
"l",
"]",
",",
"ngram",
"[",
"focuspos",
"+",
"l",
"+",
"1",
":",
"]"
] | Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple words, in which case it should be passed as a tuple or list | [
"Find",
"a",
"keyword",
"in",
"a",
"particular",
"sequence",
"of",
"tokens",
"and",
"return",
"the",
"local",
"context",
".",
"Contextsize",
"is",
"the",
"number",
"of",
"words",
"to",
"the",
"left",
"and",
"right",
".",
"The",
"keyword",
"may",
"have",
"multiple",
"word",
"in",
"which",
"case",
"it",
"should",
"to",
"passed",
"as",
"a",
"tuple",
"or",
"list"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L444-L455 | -1 |
||||||
62 | proycon/pynlpl | pynlpl/datatypes.py | PriorityQueue.randomprune | def randomprune(self,n):
"""prune down to n items at random, disregarding their score"""
self.data = random.sample(self.data, n) | python | def randomprune(self,n):
"""prune down to n items at random, disregarding their score"""
self.data = random.sample(self.data, n) | [
"def",
"randomprune",
"(",
"self",
",",
"n",
")",
":",
"self",
".",
"data",
"=",
"random",
".",
"sample",
"(",
"self",
".",
"data",
",",
"n",
")"
] | prune down to n items at random, disregarding their score | [
"prune",
"down",
"to",
"n",
"items",
"at",
"random",
"disregarding",
"their",
"score"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L196-L198 | -1 |
||||||
63 | proycon/pynlpl | pynlpl/datatypes.py | Tree.append | def append(self, item):
"""Add an item to the Tree"""
if not isinstance(item, Tree):
return ValueError("Can only append items of type Tree")
if not self.children: self.children = []
item.parent = self
self.children.append(item) | python | def append(self, item):
"""Add an item to the Tree"""
if not isinstance(item, Tree):
return ValueError("Can only append items of type Tree")
if not self.children: self.children = []
item.parent = self
self.children.append(item) | [
"def",
"append",
"(",
"self",
",",
"item",
")",
":",
"if",
"not",
"isinstance",
"(",
"item",
",",
"Tree",
")",
":",
"return",
"ValueError",
"(",
"\"Can only append items of type Tree\"",
")",
"if",
"not",
"self",
".",
"children",
":",
"self",
".",
"children",
"=",
"[",
"]",
"item",
".",
"parent",
"=",
"self",
"self",
".",
"children",
".",
"append",
"(",
"item",
")"
] | Add an item to the Tree | [
"Add",
"an",
"item",
"to",
"the",
"Tree"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L261-L267 | -1 |
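A sketch of building a small tree with the method above; the Tree constructor is not part of this record, so the single-value argument is an assumption.
from pynlpl.datatypes import Tree

root = Tree("S")         # constructor argument assumed to be the node's value
child = Tree("NP")
root.append(child)       # adds the child and sets child.parent
assert child.parent is root
assert root.children == [child]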
||||||
64 | proycon/pynlpl | pynlpl/datatypes.py | Trie.size | def size(self):
"""Size is number of nodes under the trie, including the current node"""
if self.children:
return sum( ( c.size() for c in self.children.values() ) ) + 1
else:
return 1 | python | def size(self):
"""Size is number of nodes under the trie, including the current node"""
if self.children:
return sum( ( c.size() for c in self.children.values() ) ) + 1
else:
return 1 | [
"def",
"size",
"(",
"self",
")",
":",
"if",
"self",
".",
"children",
":",
"return",
"sum",
"(",
"(",
"c",
".",
"size",
"(",
")",
"for",
"c",
"in",
"self",
".",
"children",
".",
"values",
"(",
")",
")",
")",
"+",
"1",
"else",
":",
"return",
"1"
] | Size is number of nodes under the trie, including the current node | [
"Size",
"is",
"number",
"of",
"nodes",
"under",
"the",
"trie",
"including",
"the",
"current",
"node"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/datatypes.py#L361-L366 | -1 |
||||||
65 | proycon/pynlpl | pynlpl/formats/sonar.py | CorpusDocumentX.validate | def validate(self, formats_dir="../formats/"):
"""checks if the document is valid"""
#TODO: download XSD from web
if self.inline:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines()))))
xmlschema.assertValid(self.tree)
#return xmlschema.validate(self)
else:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dutchsemcor-standalone.xsd").readlines()))))
xmlschema.assertValid(self.tree) | python | def validate(self, formats_dir="../formats/"):
"""checks if the document is valid"""
#TODO: download XSD from web
if self.inline:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dcoi-dsc.xsd").readlines()))))
xmlschema.assertValid(self.tree)
#return xmlschema.validate(self)
else:
xmlschema = ElementTree.XMLSchema(ElementTree.parse(StringIO("\n".join(open(formats_dir+"dutchsemcor-standalone.xsd").readlines()))))
xmlschema.assertValid(self.tree) | [
"def",
"validate",
"(",
"self",
",",
"formats_dir",
"=",
"\"../formats/\"",
")",
":",
"#TODO: download XSD from web",
"if",
"self",
".",
"inline",
":",
"xmlschema",
"=",
"ElementTree",
".",
"XMLSchema",
"(",
"ElementTree",
".",
"parse",
"(",
"StringIO",
"(",
"\"\\n\"",
".",
"join",
"(",
"open",
"(",
"formats_dir",
"+",
"\"dcoi-dsc.xsd\"",
")",
".",
"readlines",
"(",
")",
")",
")",
")",
")",
"xmlschema",
".",
"assertValid",
"(",
"self",
".",
"tree",
")",
"#return xmlschema.validate(self)",
"else",
":",
"xmlschema",
"=",
"ElementTree",
".",
"XMLSchema",
"(",
"ElementTree",
".",
"parse",
"(",
"StringIO",
"(",
"\"\\n\"",
".",
"join",
"(",
"open",
"(",
"formats_dir",
"+",
"\"dutchsemcor-standalone.xsd\"",
")",
".",
"readlines",
"(",
")",
")",
")",
")",
")",
"xmlschema",
".",
"assertValid",
"(",
"self",
".",
"tree",
")"
] | checks if the document is valid | [
"checks",
"if",
"the",
"document",
"is",
"valid"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L235-L244 | -1 |
||||||
66 | proycon/pynlpl | pynlpl/formats/sonar.py | CorpusDocumentX.xpath | def xpath(self, expression):
"""Executes an xpath expression using the correct namespaces"""
global namespaces
return self.tree.xpath(expression, namespaces=namespaces) | python | def xpath(self, expression):
"""Executes an xpath expression using the correct namespaces"""
global namespaces
return self.tree.xpath(expression, namespaces=namespaces) | [
"def",
"xpath",
"(",
"self",
",",
"expression",
")",
":",
"global",
"namespaces",
"return",
"self",
".",
"tree",
".",
"xpath",
"(",
"expression",
",",
"namespaces",
"=",
"namespaces",
")"
] | Executes an xpath expression using the correct namespaces | [
"Executes",
"an",
"xpath",
"expression",
"using",
"the",
"correct",
"namespaces"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/sonar.py#L247-L250 | -1 |
||||||
67 | proycon/pynlpl | pynlpl/formats/taggerdata.py | Taggerdata.align | def align(self, referencewords, datatuple):
"""align the reference sentence with the tagged data"""
targetwords = []
for i, (word,lemma,postag) in enumerate(zip(datatuple[0],datatuple[1],datatuple[2])):
if word:
subwords = word.split("_")
for w in subwords: #split multiword expressions
targetwords.append( (w, lemma, postag, i, len(subwords) > 1 ) ) #word, lemma, pos, index, multiword?
referencewords = [ w.lower() for w in referencewords ]
alignment = []
for i, referenceword in enumerate(referencewords):
found = False
best = 0
distance = 999999
for j, (targetword, lemma, pos, index, multiword) in enumerate(targetwords):
if referenceword == targetword and abs(i-j) < distance:
found = True
best = j
distance = abs(i-j)
if found:
alignment.append(targetwords[best])
else:
alignment.append((None,None,None,None,False)) #no alignment found
return alignment | python | def align(self, referencewords, datatuple):
"""align the reference sentence with the tagged data"""
targetwords = []
for i, (word,lemma,postag) in enumerate(zip(datatuple[0],datatuple[1],datatuple[2])):
if word:
subwords = word.split("_")
for w in subwords: #split multiword expressions
targetwords.append( (w, lemma, postag, i, len(subwords) > 1 ) ) #word, lemma, pos, index, multiword?
referencewords = [ w.lower() for w in referencewords ]
alignment = []
for i, referenceword in enumerate(referencewords):
found = False
best = 0
distance = 999999
for j, (targetword, lemma, pos, index, multiword) in enumerate(targetwords):
if referenceword == targetword and abs(i-j) < distance:
found = True
best = j
distance = abs(i-j)
if found:
alignment.append(targetwords[best])
else:
alignment.append((None,None,None,None,False)) #no alignment found
return alignment | [
"def",
"align",
"(",
"self",
",",
"referencewords",
",",
"datatuple",
")",
":",
"targetwords",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"word",
",",
"lemma",
",",
"postag",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"datatuple",
"[",
"0",
"]",
",",
"datatuple",
"[",
"1",
"]",
",",
"datatuple",
"[",
"2",
"]",
")",
")",
":",
"if",
"word",
":",
"subwords",
"=",
"word",
".",
"split",
"(",
"\"_\"",
")",
"for",
"w",
"in",
"subwords",
":",
"#split multiword expressions",
"targetwords",
".",
"append",
"(",
"(",
"w",
",",
"lemma",
",",
"postag",
",",
"i",
",",
"len",
"(",
"subwords",
")",
">",
"1",
")",
")",
"#word, lemma, pos, index, multiword? ",
"referencewords",
"=",
"[",
"w",
".",
"lower",
"(",
")",
"for",
"w",
"in",
"referencewords",
"]",
"alignment",
"=",
"[",
"]",
"for",
"i",
",",
"referenceword",
"in",
"enumerate",
"(",
"referencewords",
")",
":",
"found",
"=",
"False",
"best",
"=",
"0",
"distance",
"=",
"999999",
"for",
"j",
",",
"(",
"targetword",
",",
"lemma",
",",
"pos",
",",
"index",
",",
"multiword",
")",
"in",
"enumerate",
"(",
"targetwords",
")",
":",
"if",
"referenceword",
"==",
"targetword",
"and",
"abs",
"(",
"i",
"-",
"j",
")",
"<",
"distance",
":",
"found",
"=",
"True",
"best",
"=",
"j",
"distance",
"=",
"abs",
"(",
"i",
"-",
"j",
")",
"if",
"found",
":",
"alignment",
".",
"append",
"(",
"targetwords",
"[",
"best",
"]",
")",
"else",
":",
"alignment",
".",
"append",
"(",
"(",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"False",
")",
")",
"#no alignment found ",
"return",
"alignment"
] | align the reference sentence with the tagged data | [
"align",
"the",
"reference",
"sentence",
"with",
"the",
"tagged",
"data"
] | 7707f69a91caaa6cde037f0d0379f1d42500a68b | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/taggerdata.py#L99-L125 | -1 |
||||||
68 | scrapinghub/js2xml | js2xml/lexer.py | CustomLexer.build | def build(self, **kwargs):
"""Build the lexer."""
self.lexer = ply.lex.lex(object=self, **kwargs) | python | def build(self, **kwargs):
"""Build the lexer."""
self.lexer = ply.lex.lex(object=self, **kwargs) | [
"def",
"build",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"lexer",
"=",
"ply",
".",
"lex",
".",
"lex",
"(",
"object",
"=",
"self",
",",
"*",
"*",
"kwargs",
")"
] | Build the lexer. | [
"Build",
"the",
"lexer",
"."
] | d01b79e1a82de157deffcc1a22f4e0b6bfa07715 | https://github.com/scrapinghub/js2xml/blob/d01b79e1a82de157deffcc1a22f4e0b6bfa07715/js2xml/lexer.py#L74-L76 | -1 |
||||||
69 | mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth.is_authorized | def is_authorized(self, request):
"""Check if the user is authenticated for the given request.
The include_paths and exclude_paths are first checked. If
authentication is required then the Authorization HTTP header is
checked against the credentials.
"""
if self._is_request_in_include_path(request):
if self._is_request_in_exclude_path(request):
return True
else:
auth = request.authorization
if auth and auth[0] == 'Basic':
credentials = b64decode(auth[1]).decode('UTF-8')
username, password = credentials.split(':', 1)
return self._users.get(username) == password
else:
return False
else:
return True | python | def is_authorized(self, request):
"""Check if the user is authenticated for the given request.
The include_paths and exclude_paths are first checked. If
authentication is required then the Authorization HTTP header is
checked against the credentials.
"""
if self._is_request_in_include_path(request):
if self._is_request_in_exclude_path(request):
return True
else:
auth = request.authorization
if auth and auth[0] == 'Basic':
credentials = b64decode(auth[1]).decode('UTF-8')
username, password = credentials.split(':', 1)
return self._users.get(username) == password
else:
return False
else:
return True | [
"def",
"is_authorized",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"_is_request_in_include_path",
"(",
"request",
")",
":",
"if",
"self",
".",
"_is_request_in_exclude_path",
"(",
"request",
")",
":",
"return",
"True",
"else",
":",
"auth",
"=",
"request",
".",
"authorization",
"if",
"auth",
"and",
"auth",
"[",
"0",
"]",
"==",
"'Basic'",
":",
"credentials",
"=",
"b64decode",
"(",
"auth",
"[",
"1",
"]",
")",
".",
"decode",
"(",
"'UTF-8'",
")",
"username",
",",
"password",
"=",
"credentials",
".",
"split",
"(",
"':'",
",",
"1",
")",
"return",
"self",
".",
"_users",
".",
"get",
"(",
"username",
")",
"==",
"password",
"else",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | Check if the user is authenticated for the given request.
The include_paths and exclude_paths are first checked. If
authentication is required then the Authorization HTTP header is
checked against the credentials. | [
"Check",
"if",
"the",
"user",
"is",
"authenticated",
"for",
"the",
"given",
"request",
"."
] | 4e829bff21526f587f8d1a8592b63e1abd862a74 | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L48-L68 | -1 |
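A sketch of wrapping a WSGI app with this middleware; the constructor keyword names are assumptions inferred from the attributes used above (_users, _include_paths, _exclude_paths, _realm).
from wsgi_basic_auth import BasicAuth

def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

protected = BasicAuth(
    app,
    users={"admin": "secret"},      # credentials checked by is_authorized()
    include_paths=["/"],            # authentication required everywhere ...
    exclude_paths=["/healthz"],     # ... except paths that start with /healthz
)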
||||||
70 | mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth._login | def _login(self, environ, start_response):
"""Send a login response back to the client."""
response = HTTPUnauthorized()
response.www_authenticate = ('Basic', {'realm': self._realm})
return response(environ, start_response) | python | def _login(self, environ, start_response):
"""Send a login response back to the client."""
response = HTTPUnauthorized()
response.www_authenticate = ('Basic', {'realm': self._realm})
return response(environ, start_response) | [
"def",
"_login",
"(",
"self",
",",
"environ",
",",
"start_response",
")",
":",
"response",
"=",
"HTTPUnauthorized",
"(",
")",
"response",
".",
"www_authenticate",
"=",
"(",
"'Basic'",
",",
"{",
"'realm'",
":",
"self",
".",
"_realm",
"}",
")",
"return",
"response",
"(",
"environ",
",",
"start_response",
")"
] | Send a login response back to the client. | [
"Send",
"a",
"login",
"response",
"back",
"to",
"the",
"client",
"."
] | 4e829bff21526f587f8d1a8592b63e1abd862a74 | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L70-L74 | -1 |
||||||
71 | mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth._is_request_in_include_path | def _is_request_in_include_path(self, request):
"""Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
"""
if self._include_paths:
for path in self._include_paths:
if request.path.startswith(path):
return True
return False
else:
return True | python | def _is_request_in_include_path(self, request):
"""Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
"""
if self._include_paths:
for path in self._include_paths:
if request.path.startswith(path):
return True
return False
else:
return True | [
"def",
"_is_request_in_include_path",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"_include_paths",
":",
"for",
"path",
"in",
"self",
".",
"_include_paths",
":",
"if",
"request",
".",
"path",
".",
"startswith",
"(",
"path",
")",
":",
"return",
"True",
"return",
"False",
"else",
":",
"return",
"True"
] | Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths. | [
"Check",
"if",
"the",
"request",
"path",
"is",
"in",
"the",
"_include_paths",
"list",
"."
] | 4e829bff21526f587f8d1a8592b63e1abd862a74 | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L76-L89 | -1 |
||||||
72 | mvantellingen/wsgi-basic-auth | src/wsgi_basic_auth.py | BasicAuth._is_request_in_exclude_path | def _is_request_in_exclude_path(self, request):
"""Check if the request path is in the `_exclude_paths` list"""
if self._exclude_paths:
for path in self._exclude_paths:
if request.path.startswith(path):
return True
return False
else:
return False | python | def _is_request_in_exclude_path(self, request):
"""Check if the request path is in the `_exclude_paths` list"""
if self._exclude_paths:
for path in self._exclude_paths:
if request.path.startswith(path):
return True
return False
else:
return False | [
"def",
"_is_request_in_exclude_path",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"_exclude_paths",
":",
"for",
"path",
"in",
"self",
".",
"_exclude_paths",
":",
"if",
"request",
".",
"path",
".",
"startswith",
"(",
"path",
")",
":",
"return",
"True",
"return",
"False",
"else",
":",
"return",
"False"
] | Check if the request path is in the `_exclude_paths` list | [
"Check",
"if",
"the",
"request",
"path",
"is",
"in",
"the",
"_exclude_paths",
"list"
] | 4e829bff21526f587f8d1a8592b63e1abd862a74 | https://github.com/mvantellingen/wsgi-basic-auth/blob/4e829bff21526f587f8d1a8592b63e1abd862a74/src/wsgi_basic_auth.py#L91-L99 | -1 |
||||||
73 | click-contrib/click-repl | click_repl/__init__.py | bootstrap_prompt | def bootstrap_prompt(prompt_kwargs, group):
"""
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
"""
prompt_kwargs = prompt_kwargs or {}
defaults = {
"history": InMemoryHistory(),
"completer": ClickCompleter(group),
"message": u"> ",
}
for key in defaults:
default_value = defaults[key]
if key not in prompt_kwargs:
prompt_kwargs[key] = default_value
return prompt_kwargs | python | def bootstrap_prompt(prompt_kwargs, group):
"""
Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs.
"""
prompt_kwargs = prompt_kwargs or {}
defaults = {
"history": InMemoryHistory(),
"completer": ClickCompleter(group),
"message": u"> ",
}
for key in defaults:
default_value = defaults[key]
if key not in prompt_kwargs:
prompt_kwargs[key] = default_value
return prompt_kwargs | [
"def",
"bootstrap_prompt",
"(",
"prompt_kwargs",
",",
"group",
")",
":",
"prompt_kwargs",
"=",
"prompt_kwargs",
"or",
"{",
"}",
"defaults",
"=",
"{",
"\"history\"",
":",
"InMemoryHistory",
"(",
")",
",",
"\"completer\"",
":",
"ClickCompleter",
"(",
"group",
")",
",",
"\"message\"",
":",
"u\"> \"",
",",
"}",
"for",
"key",
"in",
"defaults",
":",
"default_value",
"=",
"defaults",
"[",
"key",
"]",
"if",
"key",
"not",
"in",
"prompt_kwargs",
":",
"prompt_kwargs",
"[",
"key",
"]",
"=",
"default_value",
"return",
"prompt_kwargs"
] | Bootstrap prompt_toolkit kwargs or use user defined values.
:param prompt_kwargs: The user specified prompt kwargs. | [
"Bootstrap",
"prompt_toolkit",
"kwargs",
"or",
"use",
"user",
"defined",
"values",
"."
] | 2d78dc520eb0bb5b813bad3b72344edbd22a7f4e | https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L146-L165 | -1 |
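Only missing keys are filled in, so a caller can override a single prompt option; a small sketch (the click group is illustrative).
import click
from click_repl import bootstrap_prompt

@click.group()
def cli():
    pass

kwargs = bootstrap_prompt({"message": u"my-cli> "}, cli)
# 'message' keeps the custom value; 'history' and 'completer' receive the defaults above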
||||||
74 | click-contrib/click-repl | click_repl/__init__.py | repl | def repl( # noqa: C901
old_ctx,
prompt_kwargs=None,
allow_system_commands=True,
allow_internal_commands=True,
):
"""
Start an interactive shell. All subcommands are available in it.
:param old_ctx: The current Click context.
:param prompt_kwargs: Parameters passed to
:py:func:`prompt_toolkit.shortcuts.prompt`.
If stdin is not a TTY, no prompt will be printed, but only commands read
from stdin.
"""
# parent should be available, but we're not going to bother if not
group_ctx = old_ctx.parent or old_ctx
group = group_ctx.command
isatty = sys.stdin.isatty()
# Delete the REPL command from those available, as we don't want to allow
# nesting REPLs (note: pass `None` to `pop` as we don't want to error if
# REPL command already not present for some reason).
repl_command_name = old_ctx.command.name
if isinstance(group_ctx.command, click.CommandCollection):
available_commands = {
cmd_name: cmd_obj
for source in group_ctx.command.sources
for cmd_name, cmd_obj in source.commands.items()
}
else:
available_commands = group_ctx.command.commands
available_commands.pop(repl_command_name, None)
prompt_kwargs = bootstrap_prompt(prompt_kwargs, group)
if isatty:
def get_command():
return prompt(**prompt_kwargs)
else:
get_command = sys.stdin.readline
while True:
try:
command = get_command()
except KeyboardInterrupt:
continue
except EOFError:
break
if not command:
if isatty:
continue
else:
break
if allow_system_commands and dispatch_repl_commands(command):
continue
if allow_internal_commands:
try:
result = handle_internal_commands(command)
if isinstance(result, six.string_types):
click.echo(result)
continue
except ExitReplException:
break
try:
args = shlex.split(command)
except ValueError as e:
click.echo("{}: {}".format(type(e).__name__, e))
continue
try:
with group.make_context(None, args, parent=group_ctx) as ctx:
group.invoke(ctx)
ctx.exit()
except click.ClickException as e:
e.show()
except ClickExit:
pass
except SystemExit:
pass
except ExitReplException:
break | python | def repl( # noqa: C901
old_ctx,
prompt_kwargs=None,
allow_system_commands=True,
allow_internal_commands=True,
):
"""
Start an interactive shell. All subcommands are available in it.
:param old_ctx: The current Click context.
:param prompt_kwargs: Parameters passed to
:py:func:`prompt_toolkit.shortcuts.prompt`.
If stdin is not a TTY, no prompt will be printed, but only commands read
from stdin.
"""
# parent should be available, but we're not going to bother if not
group_ctx = old_ctx.parent or old_ctx
group = group_ctx.command
isatty = sys.stdin.isatty()
# Delete the REPL command from those available, as we don't want to allow
# nesting REPLs (note: pass `None` to `pop` as we don't want to error if
# REPL command already not present for some reason).
repl_command_name = old_ctx.command.name
if isinstance(group_ctx.command, click.CommandCollection):
available_commands = {
cmd_name: cmd_obj
for source in group_ctx.command.sources
for cmd_name, cmd_obj in source.commands.items()
}
else:
available_commands = group_ctx.command.commands
available_commands.pop(repl_command_name, None)
prompt_kwargs = bootstrap_prompt(prompt_kwargs, group)
if isatty:
def get_command():
return prompt(**prompt_kwargs)
else:
get_command = sys.stdin.readline
while True:
try:
command = get_command()
except KeyboardInterrupt:
continue
except EOFError:
break
if not command:
if isatty:
continue
else:
break
if allow_system_commands and dispatch_repl_commands(command):
continue
if allow_internal_commands:
try:
result = handle_internal_commands(command)
if isinstance(result, six.string_types):
click.echo(result)
continue
except ExitReplException:
break
try:
args = shlex.split(command)
except ValueError as e:
click.echo("{}: {}".format(type(e).__name__, e))
continue
try:
with group.make_context(None, args, parent=group_ctx) as ctx:
group.invoke(ctx)
ctx.exit()
except click.ClickException as e:
e.show()
except ClickExit:
pass
except SystemExit:
pass
except ExitReplException:
break | [
"def",
"repl",
"(",
"# noqa: C901",
"old_ctx",
",",
"prompt_kwargs",
"=",
"None",
",",
"allow_system_commands",
"=",
"True",
",",
"allow_internal_commands",
"=",
"True",
",",
")",
":",
"# parent should be available, but we're not going to bother if not",
"group_ctx",
"=",
"old_ctx",
".",
"parent",
"or",
"old_ctx",
"group",
"=",
"group_ctx",
".",
"command",
"isatty",
"=",
"sys",
".",
"stdin",
".",
"isatty",
"(",
")",
"# Delete the REPL command from those available, as we don't want to allow",
"# nesting REPLs (note: pass `None` to `pop` as we don't want to error if",
"# REPL command already not present for some reason).",
"repl_command_name",
"=",
"old_ctx",
".",
"command",
".",
"name",
"if",
"isinstance",
"(",
"group_ctx",
".",
"command",
",",
"click",
".",
"CommandCollection",
")",
":",
"available_commands",
"=",
"{",
"cmd_name",
":",
"cmd_obj",
"for",
"source",
"in",
"group_ctx",
".",
"command",
".",
"sources",
"for",
"cmd_name",
",",
"cmd_obj",
"in",
"source",
".",
"commands",
".",
"items",
"(",
")",
"}",
"else",
":",
"available_commands",
"=",
"group_ctx",
".",
"command",
".",
"commands",
"available_commands",
".",
"pop",
"(",
"repl_command_name",
",",
"None",
")",
"prompt_kwargs",
"=",
"bootstrap_prompt",
"(",
"prompt_kwargs",
",",
"group",
")",
"if",
"isatty",
":",
"def",
"get_command",
"(",
")",
":",
"return",
"prompt",
"(",
"*",
"*",
"prompt_kwargs",
")",
"else",
":",
"get_command",
"=",
"sys",
".",
"stdin",
".",
"readline",
"while",
"True",
":",
"try",
":",
"command",
"=",
"get_command",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"continue",
"except",
"EOFError",
":",
"break",
"if",
"not",
"command",
":",
"if",
"isatty",
":",
"continue",
"else",
":",
"break",
"if",
"allow_system_commands",
"and",
"dispatch_repl_commands",
"(",
"command",
")",
":",
"continue",
"if",
"allow_internal_commands",
":",
"try",
":",
"result",
"=",
"handle_internal_commands",
"(",
"command",
")",
"if",
"isinstance",
"(",
"result",
",",
"six",
".",
"string_types",
")",
":",
"click",
".",
"echo",
"(",
"result",
")",
"continue",
"except",
"ExitReplException",
":",
"break",
"try",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"command",
")",
"except",
"ValueError",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"type",
"(",
"e",
")",
".",
"__name__",
",",
"e",
")",
")",
"continue",
"try",
":",
"with",
"group",
".",
"make_context",
"(",
"None",
",",
"args",
",",
"parent",
"=",
"group_ctx",
")",
"as",
"ctx",
":",
"group",
".",
"invoke",
"(",
"ctx",
")",
"ctx",
".",
"exit",
"(",
")",
"except",
"click",
".",
"ClickException",
"as",
"e",
":",
"e",
".",
"show",
"(",
")",
"except",
"ClickExit",
":",
"pass",
"except",
"SystemExit",
":",
"pass",
"except",
"ExitReplException",
":",
"break"
] | Start an interactive shell. All subcommands are available in it.
:param old_ctx: The current Click context.
:param prompt_kwargs: Parameters passed to
:py:func:`prompt_toolkit.shortcuts.prompt`.
If stdin is not a TTY, no prompt will be printed, but only commands read
from stdin. | [
"Start",
"an",
"interactive",
"shell",
".",
"All",
"subcommands",
"are",
"available",
"in",
"it",
"."
] | 2d78dc520eb0bb5b813bad3b72344edbd22a7f4e | https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L168-L257 | -1 |
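A sketch of exposing the REPL as a subcommand of a Click group, so every other subcommand becomes available inside it; the group and command names are illustrative.
import click
from click_repl import repl

@click.group()
def cli():
    pass

@cli.command()
def hello():
    click.echo("Hello!")

@cli.command(name="shell")
@click.pass_context
def shell(ctx):
    # repl() uses ctx.parent (the group context) to dispatch the typed commands
    repl(ctx, prompt_kwargs={"message": u"cli> "})

if __name__ == "__main__":
    cli()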
||||||
75 | click-contrib/click-repl | click_repl/__init__.py | handle_internal_commands | def handle_internal_commands(command):
"""Run repl-internal commands.
Repl-internal commands are all commands starting with ":".
"""
if command.startswith(":"):
target = _get_registered_target(command[1:], default=None)
if target:
return target() | python | def handle_internal_commands(command):
"""Run repl-internal commands.
Repl-internal commands are all commands starting with ":".
"""
if command.startswith(":"):
target = _get_registered_target(command[1:], default=None)
if target:
return target() | [
"def",
"handle_internal_commands",
"(",
"command",
")",
":",
"if",
"command",
".",
"startswith",
"(",
"\":\"",
")",
":",
"target",
"=",
"_get_registered_target",
"(",
"command",
"[",
"1",
":",
"]",
",",
"default",
"=",
"None",
")",
"if",
"target",
":",
"return",
"target",
"(",
")"
] | Run repl-internal commands.
Repl-internal commands are all commands starting with ":". | [
"Run",
"repl",
"-",
"internal",
"commands",
"."
] | 2d78dc520eb0bb5b813bad3b72344edbd22a7f4e | https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L283-L292 | -1 |
||||||
76 | graphql-python/graphql-relay-py | graphql_relay/node/node.py | node_definitions | def node_definitions(id_fetcher, type_resolver=None, id_resolver=None):
'''
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field config for a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `isTypeOf` method on object types, as with any GraphQL
interface without a provided `resolveType` method.
'''
node_interface = GraphQLInterfaceType(
'Node',
description='An object with an ID',
fields=lambda: OrderedDict((
('id', GraphQLField(
GraphQLNonNull(GraphQLID),
description='The id of the object.',
resolver=id_resolver,
)),
)),
resolve_type=type_resolver
)
node_field = GraphQLField(
node_interface,
description='Fetches an object given its ID',
args=OrderedDict((
('id', GraphQLArgument(
GraphQLNonNull(GraphQLID),
description='The ID of an object'
)),
)),
resolver=lambda obj, args, *_: id_fetcher(args.get('id'), *_)
)
return node_interface, node_field | python | def node_definitions(id_fetcher, type_resolver=None, id_resolver=None):
'''
Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field config for a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `isTypeOf` method on object types, as with any GraphQL
interface without a provided `resolveType` method.
'''
node_interface = GraphQLInterfaceType(
'Node',
description='An object with an ID',
fields=lambda: OrderedDict((
('id', GraphQLField(
GraphQLNonNull(GraphQLID),
description='The id of the object.',
resolver=id_resolver,
)),
)),
resolve_type=type_resolver
)
node_field = GraphQLField(
node_interface,
description='Fetches an object given its ID',
args=OrderedDict((
('id', GraphQLArgument(
GraphQLNonNull(GraphQLID),
description='The ID of an object'
)),
)),
resolver=lambda obj, args, *_: id_fetcher(args.get('id'), *_)
)
return node_interface, node_field | [
"def",
"node_definitions",
"(",
"id_fetcher",
",",
"type_resolver",
"=",
"None",
",",
"id_resolver",
"=",
"None",
")",
":",
"node_interface",
"=",
"GraphQLInterfaceType",
"(",
"'Node'",
",",
"description",
"=",
"'An object with an ID'",
",",
"fields",
"=",
"lambda",
":",
"OrderedDict",
"(",
"(",
"(",
"'id'",
",",
"GraphQLField",
"(",
"GraphQLNonNull",
"(",
"GraphQLID",
")",
",",
"description",
"=",
"'The id of the object.'",
",",
"resolver",
"=",
"id_resolver",
",",
")",
")",
",",
")",
")",
",",
"resolve_type",
"=",
"type_resolver",
")",
"node_field",
"=",
"GraphQLField",
"(",
"node_interface",
",",
"description",
"=",
"'Fetches an object given its ID'",
",",
"args",
"=",
"OrderedDict",
"(",
"(",
"(",
"'id'",
",",
"GraphQLArgument",
"(",
"GraphQLNonNull",
"(",
"GraphQLID",
")",
",",
"description",
"=",
"'The ID of an object'",
")",
")",
",",
")",
")",
",",
"resolver",
"=",
"lambda",
"obj",
",",
"args",
",",
"*",
"_",
":",
"id_fetcher",
"(",
"args",
".",
"get",
"(",
"'id'",
")",
",",
"*",
"_",
")",
")",
"return",
"node_interface",
",",
"node_field"
] | Given a function to map from an ID to an underlying object, and a function
to map from an underlying object to the concrete GraphQLObjectType it
corresponds to, constructs a `Node` interface that objects can implement,
and a field config for a `node` root field.
If the type_resolver is omitted, object resolution on the interface will be
handled with the `isTypeOf` method on object types, as with any GraphQL
interface without a provided `resolveType` method. | [
"Given",
"a",
"function",
"to",
"map",
"from",
"an",
"ID",
"to",
"an",
"underlying",
"object",
"and",
"a",
"function",
"to",
"map",
"from",
"an",
"underlying",
"object",
"to",
"the",
"concrete",
"GraphQLObjectType",
"it",
"corresponds",
"to",
"constructs",
"a",
"Node",
"interface",
"that",
"objects",
"can",
"implement",
"and",
"a",
"field",
"config",
"for",
"a",
"node",
"root",
"field",
"."
] | 17ce2efa3c396df42791ae00667120b5fae64610 | https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/node/node.py#L15-L49 | -1 |
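A sketch of the intended wiring, matching the resolver signatures above; the photo data and photo_type are illustrative assumptions, with photo_type standing for a GraphQLObjectType defined alongside the schema.
from graphql_relay.node.node import node_definitions

photos = {"1": {"id": "1", "width": 300}}

def fetch_by_id(global_id, *_):     # invoked as id_fetcher(args.get('id'), context, info)
    return photos.get(global_id)

def resolve_type(obj, *_):          # maps a fetched object back to its GraphQLObjectType
    return photo_type               # assumed to be defined elsewhere in the schema module

node_interface, node_field = node_definitions(fetch_by_id, type_resolver=resolve_type)
# node_field is then placed on the Query type, and photo_type declares
# interfaces=[node_interface] so it satisfies the Node contract.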
||||||
77 | graphql-python/graphql-relay-py | graphql_relay/node/node.py | from_global_id | def from_global_id(global_id):
'''
Takes the "global ID" created by toGlobalID, and retuns the type name and ID
used to create it.
'''
unbased_global_id = unbase64(global_id)
_type, _id = unbased_global_id.split(':', 1)
return _type, _id | python | def from_global_id(global_id):
'''
Takes the "global ID" created by toGlobalID, and retuns the type name and ID
used to create it.
'''
unbased_global_id = unbase64(global_id)
_type, _id = unbased_global_id.split(':', 1)
return _type, _id | [
"def",
"from_global_id",
"(",
"global_id",
")",
":",
"unbased_global_id",
"=",
"unbase64",
"(",
"global_id",
")",
"_type",
",",
"_id",
"=",
"unbased_global_id",
".",
"split",
"(",
"':'",
",",
"1",
")",
"return",
"_type",
",",
"_id"
] | Takes the "global ID" created by toGlobalID, and returns the type name and ID
used to create it. | [
"Takes",
"the",
"global",
"ID",
"created",
"by",
"toGlobalID",
"and",
"retuns",
"the",
"type",
"name",
"and",
"ID",
"used",
"to",
"create",
"it",
"."
] | 17ce2efa3c396df42791ae00667120b5fae64610 | https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/node/node.py#L60-L67 | -1 |
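A round-trip sketch with the companion to_global_id helper from the same module.
from graphql_relay.node.node import to_global_id, from_global_id

global_id = to_global_id("Photo", "42")            # base64 of "Photo:42"
assert from_global_id(global_id) == ("Photo", "42")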
||||||
78 | graphql-python/graphql-relay-py | graphql_relay/node/node.py | global_id_field | def global_id_field(type_name, id_fetcher=None):
'''
Creates the configuration for an id field on a node, using `to_global_id` to
construct the ID from the provided typename. The type-specific ID is fetcher
by calling id_fetcher on the object, or if not provided, by accessing the `id`
property on the object.
'''
return GraphQLField(
GraphQLNonNull(GraphQLID),
description='The ID of an object',
resolver=lambda obj, args, context, info: to_global_id(
type_name or info.parent_type.name,
id_fetcher(obj, context, info) if id_fetcher else obj.id
)
) | python | def global_id_field(type_name, id_fetcher=None):
'''
Creates the configuration for an id field on a node, using `to_global_id` to
construct the ID from the provided typename. The type-specific ID is fetcher
by calling id_fetcher on the object, or if not provided, by accessing the `id`
property on the object.
'''
return GraphQLField(
GraphQLNonNull(GraphQLID),
description='The ID of an object',
resolver=lambda obj, args, context, info: to_global_id(
type_name or info.parent_type.name,
id_fetcher(obj, context, info) if id_fetcher else obj.id
)
) | [
"def",
"global_id_field",
"(",
"type_name",
",",
"id_fetcher",
"=",
"None",
")",
":",
"return",
"GraphQLField",
"(",
"GraphQLNonNull",
"(",
"GraphQLID",
")",
",",
"description",
"=",
"'The ID of an object'",
",",
"resolver",
"=",
"lambda",
"obj",
",",
"args",
",",
"context",
",",
"info",
":",
"to_global_id",
"(",
"type_name",
"or",
"info",
".",
"parent_type",
".",
"name",
",",
"id_fetcher",
"(",
"obj",
",",
"context",
",",
"info",
")",
"if",
"id_fetcher",
"else",
"obj",
".",
"id",
")",
")"
] | Creates the configuration for an id field on a node, using `to_global_id` to
construct the ID from the provided typename. The type-specific ID is fetched
by calling id_fetcher on the object, or if not provided, by accessing the `id`
property on the object. | [
"Creates",
"the",
"configuration",
"for",
"an",
"id",
"field",
"on",
"a",
"node",
"using",
"to_global_id",
"to",
"construct",
"the",
"ID",
"from",
"the",
"provided",
"typename",
".",
"The",
"type",
"-",
"specific",
"ID",
"is",
"fetcher",
"by",
"calling",
"id_fetcher",
"on",
"the",
"object",
"or",
"if",
"not",
"provided",
"by",
"accessing",
"the",
"id",
"property",
"on",
"the",
"object",
"."
] | 17ce2efa3c396df42791ae00667120b5fae64610 | https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/node/node.py#L70-L84 | -1 |
||||||
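A rough sketch of wiring global_id_field into an object type, assuming graphql-core 1.x style types that match the (obj, args, context, info) resolver signature shown above; the 'User' type and its fields are purely illustrative:

    from graphql import GraphQLObjectType, GraphQLField, GraphQLString
    from graphql_relay import global_id_field

    user_type = GraphQLObjectType(
        name='User',
        fields=lambda: {
            'id': global_id_field('User'),      # resolves to to_global_id('User', obj.id)
            'name': GraphQLField(GraphQLString),
        })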
79 | graphql-python/graphql-relay-py | graphql_relay/connection/arrayconnection.py | connection_from_list | def connection_from_list(data, args=None, **kwargs):
'''
A simple function that accepts an array and connection arguments, and returns
a connection object for use in GraphQL. It uses array offsets as pagination,
so pagination will only work if the array is static.
'''
_len = len(data)
return connection_from_list_slice(
data,
args,
slice_start=0,
list_length=_len,
list_slice_length=_len,
**kwargs
) | python | def connection_from_list(data, args=None, **kwargs):
'''
A simple function that accepts an array and connection arguments, and returns
a connection object for use in GraphQL. It uses array offsets as pagination,
so pagination will only work if the array is static.
'''
_len = len(data)
return connection_from_list_slice(
data,
args,
slice_start=0,
list_length=_len,
list_slice_length=_len,
**kwargs
) | [
"def",
"connection_from_list",
"(",
"data",
",",
"args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"_len",
"=",
"len",
"(",
"data",
")",
"return",
"connection_from_list_slice",
"(",
"data",
",",
"args",
",",
"slice_start",
"=",
"0",
",",
"list_length",
"=",
"_len",
",",
"list_slice_length",
"=",
"_len",
",",
"*",
"*",
"kwargs",
")"
] | A simple function that accepts an array and connection arguments, and returns
a connection object for use in GraphQL. It uses array offsets as pagination,
so pagination will only work if the array is static. | [
"A",
"simple",
"function",
"that",
"accepts",
"an",
"array",
"and",
"connection",
"arguments",
"and",
"returns",
"a",
"connection",
"object",
"for",
"use",
"in",
"GraphQL",
".",
"It",
"uses",
"array",
"offsets",
"as",
"pagination",
"so",
"pagination",
"will",
"only",
"work",
"if",
"the",
"array",
"is",
"static",
"."
] | 17ce2efa3c396df42791ae00667120b5fae64610 | https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L7-L21 | -1 |
||||||
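A small usage sketch, assuming connection_from_list is imported from the package top level as in this version; beyond the edges shown here, the returned connection also carries page-info metadata (has_next_page and so on):

    from graphql_relay import connection_from_list

    letters = ['A', 'B', 'C', 'D']
    connection = connection_from_list(letters, args={'first': 2})
    print([edge.node for edge in connection.edges])   # ['A', 'B']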
80 | graphql-python/graphql-relay-py | graphql_relay/connection/arrayconnection.py | connection_from_promised_list | def connection_from_promised_list(data_promise, args=None, **kwargs):
'''
A version of `connectionFromArray` that takes a promised array, and returns a
promised connection.
'''
return data_promise.then(lambda data: connection_from_list(data, args, **kwargs)) | python | def connection_from_promised_list(data_promise, args=None, **kwargs):
'''
A version of `connectionFromArray` that takes a promised array, and returns a
promised connection.
'''
return data_promise.then(lambda data: connection_from_list(data, args, **kwargs)) | [
"def",
"connection_from_promised_list",
"(",
"data_promise",
",",
"args",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"data_promise",
".",
"then",
"(",
"lambda",
"data",
":",
"connection_from_list",
"(",
"data",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | A version of `connectionFromArray` that takes a promised array, and returns a
promised connection. | [
"A",
"version",
"of",
"connectionFromArray",
"that",
"takes",
"a",
"promised",
"array",
"and",
"returns",
"a",
"promised",
"connection",
"."
] | 17ce2efa3c396df42791ae00667120b5fae64610 | https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L24-L29 | -1 |
||||||
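Since the code above only calls .then() on its argument, any thenable works. A self-contained sketch with a stand-in promise (FakePromise is hypothetical, used here only to avoid pulling in a third-party promise library):

    from graphql_relay import connection_from_promised_list

    class FakePromise:
        def __init__(self, value): self.value = value
        def then(self, fn): return fn(self.value)   # resolve synchronously

    conn = connection_from_promised_list(FakePromise(['A', 'B', 'C']), args={'first': 1})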
81 | graphql-python/graphql-relay-py | graphql_relay/connection/arrayconnection.py | cursor_for_object_in_connection | def cursor_for_object_in_connection(data, _object):
'''
Return the cursor associated with an object in an array.
'''
if _object not in data:
return None
offset = data.index(_object)
return offset_to_cursor(offset) | python | def cursor_for_object_in_connection(data, _object):
'''
Return the cursor associated with an object in an array.
'''
if _object not in data:
return None
offset = data.index(_object)
return offset_to_cursor(offset) | [
"def",
"cursor_for_object_in_connection",
"(",
"data",
",",
"_object",
")",
":",
"if",
"_object",
"not",
"in",
"data",
":",
"return",
"None",
"offset",
"=",
"data",
".",
"index",
"(",
"_object",
")",
"return",
"offset_to_cursor",
"(",
"offset",
")"
] | Return the cursor associated with an object in an array. | [
"Return",
"the",
"cursor",
"associated",
"with",
"an",
"object",
"in",
"an",
"array",
"."
] | 17ce2efa3c396df42791ae00667120b5fae64610 | https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L134-L142 | -1 |
||||||
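A short sketch of the helper above (imported from the package top level, like the other connection helpers); the cursor is the base64-encoded offset of the object in the list, and None is returned when the object is absent:

    from graphql_relay import cursor_for_object_in_connection

    letters = ['A', 'B', 'C']
    print(cursor_for_object_in_connection(letters, 'B'))   # cursor for offset 1
    print(cursor_for_object_in_connection(letters, 'Z'))   # None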
82 | graphql-python/graphql-relay-py | graphql_relay/connection/arrayconnection.py | get_offset_with_default | def get_offset_with_default(cursor=None, default_offset=0):
'''
Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
'''
if not is_str(cursor):
return default_offset
offset = cursor_to_offset(cursor)
try:
return int(offset)
except:
return default_offset | python | def get_offset_with_default(cursor=None, default_offset=0):
'''
Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default.
'''
if not is_str(cursor):
return default_offset
offset = cursor_to_offset(cursor)
try:
return int(offset)
except:
return default_offset | [
"def",
"get_offset_with_default",
"(",
"cursor",
"=",
"None",
",",
"default_offset",
"=",
"0",
")",
":",
"if",
"not",
"is_str",
"(",
"cursor",
")",
":",
"return",
"default_offset",
"offset",
"=",
"cursor_to_offset",
"(",
"cursor",
")",
"try",
":",
"return",
"int",
"(",
"offset",
")",
"except",
":",
"return",
"default_offset"
] | Given an optional cursor and a default offset, returns the offset
to use; if the cursor contains a valid offset, that will be used,
otherwise it will be the default. | [
"Given",
"an",
"optional",
"cursor",
"and",
"a",
"default",
"offset",
"returns",
"the",
"offset",
"to",
"use",
";",
"if",
"the",
"cursor",
"contains",
"a",
"valid",
"offset",
"that",
"will",
"be",
"used",
"otherwise",
"it",
"will",
"be",
"the",
"default",
"."
] | 17ce2efa3c396df42791ae00667120b5fae64610 | https://github.com/graphql-python/graphql-relay-py/blob/17ce2efa3c396df42791ae00667120b5fae64610/graphql_relay/connection/arrayconnection.py#L145-L158 | -1 |
||||||
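A quick sketch using offset_to_cursor from the same arrayconnection module to build a valid cursor; invalid or missing cursors fall back to the default:

    from graphql_relay.connection.arrayconnection import (
        get_offset_with_default, offset_to_cursor)

    cursor = offset_to_cursor(3)
    print(get_offset_with_default(cursor, default_offset=0))    # 3
    print(get_offset_with_default(None, default_offset=10))     # 10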
83 | patrickfuller/jgraph | python/notebook.py | generate | def generate(data, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed algorithm on a graph, returning a data structure.
Args:
data: An adjacency list of tuples (ie. [(1,2),...])
iterations: (Optional) Number of FDL iterations to run in coordinate
generation
force_strength: (Optional) Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening: (Optional) Multiplier to reduce force applied to nodes
max_velocity: (Optional) Maximum distance a node can move in one step
max_distance: (Optional) The maximum inter-node distance considered
is_3d: (Optional) Generates three-dimensional coordinates
Outputs a json-serializable Python object. To visualize, pass the output to
`jgraph.draw(...)`.
"""
edges = [{'source': s, 'target': t} for s, t in data]
nodes = force_directed_layout.run(edges, iterations, force_strength,
dampening, max_velocity, max_distance,
is_3d)
return {'edges': edges, 'nodes': nodes} | python | def generate(data, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed algorithm on a graph, returning a data structure.
Args:
data: An adjacency list of tuples (ie. [(1,2),...])
iterations: (Optional) Number of FDL iterations to run in coordinate
generation
force_strength: (Optional) Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening: (Optional) Multiplier to reduce force applied to nodes
max_velocity: (Optional) Maximum distance a node can move in one step
max_distance: (Optional) The maximum inter-node distance considered
is_3d: (Optional) Generates three-dimensional coordinates
Outputs a json-serializable Python object. To visualize, pass the output to
`jgraph.draw(...)`.
"""
edges = [{'source': s, 'target': t} for s, t in data]
nodes = force_directed_layout.run(edges, iterations, force_strength,
dampening, max_velocity, max_distance,
is_3d)
return {'edges': edges, 'nodes': nodes} | [
"def",
"generate",
"(",
"data",
",",
"iterations",
"=",
"1000",
",",
"force_strength",
"=",
"5.0",
",",
"dampening",
"=",
"0.01",
",",
"max_velocity",
"=",
"2.0",
",",
"max_distance",
"=",
"50",
",",
"is_3d",
"=",
"True",
")",
":",
"edges",
"=",
"[",
"{",
"'source'",
":",
"s",
",",
"'target'",
":",
"t",
"}",
"for",
"s",
",",
"t",
"in",
"data",
"]",
"nodes",
"=",
"force_directed_layout",
".",
"run",
"(",
"edges",
",",
"iterations",
",",
"force_strength",
",",
"dampening",
",",
"max_velocity",
",",
"max_distance",
",",
"is_3d",
")",
"return",
"{",
"'edges'",
":",
"edges",
",",
"'nodes'",
":",
"nodes",
"}"
] | Runs a force-directed algorithm on a graph, returning a data structure.
Args:
data: An adjacency list of tuples (ie. [(1,2),...])
iterations: (Optional) Number of FDL iterations to run in coordinate
generation
force_strength: (Optional) Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening: (Optional) Multiplier to reduce force applied to nodes
max_velocity: (Optional) Maximum distance a node can move in one step
max_distance: (Optional) The maximum inter-node distance considered
is_3d: (Optional) Generates three-dimensional coordinates
Outputs a json-serializable Python object. To visualize, pass the output to
`jgraph.draw(...)`. | [
"Runs",
"a",
"force",
"-",
"directed",
"algorithm",
"on",
"a",
"graph",
"returning",
"a",
"data",
"structure",
"."
] | 7297450f26ae8cba21914668a5aaa755de8aa14d | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/notebook.py#L136-L159 | -1 |
||||||
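A minimal sketch of calling generate, assuming it is exposed on the installed jgraph package as the docstring's reference to jgraph.draw suggests; exact coordinates vary per run because the layout starts from random jitter:

    import jgraph

    adjacency = [(1, 2), (2, 3), (3, 1)]
    graph = jgraph.generate(adjacency, iterations=200, is_3d=True)
    # graph looks like {'edges': [{'source': 1, 'target': 2}, ...],
    #                   'nodes': {1: {'location': [x, y, z]}, ...}}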
84 | patrickfuller/jgraph | python/json_formatter.py | compress | def compress(obj):
"""Outputs json without whitespace."""
return json.dumps(obj, sort_keys=True, separators=(',', ':'),
cls=CustomEncoder) | python | def compress(obj):
"""Outputs json without whitespace."""
return json.dumps(obj, sort_keys=True, separators=(',', ':'),
cls=CustomEncoder) | [
"def",
"compress",
"(",
"obj",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"obj",
",",
"sort_keys",
"=",
"True",
",",
"separators",
"=",
"(",
"','",
",",
"':'",
")",
",",
"cls",
"=",
"CustomEncoder",
")"
] | Outputs json without whitespace. | [
"Outputs",
"json",
"without",
"whitespace",
"."
] | 7297450f26ae8cba21914668a5aaa755de8aa14d | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L18-L21 | -1 |
||||||
85 | patrickfuller/jgraph | python/json_formatter.py | dumps | def dumps(obj):
"""Outputs json with formatting edits + object handling."""
return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder) | python | def dumps(obj):
"""Outputs json with formatting edits + object handling."""
return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder) | [
"def",
"dumps",
"(",
"obj",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"obj",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
",",
"cls",
"=",
"CustomEncoder",
")"
] | Outputs json with formatting edits + object handling. | [
"Outputs",
"json",
"with",
"formatting",
"edits",
"+",
"object",
"handling",
"."
] | 7297450f26ae8cba21914668a5aaa755de8aa14d | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L24-L26 | -1 |
||||||
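A small sketch of the two formatters above, assuming json_formatter is importable under that name (inside the installed package it may sit under the jgraph namespace):

    import json_formatter

    graph = {'edges': [{'source': 1, 'target': 2}],
             'nodes': {1: {'location': [0.0, 0.0, 0.0]}}}
    print(json_formatter.compress(graph))   # single-line JSON, no whitespace
    print(json_formatter.dumps(graph))      # indented JSON, one node/edge entry per line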
86 | patrickfuller/jgraph | python/json_formatter.py | CustomEncoder.encode | def encode(self, obj):
"""Fired for every object."""
s = super(CustomEncoder, self).encode(obj)
# If uncompressed, postprocess for formatting
if len(s.splitlines()) > 1:
s = self.postprocess(s)
return s | python | def encode(self, obj):
"""Fired for every object."""
s = super(CustomEncoder, self).encode(obj)
# If uncompressed, postprocess for formatting
if len(s.splitlines()) > 1:
s = self.postprocess(s)
return s | [
"def",
"encode",
"(",
"self",
",",
"obj",
")",
":",
"s",
"=",
"super",
"(",
"CustomEncoder",
",",
"self",
")",
".",
"encode",
"(",
"obj",
")",
"# If uncompressed, postprocess for formatting",
"if",
"len",
"(",
"s",
".",
"splitlines",
"(",
")",
")",
">",
"1",
":",
"s",
"=",
"self",
".",
"postprocess",
"(",
"s",
")",
"return",
"s"
] | Fired for every object. | [
"Fired",
"for",
"every",
"object",
"."
] | 7297450f26ae8cba21914668a5aaa755de8aa14d | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L31-L37 | -1 |
||||||
87 | patrickfuller/jgraph | python/json_formatter.py | CustomEncoder.postprocess | def postprocess(self, json_string):
"""Displays each entry on its own line."""
is_compressing, is_hash, compressed, spaces = False, False, [], 0
for row in json_string.split('\n'):
if is_compressing:
if (row[:spaces + 5] == ' ' * (spaces + 4) +
('"' if is_hash else '{')):
compressed.append(row.rstrip())
elif (len(row) > spaces and row[:spaces] == ' ' * spaces and
re.match('[\]\}],?', row[spaces:].rstrip())):
compressed.append(row.rstrip())
is_compressing = False
else:
compressed[-1] += ' ' + row.strip()
else:
compressed.append(row.rstrip())
if any(a in row for a in ['edges', 'nodes']):
# Fix to handle issues that arise with empty lists
if '[]' in row:
continue
spaces = sum(1 for _ in takewhile(str.isspace, row))
is_compressing, is_hash = True, '{' in row
return '\n'.join(compressed) | python | def postprocess(self, json_string):
"""Displays each entry on its own line."""
is_compressing, is_hash, compressed, spaces = False, False, [], 0
for row in json_string.split('\n'):
if is_compressing:
if (row[:spaces + 5] == ' ' * (spaces + 4) +
('"' if is_hash else '{')):
compressed.append(row.rstrip())
elif (len(row) > spaces and row[:spaces] == ' ' * spaces and
re.match('[\]\}],?', row[spaces:].rstrip())):
compressed.append(row.rstrip())
is_compressing = False
else:
compressed[-1] += ' ' + row.strip()
else:
compressed.append(row.rstrip())
if any(a in row for a in ['edges', 'nodes']):
# Fix to handle issues that arise with empty lists
if '[]' in row:
continue
spaces = sum(1 for _ in takewhile(str.isspace, row))
is_compressing, is_hash = True, '{' in row
return '\n'.join(compressed) | [
"def",
"postprocess",
"(",
"self",
",",
"json_string",
")",
":",
"is_compressing",
",",
"is_hash",
",",
"compressed",
",",
"spaces",
"=",
"False",
",",
"False",
",",
"[",
"]",
",",
"0",
"for",
"row",
"in",
"json_string",
".",
"split",
"(",
"'\\n'",
")",
":",
"if",
"is_compressing",
":",
"if",
"(",
"row",
"[",
":",
"spaces",
"+",
"5",
"]",
"==",
"' '",
"*",
"(",
"spaces",
"+",
"4",
")",
"+",
"(",
"'\"'",
"if",
"is_hash",
"else",
"'{'",
")",
")",
":",
"compressed",
".",
"append",
"(",
"row",
".",
"rstrip",
"(",
")",
")",
"elif",
"(",
"len",
"(",
"row",
")",
">",
"spaces",
"and",
"row",
"[",
":",
"spaces",
"]",
"==",
"' '",
"*",
"spaces",
"and",
"re",
".",
"match",
"(",
"'[\\]\\}],?'",
",",
"row",
"[",
"spaces",
":",
"]",
".",
"rstrip",
"(",
")",
")",
")",
":",
"compressed",
".",
"append",
"(",
"row",
".",
"rstrip",
"(",
")",
")",
"is_compressing",
"=",
"False",
"else",
":",
"compressed",
"[",
"-",
"1",
"]",
"+=",
"' '",
"+",
"row",
".",
"strip",
"(",
")",
"else",
":",
"compressed",
".",
"append",
"(",
"row",
".",
"rstrip",
"(",
")",
")",
"if",
"any",
"(",
"a",
"in",
"row",
"for",
"a",
"in",
"[",
"'edges'",
",",
"'nodes'",
"]",
")",
":",
"# Fix to handle issues that arise with empty lists",
"if",
"'[]'",
"in",
"row",
":",
"continue",
"spaces",
"=",
"sum",
"(",
"1",
"for",
"_",
"in",
"takewhile",
"(",
"str",
".",
"isspace",
",",
"row",
")",
")",
"is_compressing",
",",
"is_hash",
"=",
"True",
",",
"'{'",
"in",
"row",
"return",
"'\\n'",
".",
"join",
"(",
"compressed",
")"
] | Displays each entry on its own line. | [
"Displays",
"each",
"entry",
"on",
"its",
"own",
"line",
"."
] | 7297450f26ae8cba21914668a5aaa755de8aa14d | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/json_formatter.py#L39-L61 | -1 |
||||||
88 | patrickfuller/jgraph | python/force_directed_layout.py | run | def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions
"""
# Get a list of node ids from the edge data
nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
# Convert to a data-storing object and initialize some values
d = 3 if is_3d else 2
nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
# Repeat n times (is there a more Pythonic way to do this?)
for _ in repeat(None, iterations):
# Add in Coulomb-esque node-node repulsive forces
for node1, node2 in combinations(nodes.values(), 2):
_coulomb(node1, node2, force_strength, max_distance)
# And Hooke-esque edge spring forces
for edge in edges:
_hooke(nodes[edge['source']], nodes[edge['target']],
force_strength * edge.get('size', 1), max_distance)
# Move by resultant force
for node in nodes.values():
# Constrain the force to the bounds specified by input parameter
force = [_constrain(dampening * f, -max_velocity, max_velocity)
for f in node['force']]
# Update velocities and reset force
node['velocity'] = [v + dv
for v, dv in zip(node['velocity'], force)]
node['force'] = [0] * d
# Clean and return
for node in nodes.values():
del node['force']
node['location'] = node['velocity']
del node['velocity']
# Even if it's 2D, let's specify three dimensions
if not is_3d:
node['location'] += [0.0]
return nodes | python | def run(edges, iterations=1000, force_strength=5.0, dampening=0.01,
max_velocity=2.0, max_distance=50, is_3d=True):
"""Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions
"""
# Get a list of node ids from the edge data
nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges)
# Convert to a data-storing object and initialize some values
d = 3 if is_3d else 2
nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes}
# Repeat n times (is there a more Pythonic way to do this?)
for _ in repeat(None, iterations):
# Add in Coulomb-esque node-node repulsive forces
for node1, node2 in combinations(nodes.values(), 2):
_coulomb(node1, node2, force_strength, max_distance)
# And Hooke-esque edge spring forces
for edge in edges:
_hooke(nodes[edge['source']], nodes[edge['target']],
force_strength * edge.get('size', 1), max_distance)
# Move by resultant force
for node in nodes.values():
# Constrain the force to the bounds specified by input parameter
force = [_constrain(dampening * f, -max_velocity, max_velocity)
for f in node['force']]
# Update velocities and reset force
node['velocity'] = [v + dv
for v, dv in zip(node['velocity'], force)]
node['force'] = [0] * d
# Clean and return
for node in nodes.values():
del node['force']
node['location'] = node['velocity']
del node['velocity']
# Even if it's 2D, let's specify three dimensions
if not is_3d:
node['location'] += [0.0]
return nodes | [
"def",
"run",
"(",
"edges",
",",
"iterations",
"=",
"1000",
",",
"force_strength",
"=",
"5.0",
",",
"dampening",
"=",
"0.01",
",",
"max_velocity",
"=",
"2.0",
",",
"max_distance",
"=",
"50",
",",
"is_3d",
"=",
"True",
")",
":",
"# Get a list of node ids from the edge data",
"nodes",
"=",
"set",
"(",
"e",
"[",
"'source'",
"]",
"for",
"e",
"in",
"edges",
")",
"|",
"set",
"(",
"e",
"[",
"'target'",
"]",
"for",
"e",
"in",
"edges",
")",
"# Convert to a data-storing object and initialize some values",
"d",
"=",
"3",
"if",
"is_3d",
"else",
"2",
"nodes",
"=",
"{",
"n",
":",
"{",
"'velocity'",
":",
"[",
"0.0",
"]",
"*",
"d",
",",
"'force'",
":",
"[",
"0.0",
"]",
"*",
"d",
"}",
"for",
"n",
"in",
"nodes",
"}",
"# Repeat n times (is there a more Pythonic way to do this?)",
"for",
"_",
"in",
"repeat",
"(",
"None",
",",
"iterations",
")",
":",
"# Add in Coulomb-esque node-node repulsive forces",
"for",
"node1",
",",
"node2",
"in",
"combinations",
"(",
"nodes",
".",
"values",
"(",
")",
",",
"2",
")",
":",
"_coulomb",
"(",
"node1",
",",
"node2",
",",
"force_strength",
",",
"max_distance",
")",
"# And Hooke-esque edge spring forces",
"for",
"edge",
"in",
"edges",
":",
"_hooke",
"(",
"nodes",
"[",
"edge",
"[",
"'source'",
"]",
"]",
",",
"nodes",
"[",
"edge",
"[",
"'target'",
"]",
"]",
",",
"force_strength",
"*",
"edge",
".",
"get",
"(",
"'size'",
",",
"1",
")",
",",
"max_distance",
")",
"# Move by resultant force",
"for",
"node",
"in",
"nodes",
".",
"values",
"(",
")",
":",
"# Constrain the force to the bounds specified by input parameter",
"force",
"=",
"[",
"_constrain",
"(",
"dampening",
"*",
"f",
",",
"-",
"max_velocity",
",",
"max_velocity",
")",
"for",
"f",
"in",
"node",
"[",
"'force'",
"]",
"]",
"# Update velocities and reset force",
"node",
"[",
"'velocity'",
"]",
"=",
"[",
"v",
"+",
"dv",
"for",
"v",
",",
"dv",
"in",
"zip",
"(",
"node",
"[",
"'velocity'",
"]",
",",
"force",
")",
"]",
"node",
"[",
"'force'",
"]",
"=",
"[",
"0",
"]",
"*",
"d",
"# Clean and return",
"for",
"node",
"in",
"nodes",
".",
"values",
"(",
")",
":",
"del",
"node",
"[",
"'force'",
"]",
"node",
"[",
"'location'",
"]",
"=",
"node",
"[",
"'velocity'",
"]",
"del",
"node",
"[",
"'velocity'",
"]",
"# Even if it's 2D, let's specify three dimensions",
"if",
"not",
"is_3d",
":",
"node",
"[",
"'location'",
"]",
"+=",
"[",
"0.0",
"]",
"return",
"nodes"
] | Runs a force-directed-layout algorithm on the input graph.
iterations - Number of FDL iterations to run in coordinate generation
force_strength - Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening - Multiplier to reduce force applied to nodes
max_velocity - Maximum distance a node can move in one step
max_distance - The maximum distance considered for interactions | [
"Runs",
"a",
"force",
"-",
"directed",
"-",
"layout",
"algorithm",
"on",
"the",
"input",
"graph",
"."
] | 7297450f26ae8cba21914668a5aaa755de8aa14d | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/force_directed_layout.py#L10-L59 | -1 |
||||||
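A usage sketch grounded in the signature above, assuming the module is importable as force_directed_layout; edges are dicts with source/target keys and an optional size weight:

    import force_directed_layout

    edges = [{'source': 'a', 'target': 'b'},
             {'source': 'b', 'target': 'c', 'size': 2}]
    nodes = force_directed_layout.run(edges, iterations=500, is_3d=False)
    # e.g. nodes['a'] == {'location': [x, y, 0.0]}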
89 | patrickfuller/jgraph | python/force_directed_layout.py | _coulomb | def _coulomb(n1, n2, k, r):
"""Calculates Coulomb forces and updates node data."""
# Get relevant positional data
delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])]
distance = sqrt(sum(d ** 2 for d in delta))
# If the deltas are too small, use random values to keep things moving
if distance < 0.1:
delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
distance = sqrt(sum(d ** 2 for d in delta))
# If the distance isn't huge (ie. Coulomb is negligible), calculate
if distance < r:
force = (k / distance) ** 2
n1['force'] = [f - force * d for f, d in zip(n1['force'], delta)]
n2['force'] = [f + force * d for f, d in zip(n2['force'], delta)] | python | def _coulomb(n1, n2, k, r):
"""Calculates Coulomb forces and updates node data."""
# Get relevant positional data
delta = [x2 - x1 for x1, x2 in zip(n1['velocity'], n2['velocity'])]
distance = sqrt(sum(d ** 2 for d in delta))
# If the deltas are too small, use random values to keep things moving
if distance < 0.1:
delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
distance = sqrt(sum(d ** 2 for d in delta))
# If the distance isn't huge (ie. Coulomb is negligible), calculate
if distance < r:
force = (k / distance) ** 2
n1['force'] = [f - force * d for f, d in zip(n1['force'], delta)]
n2['force'] = [f + force * d for f, d in zip(n2['force'], delta)] | [
"def",
"_coulomb",
"(",
"n1",
",",
"n2",
",",
"k",
",",
"r",
")",
":",
"# Get relevant positional data",
"delta",
"=",
"[",
"x2",
"-",
"x1",
"for",
"x1",
",",
"x2",
"in",
"zip",
"(",
"n1",
"[",
"'velocity'",
"]",
",",
"n2",
"[",
"'velocity'",
"]",
")",
"]",
"distance",
"=",
"sqrt",
"(",
"sum",
"(",
"d",
"**",
"2",
"for",
"d",
"in",
"delta",
")",
")",
"# If the deltas are too small, use random values to keep things moving",
"if",
"distance",
"<",
"0.1",
":",
"delta",
"=",
"[",
"uniform",
"(",
"0.1",
",",
"0.2",
")",
"for",
"_",
"in",
"repeat",
"(",
"None",
",",
"3",
")",
"]",
"distance",
"=",
"sqrt",
"(",
"sum",
"(",
"d",
"**",
"2",
"for",
"d",
"in",
"delta",
")",
")",
"# If the distance isn't huge (ie. Coulomb is negligible), calculate",
"if",
"distance",
"<",
"r",
":",
"force",
"=",
"(",
"k",
"/",
"distance",
")",
"**",
"2",
"n1",
"[",
"'force'",
"]",
"=",
"[",
"f",
"-",
"force",
"*",
"d",
"for",
"f",
",",
"d",
"in",
"zip",
"(",
"n1",
"[",
"'force'",
"]",
",",
"delta",
")",
"]",
"n2",
"[",
"'force'",
"]",
"=",
"[",
"f",
"+",
"force",
"*",
"d",
"for",
"f",
",",
"d",
"in",
"zip",
"(",
"n2",
"[",
"'force'",
"]",
",",
"delta",
")",
"]"
] | Calculates Coulomb forces and updates node data. | [
"Calculates",
"Coulomb",
"forces",
"and",
"updates",
"node",
"data",
"."
] | 7297450f26ae8cba21914668a5aaa755de8aa14d | https://github.com/patrickfuller/jgraph/blob/7297450f26ae8cba21914668a5aaa755de8aa14d/python/force_directed_layout.py#L62-L77 | -1 |
||||||
90 | pypyr/pypyr-cli | pypyr/steps/contextclearall.py | run_step | def run_step(context):
"""Wipe the entire context.
Args:
Context is a dictionary or dictionary-like.
Does not require any specific keys in context.
"""
logger.debug("started")
context.clear()
logger.info(f"Context wiped. New context size: {len(context)}")
logger.debug("done") | python | def run_step(context):
"""Wipe the entire context.
Args:
Context is a dictionary or dictionary-like.
Does not require any specific keys in context.
"""
logger.debug("started")
context.clear()
logger.info(f"Context wiped. New context size: {len(context)}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"clear",
"(",
")",
"logger",
".",
"info",
"(",
"f\"Context wiped. New context size: {len(context)}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Wipe the entire context.
Args:
Context is a dictionary or dictionary-like.
Does not require any specific keys in context. | [
"Wipe",
"the",
"entire",
"context",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/contextclearall.py#L8-L20 | -1 |
||||||
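A direct-call sketch of the step above, using pypyr's dict-like Context:

    from pypyr.context import Context
    import pypyr.steps.contextclearall as contextclearall

    context = Context({'key1': 'value1', 'key2': 'value2'})
    contextclearall.run_step(context)
    assert len(context) == 0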
91 | pypyr/pypyr-cli | pypyr/steps/pathcheck.py | run_step | def run_step(context):
"""pypyr step that checks if a file or directory path exists.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- pathsToCheck. str/path-like or list of str/paths.
Path to file on disk to check.
All inputs support formatting expressions. Supports globs.
This step creates pathCheckOut in context, containing the results of the
path check operation.
pathCheckOut:
'inpath':
exists: true # bool. True if path exists.
count: 0 # int. Number of files found for in path.
found: ['path1', 'path2'] # list of strings. Paths of files found.
[count] is 0 if no files found. If you specified a single input
path to check and it exists, it's going to be 1. If you specified multiple
in paths or a glob expression that found more than 1 result, well, take a
guess.
[found] is a list of all the paths found for the [inpath]. If you passed
in a glob or globs, will contain the globs found for [inpath].
This means you can do an existence evaluation like this in a formatting
expression: '{pathCheckOut[inpathhere][exists]}'
Returns:
None. updates context arg.
Raises:
pypyr.errors.KeyNotInContextError: pathExists missing in context.
pypyr.errors.KeyInContextHasNoValueError: pathCheck exists but is None.
"""
logger.debug("started")
context.assert_key_has_value(key='pathCheck', caller=__name__)
paths_to_check = context['pathCheck']
if not paths_to_check:
raise KeyInContextHasNoValueError("context['pathCheck'] must have a "
f"value for {__name__}.")
# pathsToCheck can be a string or a list in case there are multiple paths
if isinstance(paths_to_check, list):
check_me = paths_to_check
else:
# assuming it's a str/path at this point
check_me = [paths_to_check]
out = {}
total_found = 0
for path in check_me:
logger.debug(f"checking path: {path}")
formatted_path = context.get_formatted_string(path)
found_paths = pypyr.utils.filesystem.get_glob(formatted_path)
no_of_paths = len(found_paths)
out[path] = {
'exists': no_of_paths > 0,
'count': no_of_paths,
'found': found_paths
}
total_found = total_found + no_of_paths
context['pathCheckOut'] = out
logger.info(f'checked {len(out)} path(s) and found {total_found}')
logger.debug("done") | python | def run_step(context):
"""pypyr step that checks if a file or directory path exists.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- pathsToCheck. str/path-like or list of str/paths.
Path to file on disk to check.
All inputs support formatting expressions. Supports globs.
This step creates pathCheckOut in context, containing the results of the
path check operation.
pathCheckOut:
'inpath':
exists: true # bool. True if path exists.
count: 0 # int. Number of files found for in path.
found: ['path1', 'path2'] # list of strings. Paths of files found.
[count] is 0 if no files found. If you specified a single input
path to check and it exists, it's going to be 1. If you specified multiple
in paths or a glob expression that found more than 1 result, well, take a
guess.
[found] is a list of all the paths found for the [inpath]. If you passed
in a glob or globs, will contain the globs found for [inpath].
This means you can do an existence evaluation like this in a formatting
expression: '{pathCheckOut[inpathhere][exists]}'
Returns:
None. updates context arg.
Raises:
pypyr.errors.KeyNotInContextError: pathExists missing in context.
pypyr.errors.KeyInContextHasNoValueError: pathCheck exists but is None.
"""
logger.debug("started")
context.assert_key_has_value(key='pathCheck', caller=__name__)
paths_to_check = context['pathCheck']
if not paths_to_check:
raise KeyInContextHasNoValueError("context['pathCheck'] must have a "
f"value for {__name__}.")
# pathsToCheck can be a string or a list in case there are multiple paths
if isinstance(paths_to_check, list):
check_me = paths_to_check
else:
# assuming it's a str/path at this point
check_me = [paths_to_check]
out = {}
total_found = 0
for path in check_me:
logger.debug(f"checking path: {path}")
formatted_path = context.get_formatted_string(path)
found_paths = pypyr.utils.filesystem.get_glob(formatted_path)
no_of_paths = len(found_paths)
out[path] = {
'exists': no_of_paths > 0,
'count': no_of_paths,
'found': found_paths
}
total_found = total_found + no_of_paths
context['pathCheckOut'] = out
logger.info(f'checked {len(out)} path(s) and found {total_found}')
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'pathCheck'",
",",
"caller",
"=",
"__name__",
")",
"paths_to_check",
"=",
"context",
"[",
"'pathCheck'",
"]",
"if",
"not",
"paths_to_check",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"\"context['pathCheck'] must have a \"",
"f\"value for {__name__}.\"",
")",
"# pathsToCheck can be a string or a list in case there are multiple paths",
"if",
"isinstance",
"(",
"paths_to_check",
",",
"list",
")",
":",
"check_me",
"=",
"paths_to_check",
"else",
":",
"# assuming it's a str/path at this point",
"check_me",
"=",
"[",
"paths_to_check",
"]",
"out",
"=",
"{",
"}",
"total_found",
"=",
"0",
"for",
"path",
"in",
"check_me",
":",
"logger",
".",
"debug",
"(",
"f\"checking path: {path}\"",
")",
"formatted_path",
"=",
"context",
".",
"get_formatted_string",
"(",
"path",
")",
"found_paths",
"=",
"pypyr",
".",
"utils",
".",
"filesystem",
".",
"get_glob",
"(",
"formatted_path",
")",
"no_of_paths",
"=",
"len",
"(",
"found_paths",
")",
"out",
"[",
"path",
"]",
"=",
"{",
"'exists'",
":",
"no_of_paths",
">",
"0",
",",
"'count'",
":",
"no_of_paths",
",",
"'found'",
":",
"found_paths",
"}",
"total_found",
"=",
"total_found",
"+",
"no_of_paths",
"context",
"[",
"'pathCheckOut'",
"]",
"=",
"out",
"logger",
".",
"info",
"(",
"f'checked {len(out)} path(s) and found {total_found}'",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | pypyr step that checks if a file or directory path exists.
Args:
context: pypyr.context.Context. Mandatory.
The following context key must exist
- pathsToCheck. str/path-like or list of str/paths.
Path to file on disk to check.
All inputs support formatting expressions. Supports globs.
This step creates pathCheckOut in context, containing the results of the
path check operation.
pathCheckOut:
'inpath':
exists: true # bool. True if path exists.
count: 0 # int. Number of files found for in path.
found: ['path1', 'path2'] # list of strings. Paths of files found.
[count] is 0 if no files found. If you specified a single input
path to check and it exists, it's going to be 1. If you specified multiple
in paths or a glob expression that found more than 1 result, well, take a
guess.
[found] is a list of all the paths found for the [inpath]. If you passed
in a glob or globs, will contain the globs found for [inpath].
This means you can do an existence evaluation like this in a formatting
expression: '{pathCheckOut[inpathhere][exists]}'
Returns:
None. updates context arg.
Raises:
pypyr.errors.KeyNotInContextError: pathExists missing in context.
pypyr.errors.KeyInContextHasNoValueError: pathCheck exists but is None. | [
"pypyr",
"step",
"that",
"checks",
"if",
"a",
"file",
"or",
"directory",
"path",
"exists",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/pathcheck.py#L10-L83 | -1 |
||||||
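A direct-call sketch of the step above; note that the code keys off 'pathCheck' in context, and the glob here is illustrative:

    from pypyr.context import Context
    import pypyr.steps.pathcheck as pathcheck

    context = Context({'pathCheck': './pipelines/*.yaml'})
    pathcheck.run_step(context)
    result = context['pathCheckOut']['./pipelines/*.yaml']
    print(result['exists'], result['count'], result['found'])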
92 | pypyr/pypyr-cli | pypyr/steps/filewritejson.py | run_step | def run_step(context):
"""Write payload out to json file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteJson
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this key to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteJson or
fileWriteJson['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
fileWriteJson['path'] exists but is None.
"""
logger.debug("started")
context.assert_child_key_has_value('fileWriteJson', 'path', __name__)
out_path = context.get_formatted_string(context['fileWriteJson']['path'])
# doing it like this to safeguard against accidentally dumping all context
# with potentially sensitive values in it to disk if payload exists but is
# None.
is_payload_specified = 'payload' in context['fileWriteJson']
logger.debug(f"opening destination file for writing: {out_path}")
os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)
with open(out_path, 'w') as outfile:
if is_payload_specified:
payload = context['fileWriteJson']['payload']
formatted_iterable = context.get_formatted_iterable(payload)
else:
formatted_iterable = context.get_formatted_iterable(context)
json.dump(formatted_iterable, outfile, indent=2, ensure_ascii=False)
logger.info(f"formatted context content and wrote to {out_path}")
logger.debug("done") | python | def run_step(context):
"""Write payload out to json file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteJson
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this key to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteJson or
fileWriteJson['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
fileWriteJson['path'] exists but is None.
"""
logger.debug("started")
context.assert_child_key_has_value('fileWriteJson', 'path', __name__)
out_path = context.get_formatted_string(context['fileWriteJson']['path'])
# doing it like this to safeguard against accidentally dumping all context
# with potentially sensitive values in it to disk if payload exists but is
# None.
is_payload_specified = 'payload' in context['fileWriteJson']
logger.debug(f"opening destination file for writing: {out_path}")
os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)
with open(out_path, 'w') as outfile:
if is_payload_specified:
payload = context['fileWriteJson']['payload']
formatted_iterable = context.get_formatted_iterable(payload)
else:
formatted_iterable = context.get_formatted_iterable(context)
json.dump(formatted_iterable, outfile, indent=2, ensure_ascii=False)
logger.info(f"formatted context content and wrote to {out_path}")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"context",
".",
"assert_child_key_has_value",
"(",
"'fileWriteJson'",
",",
"'path'",
",",
"__name__",
")",
"out_path",
"=",
"context",
".",
"get_formatted_string",
"(",
"context",
"[",
"'fileWriteJson'",
"]",
"[",
"'path'",
"]",
")",
"# doing it like this to safeguard against accidentally dumping all context",
"# with potentially sensitive values in it to disk if payload exists but is",
"# None.",
"is_payload_specified",
"=",
"'payload'",
"in",
"context",
"[",
"'fileWriteJson'",
"]",
"logger",
".",
"debug",
"(",
"f\"opening destination file for writing: {out_path}\"",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"out_path",
")",
")",
",",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"out_path",
",",
"'w'",
")",
"as",
"outfile",
":",
"if",
"is_payload_specified",
":",
"payload",
"=",
"context",
"[",
"'fileWriteJson'",
"]",
"[",
"'payload'",
"]",
"formatted_iterable",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"payload",
")",
"else",
":",
"formatted_iterable",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"context",
")",
"json",
".",
"dump",
"(",
"formatted_iterable",
",",
"outfile",
",",
"indent",
"=",
"2",
",",
"ensure_ascii",
"=",
"False",
")",
"logger",
".",
"info",
"(",
"f\"formatted context content and wrote to {out_path}\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Write payload out to json file.
Args:
context: pypyr.context.Context. Mandatory.
The following context keys expected:
- fileWriteJson
- path. mandatory. path-like. Write output file to
here. Will create directories in path for you.
- payload. optional. Write this key to output file. If not
specified, output entire context.
Returns:
None.
Raises:
pypyr.errors.KeyNotInContextError: fileWriteJson or
fileWriteJson['path'] missing in context.
pypyr.errors.KeyInContextHasNoValueError: fileWriteJson or
fileWriteJson['path'] exists but is None. | [
"Write",
"payload",
"out",
"to",
"json",
"file",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/filewritejson.py#L10-L53 | -1 |
||||||
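A direct-call sketch of the step above; the payload value shows a formatting expression being resolved from context before the JSON is written (paths and keys are illustrative):

    from pypyr.context import Context
    import pypyr.steps.filewritejson as filewritejson

    context = Context({
        'arbKey': 'arb value',
        'fileWriteJson': {
            'path': 'out/payload.json',
            'payload': {'greeting': '{arbKey}'},
        }})
    filewritejson.run_step(context)   # writes {"greeting": "arb value"} to out/payload.json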
93 | pypyr/pypyr-cli | pypyr/steps/pype.py | run_step | def run_step(context):
"""Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
current (i.e parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty.
"""
logger.debug("started")
(pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) = get_arguments(context)
try:
if use_parent_context:
logger.info(f"pyping {pipeline_name}, using parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
context=context,
parse_input=not skip_parse,
loader=loader
)
else:
logger.info(f"pyping {pipeline_name}, without parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
working_dir=context.working_dir,
parse_input=not skip_parse,
loader=loader
)
logger.info(f"pyped {pipeline_name}.")
except Exception as ex_info:
# yes, yes, don't catch Exception. Have to, though, in order to swallow
# errs if !raise_error
logger.error(f"Something went wrong pyping {pipeline_name}. "
f"{type(ex_info).__name__}: {ex_info}")
if raise_error:
logger.debug("Raising original exception to caller.")
raise
else:
logger.debug(
f"raiseError is False. Swallowing error in {pipeline_name}.")
logger.debug("done") | python | def run_step(context):
"""Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
current (i.e parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty.
"""
logger.debug("started")
(pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) = get_arguments(context)
try:
if use_parent_context:
logger.info(f"pyping {pipeline_name}, using parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
context=context,
parse_input=not skip_parse,
loader=loader
)
else:
logger.info(f"pyping {pipeline_name}, without parent context.")
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
working_dir=context.working_dir,
parse_input=not skip_parse,
loader=loader
)
logger.info(f"pyped {pipeline_name}.")
except Exception as ex_info:
# yes, yes, don't catch Exception. Have to, though, in order to swallow
# errs if !raise_error
logger.error(f"Something went wrong pyping {pipeline_name}. "
f"{type(ex_info).__name__}: {ex_info}")
if raise_error:
logger.debug("Raising original exception to caller.")
raise
else:
logger.debug(
f"raiseError is False. Swallowing error in {pipeline_name}.")
logger.debug("done") | [
"def",
"run_step",
"(",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"started\"",
")",
"(",
"pipeline_name",
",",
"use_parent_context",
",",
"pipe_arg",
",",
"skip_parse",
",",
"raise_error",
",",
"loader",
",",
")",
"=",
"get_arguments",
"(",
"context",
")",
"try",
":",
"if",
"use_parent_context",
":",
"logger",
".",
"info",
"(",
"f\"pyping {pipeline_name}, using parent context.\"",
")",
"pipelinerunner",
".",
"load_and_run_pipeline",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"pipeline_context_input",
"=",
"pipe_arg",
",",
"context",
"=",
"context",
",",
"parse_input",
"=",
"not",
"skip_parse",
",",
"loader",
"=",
"loader",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"pyping {pipeline_name}, without parent context.\"",
")",
"pipelinerunner",
".",
"load_and_run_pipeline",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"pipeline_context_input",
"=",
"pipe_arg",
",",
"working_dir",
"=",
"context",
".",
"working_dir",
",",
"parse_input",
"=",
"not",
"skip_parse",
",",
"loader",
"=",
"loader",
")",
"logger",
".",
"info",
"(",
"f\"pyped {pipeline_name}.\"",
")",
"except",
"Exception",
"as",
"ex_info",
":",
"# yes, yes, don't catch Exception. Have to, though, in order to swallow",
"# errs if !raise_error",
"logger",
".",
"error",
"(",
"f\"Something went wrong pyping {pipeline_name}. \"",
"f\"{type(ex_info).__name__}: {ex_info}\"",
")",
"if",
"raise_error",
":",
"logger",
".",
"debug",
"(",
"\"Raising original exception to caller.\"",
")",
"raise",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"raiseError is False. Swallowing error in {pipeline_name}.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
current (i.e parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty. | [
"Run",
"another",
"pipeline",
"from",
"this",
"step",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/pype.py#L10-L93 | -1 |
||||||
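A direct-call sketch of the step above, assuming a child pipeline exists at ./pipelines/child-pipeline.yaml (the name is illustrative); working_dir is set explicitly because the default loader resolves pipelines relative to it:

    from pypyr.context import Context
    import pypyr.steps.pype as pype

    context = Context({'pype': {'name': 'child-pipeline',
                                'skipParse': True,
                                'useParentContext': True,
                                'raiseError': False}})
    context.working_dir = '.'
    pype.run_step(context)   # runs the child pipeline, sharing this context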
94 | pypyr/pypyr-cli | pypyr/steps/pype.py | get_arguments | def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error #bool
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError(
"pypyr.steps.pype missing 'name' in the 'pype' context item. "
"You need to specify the pipeline name to run another "
"pipeline.") from err
use_parent_context = pype.get('useParentContext', True)
pipe_arg = pype.get('pipeArg', None)
skip_parse = pype.get('skipParse', True)
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
return (
pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) | python | def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error #bool
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError(
"pypyr.steps.pype missing 'name' in the 'pype' context item. "
"You need to specify the pipeline name to run another "
"pipeline.") from err
use_parent_context = pype.get('useParentContext', True)
pipe_arg = pype.get('pipeArg', None)
skip_parse = pype.get('skipParse', True)
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
return (
pipeline_name,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
) | [
"def",
"get_arguments",
"(",
"context",
")",
":",
"context",
".",
"assert_key_has_value",
"(",
"key",
"=",
"'pype'",
",",
"caller",
"=",
"__name__",
")",
"pype",
"=",
"context",
".",
"get_formatted",
"(",
"'pype'",
")",
"try",
":",
"pipeline_name",
"=",
"pype",
"[",
"'name'",
"]",
"if",
"pipeline_name",
"is",
"None",
":",
"raise",
"KeyInContextHasNoValueError",
"(",
"\"pypyr.steps.pype ['pype']['name'] exists but is empty.\"",
")",
"except",
"KeyError",
"as",
"err",
":",
"raise",
"KeyNotInContextError",
"(",
"\"pypyr.steps.pype missing 'name' in the 'pype' context item. \"",
"\"You need to specify the pipeline name to run another \"",
"\"pipeline.\"",
")",
"from",
"err",
"use_parent_context",
"=",
"pype",
".",
"get",
"(",
"'useParentContext'",
",",
"True",
")",
"pipe_arg",
"=",
"pype",
".",
"get",
"(",
"'pipeArg'",
",",
"None",
")",
"skip_parse",
"=",
"pype",
".",
"get",
"(",
"'skipParse'",
",",
"True",
")",
"raise_error",
"=",
"pype",
".",
"get",
"(",
"'raiseError'",
",",
"True",
")",
"loader",
"=",
"pype",
".",
"get",
"(",
"'loader'",
",",
"None",
")",
"return",
"(",
"pipeline_name",
",",
"use_parent_context",
",",
"pipe_arg",
",",
"skip_parse",
",",
"raise_error",
",",
"loader",
",",
")"
] | Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
use_parent_context, #bool
pipe_arg, #str
skip_parse, #bool
raise_error #bool
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None. | [
"Parse",
"arguments",
"for",
"pype",
"from",
"context",
"and",
"assign",
"default",
"values",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/pype.py#L96-L143 | -1 |
||||||
95 | pypyr/pypyr-cli | pypyr/pypeloaders/fileloader.py | get_pipeline_path | def get_pipeline_path(pipeline_name, working_directory):
"""Look for the pipeline in the various places it could be.
First checks the cwd. Then checks pypyr/pipelines dir.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines.
"""
logger.debug("starting")
# look for name.yaml in the pipelines/ sub-directory
logger.debug(f"current directory is {working_directory}")
# looking for {cwd}/pipelines/[pipeline_name].yaml
pipeline_path = os.path.abspath(os.path.join(
working_directory,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
logger.debug(f"{pipeline_name} not found in current "
"directory/pipelines folder. Looking in pypyr install "
"directory instead.")
pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logger.debug(f"pypyr installation directory is: {pypyr_dir}")
pipeline_path = os.path.abspath(os.path.join(
pypyr_dir,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
raise PipelineNotFoundError(f"{pipeline_name}.yaml not found in "
f"either "
f"{working_directory}/pipelines "
f"or {pypyr_dir}/pipelines")
logger.debug("done")
return pipeline_path | python | def get_pipeline_path(pipeline_name, working_directory):
"""Look for the pipeline in the various places it could be.
First checks the cwd. Then checks pypyr/pipelines dir.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines.
"""
logger.debug("starting")
# look for name.yaml in the pipelines/ sub-directory
logger.debug(f"current directory is {working_directory}")
# looking for {cwd}/pipelines/[pipeline_name].yaml
pipeline_path = os.path.abspath(os.path.join(
working_directory,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
logger.debug(f"{pipeline_name} not found in current "
"directory/pipelines folder. Looking in pypyr install "
"directory instead.")
pypyr_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logger.debug(f"pypyr installation directory is: {pypyr_dir}")
pipeline_path = os.path.abspath(os.path.join(
pypyr_dir,
'pipelines',
pipeline_name + '.yaml'))
if os.path.isfile(pipeline_path):
logger.debug(f"Found {pipeline_path}")
else:
raise PipelineNotFoundError(f"{pipeline_name}.yaml not found in "
f"either "
f"{working_directory}/pipelines "
f"or {pypyr_dir}/pipelines")
logger.debug("done")
return pipeline_path | [
"def",
"get_pipeline_path",
"(",
"pipeline_name",
",",
"working_directory",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# look for name.yaml in the pipelines/ sub-directory",
"logger",
".",
"debug",
"(",
"f\"current directory is {working_directory}\"",
")",
"# looking for {cwd}/pipelines/[pipeline_name].yaml",
"pipeline_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"working_directory",
",",
"'pipelines'",
",",
"pipeline_name",
"+",
"'.yaml'",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"pipeline_path",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Found {pipeline_path}\"",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"f\"{pipeline_name} not found in current \"",
"\"directory/pipelines folder. Looking in pypyr install \"",
"\"directory instead.\"",
")",
"pypyr_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
")",
"logger",
".",
"debug",
"(",
"f\"pypyr installation directory is: {pypyr_dir}\"",
")",
"pipeline_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pypyr_dir",
",",
"'pipelines'",
",",
"pipeline_name",
"+",
"'.yaml'",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"pipeline_path",
")",
":",
"logger",
".",
"debug",
"(",
"f\"Found {pipeline_path}\"",
")",
"else",
":",
"raise",
"PipelineNotFoundError",
"(",
"f\"{pipeline_name}.yaml not found in \"",
"f\"either \"",
"f\"{working_directory}/pipelines \"",
"f\"or {pypyr_dir}/pipelines\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"pipeline_path"
] | Look for the pipeline in the various places it could be.
First checks the cwd. Then checks pypyr/pipelines dir.
Args:
pipeline_name: string. Name of pipeline to find
working_directory: string. Path in which to look for pipeline_name.yaml
Returns:
Absolute path to the pipeline_name.yaml file
Raises:
PipelineNotFoundError: if pipeline_name.yaml not found in working_dir
or in {pypyr install dir}/pipelines. | [
"Look",
"for",
"the",
"pipeline",
"in",
"the",
"various",
"places",
"it",
"could",
"be",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pypeloaders/fileloader.py#L11-L61 | -1 |
||||||
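A minimal usage sketch for the get_pipeline_path entry above. The function signature and the import path follow the row's path column (pypyr/pypeloaders/fileloader.py); the pipeline name 'my-pipeline' is a hypothetical example, not taken from the dataset.

import os
from pypyr.pypeloaders.fileloader import get_pipeline_path  # import path assumed from the row's path column

# looks for {cwd}/pipelines/my-pipeline.yaml first, then falls back to the
# pypyr install directory, raising PipelineNotFoundError if neither exists
path = get_pipeline_path(pipeline_name='my-pipeline',
                         working_directory=os.getcwd())
print(path)  # absolute path to the resolved my-pipeline.yaml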
96 | pypyr/pypyr-cli | pypyr/pypeloaders/fileloader.py | get_pipeline_definition | def get_pipeline_definition(pipeline_name, working_dir):
"""Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
FileNotFoundError: pipeline_name.yaml not found in the various pipeline
dirs.
"""
logger.debug("starting")
pipeline_path = get_pipeline_path(
pipeline_name=pipeline_name,
working_directory=working_dir)
logger.debug(f"Trying to open pipeline at path {pipeline_path}")
try:
with open(pipeline_path) as yaml_file:
pipeline_definition = pypyr.yaml.get_pipeline_yaml(
yaml_file)
logger.debug(
f"found {len(pipeline_definition)} stages in pipeline.")
except FileNotFoundError:
logger.error(
"The pipeline doesn't exist. Looking for a file here: "
f"{pipeline_name}.yaml in the /pipelines sub directory.")
raise
logger.debug("pipeline definition loaded")
logger.debug("done")
return pipeline_definition | python | def get_pipeline_definition(pipeline_name, working_dir):
"""Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
FileNotFoundError: pipeline_name.yaml not found in the various pipeline
dirs.
"""
logger.debug("starting")
pipeline_path = get_pipeline_path(
pipeline_name=pipeline_name,
working_directory=working_dir)
logger.debug(f"Trying to open pipeline at path {pipeline_path}")
try:
with open(pipeline_path) as yaml_file:
pipeline_definition = pypyr.yaml.get_pipeline_yaml(
yaml_file)
logger.debug(
f"found {len(pipeline_definition)} stages in pipeline.")
except FileNotFoundError:
logger.error(
"The pipeline doesn't exist. Looking for a file here: "
f"{pipeline_name}.yaml in the /pipelines sub directory.")
raise
logger.debug("pipeline definition loaded")
logger.debug("done")
return pipeline_definition | [
"def",
"get_pipeline_definition",
"(",
"pipeline_name",
",",
"working_dir",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"pipeline_path",
"=",
"get_pipeline_path",
"(",
"pipeline_name",
"=",
"pipeline_name",
",",
"working_directory",
"=",
"working_dir",
")",
"logger",
".",
"debug",
"(",
"f\"Trying to open pipeline at path {pipeline_path}\"",
")",
"try",
":",
"with",
"open",
"(",
"pipeline_path",
")",
"as",
"yaml_file",
":",
"pipeline_definition",
"=",
"pypyr",
".",
"yaml",
".",
"get_pipeline_yaml",
"(",
"yaml_file",
")",
"logger",
".",
"debug",
"(",
"f\"found {len(pipeline_definition)} stages in pipeline.\"",
")",
"except",
"FileNotFoundError",
":",
"logger",
".",
"error",
"(",
"\"The pipeline doesn't exist. Looking for a file here: \"",
"f\"{pipeline_name}.yaml in the /pipelines sub directory.\"",
")",
"raise",
"logger",
".",
"debug",
"(",
"\"pipeline definition loaded\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")",
"return",
"pipeline_definition"
] | Open and parse the pipeline definition yaml.
Parses pipeline yaml and returns dictionary representing the pipeline.
pipeline_name.yaml should be in the working_dir/pipelines/ directory.
Args:
pipeline_name: string. Name of pipeline. This will be the file-name of
the pipeline - i.e {pipeline_name}.yaml
working_dir: path. Start looking in
./working_dir/pipelines/pipeline_name.yaml
Returns:
dict describing the pipeline, parsed from the pipeline yaml.
Raises:
FileNotFoundError: pipeline_name.yaml not found in the various pipeline
dirs. | [
"Open",
"and",
"parse",
"the",
"pipeline",
"definition",
"yaml",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/pypeloaders/fileloader.py#L64-L107 | -1 |
||||||
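A similar hedged sketch for the get_pipeline_definition row that follows: the pipeline name and project path are hypothetical, and the shape of the returned dict depends entirely on the pipeline yaml being loaded.

from pypyr.pypeloaders.fileloader import get_pipeline_definition  # import path assumed from the row's path column

# loads {working_dir}/pipelines/my-pipeline.yaml and parses it into a dict
pipeline = get_pipeline_definition(pipeline_name='my-pipeline',
                                   working_dir='/path/to/project')
print(list(pipeline))  # top-level groups defined in the yaml, e.g. 'steps'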
97 | pypyr/pypyr-cli | pypyr/dsl.py | SpecialTagDirective.to_yaml | def to_yaml(cls, representer, node):
"""How to serialize this class back to yaml."""
return representer.represent_scalar(cls.yaml_tag, node.value) | python | def to_yaml(cls, representer, node):
"""How to serialize this class back to yaml."""
return representer.represent_scalar(cls.yaml_tag, node.value) | [
"def",
"to_yaml",
"(",
"cls",
",",
"representer",
",",
"node",
")",
":",
"return",
"representer",
".",
"represent_scalar",
"(",
"cls",
".",
"yaml_tag",
",",
"node",
".",
"value",
")"
] | How to serialize this class back to yaml. | [
"How",
"to",
"serialize",
"this",
"class",
"back",
"to",
"yaml",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L55-L57 | -1 |
||||||
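to_yaml above is the dump-side half of a custom yaml tag. A hedged sketch of how such a class could be registered with ruamel.yaml so the tag round-trips on load and dump; the PyString class name comes from the neighbouring rows, while the ruamel registration call is an assumption about how pypyr wires it up, not something shown in this row.

from ruamel.yaml import YAML
from pypyr.dsl import PyString  # a SpecialTagDirective subclass, per the adjacent rows

yaml = YAML()
yaml.register_class(PyString)  # ruamel then calls PyString.to_yaml when dumping !py scalars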
98 | pypyr/pypyr-cli | pypyr/dsl.py | PyString.get_value | def get_value(self, context):
"""Run python eval on the input string."""
if self.value:
return expressions.eval_string(self.value, context)
else:
# Empty input raises cryptic EOF syntax err, this more human
# friendly
raise ValueError('!py string expression is empty. It must be a '
'valid python expression instead.') | python | def get_value(self, context):
"""Run python eval on the input string."""
if self.value:
return expressions.eval_string(self.value, context)
else:
# Empty input raises cryptic EOF syntax err, this more human
# friendly
raise ValueError('!py string expression is empty. It must be a '
'valid python expression instead.') | [
"def",
"get_value",
"(",
"self",
",",
"context",
")",
":",
"if",
"self",
".",
"value",
":",
"return",
"expressions",
".",
"eval_string",
"(",
"self",
".",
"value",
",",
"context",
")",
"else",
":",
"# Empty input raises cryptic EOF syntax err, this more human",
"# friendly",
"raise",
"ValueError",
"(",
"'!py string expression is empty. It must be a '",
"'valid python expression instead.'",
")"
] | Run python eval on the input string. | [
"Run",
"python",
"eval",
"on",
"the",
"input",
"string",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L107-L115 | -1 |
||||||
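A hedged sketch of evaluating a !py expression against a pypyr context, matching the PyString.get_value row that follows. It assumes the directive's constructor stores the raw expression string on .value (consistent with to_yaml above) and that pypyr.context.Context behaves like a dict; the 'breakfasts' key is purely illustrative.

from pypyr.context import Context
from pypyr.dsl import PyString

# the expression string is evaluated against the context via
# expressions.eval_string, per the get_value body in the row below
expression = PyString('len(breakfasts) > 2')
context = Context({'breakfasts': ['toast', 'eggs', 'coffee']})
print(expression.get_value(context))  # expected True if eval behaves as described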
99 | pypyr/pypyr-cli | pypyr/dsl.py | Step.foreach_loop | def foreach_loop(self, context):
"""Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# Loop decorators only evaluated once, not for every step repeat
# execution.
foreach = context.get_formatted_iterable(self.foreach_items)
foreach_length = len(foreach)
logger.info(f"foreach decorator will loop {foreach_length} times.")
for i in foreach:
logger.info(f"foreach: running step {i}")
# the iterator must be available to the step when it executes
context['i'] = i
# conditional operators apply to each iteration, so might be an
# iteration run, skips or swallows.
self.run_conditional_decorators(context)
logger.debug(f"foreach: done step {i}")
logger.debug(f"foreach decorator looped {foreach_length} times.")
logger.debug("done") | python | def foreach_loop(self, context):
"""Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
"""
logger.debug("starting")
# Loop decorators only evaluated once, not for every step repeat
# execution.
foreach = context.get_formatted_iterable(self.foreach_items)
foreach_length = len(foreach)
logger.info(f"foreach decorator will loop {foreach_length} times.")
for i in foreach:
logger.info(f"foreach: running step {i}")
# the iterator must be available to the step when it executes
context['i'] = i
# conditional operators apply to each iteration, so might be an
# iteration run, skips or swallows.
self.run_conditional_decorators(context)
logger.debug(f"foreach: done step {i}")
logger.debug(f"foreach decorator looped {foreach_length} times.")
logger.debug("done") | [
"def",
"foreach_loop",
"(",
"self",
",",
"context",
")",
":",
"logger",
".",
"debug",
"(",
"\"starting\"",
")",
"# Loop decorators only evaluated once, not for every step repeat",
"# execution.",
"foreach",
"=",
"context",
".",
"get_formatted_iterable",
"(",
"self",
".",
"foreach_items",
")",
"foreach_length",
"=",
"len",
"(",
"foreach",
")",
"logger",
".",
"info",
"(",
"f\"foreach decorator will loop {foreach_length} times.\"",
")",
"for",
"i",
"in",
"foreach",
":",
"logger",
".",
"info",
"(",
"f\"foreach: running step {i}\"",
")",
"# the iterator must be available to the step when it executes",
"context",
"[",
"'i'",
"]",
"=",
"i",
"# conditional operators apply to each iteration, so might be an",
"# iteration run, skips or swallows.",
"self",
".",
"run_conditional_decorators",
"(",
"context",
")",
"logger",
".",
"debug",
"(",
"f\"foreach: done step {i}\"",
")",
"logger",
".",
"debug",
"(",
"f\"foreach decorator looped {foreach_length} times.\"",
")",
"logger",
".",
"debug",
"(",
"\"done\"",
")"
] | Run step once for each item in foreach_items.
On each iteration, the invoked step can use context['i'] to get the
current iterator value.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate. | [
"Run",
"step",
"once",
"for",
"each",
"item",
"in",
"foreach_items",
"."
] | 4003f999cd5eb030b4c7407317de728f5115a80f | https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L253-L283 | -1 |
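A hedged sketch of the step shape that drives foreach_loop above: a step carrying a foreach decorator runs once per item, with context['i'] set to the current item before each run. The step name 'pypyr.steps.echo' and its 'echoMe' input are assumptions about pypyr built-ins used only for illustration.

# shape of a parsed pipeline step, as a plain python dict
step_definition = {
    'name': 'pypyr.steps.echo',                # assumed built-in step name
    'foreach': ['apple', 'pear', 'banana'],    # becomes foreach_items
    'in': {'echoMe': 'current fruit is {i}'},  # {i} is formatted on each iteration
}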