Dataset schema (one row per source file; ⌀ marks columns that may be null):

| column | dtype | range | nulls |
|---|---|---|---|
| hexsha | stringlengths | 40-40 | |
| size | int64 | 5-2.06M | |
| ext | stringclasses | 10 values | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 3-248 | |
| max_stars_repo_name | stringlengths | 5-125 | |
| max_stars_repo_head_hexsha | stringlengths | 40-78 | |
| max_stars_repo_licenses | sequencelengths | 1-10 | |
| max_stars_count | int64 | 1-191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24-24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24-24 | ⌀ |
| max_issues_repo_path | stringlengths | 3-248 | |
| max_issues_repo_name | stringlengths | 5-125 | |
| max_issues_repo_head_hexsha | stringlengths | 40-78 | |
| max_issues_repo_licenses | sequencelengths | 1-10 | |
| max_issues_count | int64 | 1-67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24-24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24-24 | ⌀ |
| max_forks_repo_path | stringlengths | 3-248 | |
| max_forks_repo_name | stringlengths | 5-125 | |
| max_forks_repo_head_hexsha | stringlengths | 40-78 | |
| max_forks_repo_licenses | sequencelengths | 1-10 | |
| max_forks_count | int64 | 1-105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24-24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24-24 | ⌀ |
| content | stringlengths | 5-2.06M | |
| avg_line_length | float64 | 1-1.02M | |
| max_line_length | int64 | 3-1.03M | |
| alphanum_fraction | float64 | 0-1 | |
| count_classes | int64 | 0-1.6M | |
| score_classes | float64 | 0-1 | |
| count_generators | int64 | 0-651k | |
| score_generators | float64 | 0-1 | |
| count_decorators | int64 | 0-990k | |
| score_decorators | float64 | 0-1 | |
| count_async_functions | int64 | 0-235k | |
| score_async_functions | float64 | 0-1 | |
| count_documentation | int64 | 0-1.04M | |
| score_documentation | float64 | 0-1 | |
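As a quick illustration of how rows with this schema can be queried, here is a minimal sketch. It assumes the rows are already available as a pandas DataFrame with exactly these column names; the file name `python_files.parquet`, the thresholds, and the choice of pandas itself are placeholders rather than anything specified above.

```python
import pandas as pd

# Placeholder load step: the actual storage location/format of the rows is not given above.
df = pd.read_parquet("python_files.parquet")

# Keep well-documented files that define at least one class.
subset = df[(df["score_documentation"] > 0.5) & (df["count_classes"] > 0)]
print(subset[["size", "avg_line_length", "max_line_length", "alphanum_fraction"]].describe())

# License columns hold sequences; explode them to count how often each license occurs.
print(df.explode("max_stars_repo_licenses")["max_stars_repo_licenses"].value_counts())
```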
hexsha: 8a5657a9ba609c4b80f71648ff43f36e553c66c2 | size: 26,287 | ext: py | lang: Python
max_stars_repo_path: azure-devops/azext_devops/vstsCompressed/work_item_tracking_process/v4_0/models/models.py | max_stars_repo_name: vijayraavi/azure-devops-cli-extension | max_stars_repo_head_hexsha: 88f1420c5815cb09bea15b050f4c553e0f326dad | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: azure-devops/azext_devops/vstsCompressed/work_item_tracking_process/v4_0/models/models.py | max_issues_repo_name: vijayraavi/azure-devops-cli-extension | max_issues_repo_head_hexsha: 88f1420c5815cb09bea15b050f4c553e0f326dad | max_issues_repo_licenses: ["MIT"] | max_issues_count: 37 | max_issues_repo_issues_event_min_datetime: 2020-04-27T07:45:19.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-05T07:27:15.000Z
max_forks_repo_path: azure-devops/azext_devops/vstsCompressed/work_item_tracking_process/v4_0/models/models.py | max_forks_repo_name: vijayraavi/azure-devops-cli-extension | max_forks_repo_head_hexsha: 88f1420c5815cb09bea15b050f4c553e0f326dad | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Control(Model):
"""Control.
:param contribution: Contribution for the control.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param control_type: Type of the control.
:type control_type: str
:param height: Height of the control, for html controls.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be set by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the field
:type label: str
:param metadata: Inner text of the control.
:type metadata: str
:param order:
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param read_only: A value indicating if the control is readonly.
:type read_only: bool
:param visible: A value indicating if the control should be hidden or not.
:type visible: bool
:param watermark: Watermark text for the textbox.
:type watermark: str
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'control_type': {'key': 'controlType', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'},
'watermark': {'key': 'watermark', 'type': 'str'}
}
def __init__(self, contribution=None, control_type=None, height=None, id=None, inherited=None, is_contribution=None, label=None, metadata=None, order=None, overridden=None, read_only=None, visible=None, watermark=None):
super(Control, self).__init__()
self.contribution = contribution
self.control_type = control_type
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.metadata = metadata
self.order = order
self.overridden = overridden
self.read_only = read_only
self.visible = visible
self.watermark = watermark
class CreateProcessModel(Model):
"""CreateProcessModel.
:param description:
:type description: str
:param name:
:type name: str
:param parent_process_type_id:
:type parent_process_type_id: str
:param reference_name:
:type reference_name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'}
}
def __init__(self, description=None, name=None, parent_process_type_id=None, reference_name=None):
super(CreateProcessModel, self).__init__()
self.description = description
self.name = name
self.parent_process_type_id = parent_process_type_id
self.reference_name = reference_name
class Extension(Model):
"""Extension.
:param id:
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'}
}
def __init__(self, id=None):
super(Extension, self).__init__()
self.id = id
class FieldModel(Model):
"""FieldModel.
:param description:
:type description: str
:param id:
:type id: str
:param is_identity:
:type is_identity: bool
:param name:
:type name: str
:param type:
:type type: object
:param url:
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_identity': {'key': 'isIdentity', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, is_identity=None, name=None, type=None, url=None):
super(FieldModel, self).__init__()
self.description = description
self.id = id
self.is_identity = is_identity
self.name = name
self.type = type
self.url = url
class FieldRuleModel(Model):
"""FieldRuleModel.
:param actions:
:type actions: list of :class:`RuleActionModel <work-item-tracking.v4_0.models.RuleActionModel>`
:param conditions:
:type conditions: list of :class:`RuleConditionModel <work-item-tracking.v4_0.models.RuleConditionModel>`
:param friendly_name:
:type friendly_name: str
:param id:
:type id: str
:param is_disabled:
:type is_disabled: bool
:param is_system:
:type is_system: bool
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[RuleActionModel]'},
'conditions': {'key': 'conditions', 'type': '[RuleConditionModel]'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'is_system': {'key': 'isSystem', 'type': 'bool'}
}
def __init__(self, actions=None, conditions=None, friendly_name=None, id=None, is_disabled=None, is_system=None):
super(FieldRuleModel, self).__init__()
self.actions = actions
self.conditions = conditions
self.friendly_name = friendly_name
self.id = id
self.is_disabled = is_disabled
self.is_system = is_system
class FormLayout(Model):
"""FormLayout.
:param extensions: Gets and sets extensions list
:type extensions: list of :class:`Extension <work-item-tracking.v4_0.models.Extension>`
:param pages: Top level tabs of the layout.
:type pages: list of :class:`Page <work-item-tracking.v4_0.models.Page>`
:param system_controls: Headers controls of the layout.
:type system_controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>`
"""
_attribute_map = {
'extensions': {'key': 'extensions', 'type': '[Extension]'},
'pages': {'key': 'pages', 'type': '[Page]'},
'system_controls': {'key': 'systemControls', 'type': '[Control]'}
}
def __init__(self, extensions=None, pages=None, system_controls=None):
super(FormLayout, self).__init__()
self.extensions = extensions
self.pages = pages
self.system_controls = system_controls
class Group(Model):
"""Group.
:param contribution: Contribution for the group.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param controls: Controls to be put in the group.
:type controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>`
:param height: The height for the contribution.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be set by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the group.
:type label: str
:param order: Order in which the group should appear in the section.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param visible: A value indicating if the group should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'controls': {'key': 'controls', 'type': '[Control]'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, controls=None, height=None, id=None, inherited=None, is_contribution=None, label=None, order=None, overridden=None, visible=None):
super(Group, self).__init__()
self.contribution = contribution
self.controls = controls
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.order = order
self.overridden = overridden
self.visible = visible
class Page(Model):
"""Page.
:param contribution: Contribution for the page.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be set by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: The label for the page.
:type label: str
:param locked: A value indicating whether any user operations are permitted on this page and the contents of this page
:type locked: bool
:param order: Order in which the page should appear in the layout.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param page_type: The icon for the page.
:type page_type: object
:param sections: The sections of the page.
:type sections: list of :class:`Section <work-item-tracking.v4_0.models.Section>`
:param visible: A value indicating if the page should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'locked': {'key': 'locked', 'type': 'bool'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'page_type': {'key': 'pageType', 'type': 'object'},
'sections': {'key': 'sections', 'type': '[Section]'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, id=None, inherited=None, is_contribution=None, label=None, locked=None, order=None, overridden=None, page_type=None, sections=None, visible=None):
super(Page, self).__init__()
self.contribution = contribution
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.locked = locked
self.order = order
self.overridden = overridden
self.page_type = page_type
self.sections = sections
self.visible = visible
class ProcessModel(Model):
"""ProcessModel.
:param description:
:type description: str
:param name:
:type name: str
:param projects:
:type projects: list of :class:`ProjectReference <work-item-tracking.v4_0.models.ProjectReference>`
:param properties:
:type properties: :class:`ProcessProperties <work-item-tracking.v4_0.models.ProcessProperties>`
:param reference_name:
:type reference_name: str
:param type_id:
:type type_id: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'projects': {'key': 'projects', 'type': '[ProjectReference]'},
'properties': {'key': 'properties', 'type': 'ProcessProperties'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'type_id': {'key': 'typeId', 'type': 'str'}
}
def __init__(self, description=None, name=None, projects=None, properties=None, reference_name=None, type_id=None):
super(ProcessModel, self).__init__()
self.description = description
self.name = name
self.projects = projects
self.properties = properties
self.reference_name = reference_name
self.type_id = type_id
class ProcessProperties(Model):
"""ProcessProperties.
:param class_:
:type class_: object
:param is_default:
:type is_default: bool
:param is_enabled:
:type is_enabled: bool
:param parent_process_type_id:
:type parent_process_type_id: str
:param version:
:type version: str
"""
_attribute_map = {
'class_': {'key': 'class', 'type': 'object'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, class_=None, is_default=None, is_enabled=None, parent_process_type_id=None, version=None):
super(ProcessProperties, self).__init__()
self.class_ = class_
self.is_default = is_default
self.is_enabled = is_enabled
self.parent_process_type_id = parent_process_type_id
self.version = version
class ProjectReference(Model):
"""ProjectReference.
:param description:
:type description: str
:param id:
:type id: str
:param name:
:type name: str
:param url:
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, name=None, url=None):
super(ProjectReference, self).__init__()
self.description = description
self.id = id
self.name = name
self.url = url
class RuleActionModel(Model):
"""RuleActionModel.
:param action_type:
:type action_type: str
:param target_field:
:type target_field: str
:param value:
:type value: str
"""
_attribute_map = {
'action_type': {'key': 'actionType', 'type': 'str'},
'target_field': {'key': 'targetField', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, action_type=None, target_field=None, value=None):
super(RuleActionModel, self).__init__()
self.action_type = action_type
self.target_field = target_field
self.value = value
class RuleConditionModel(Model):
"""RuleConditionModel.
:param condition_type:
:type condition_type: str
:param field:
:type field: str
:param value:
:type value: str
"""
_attribute_map = {
'condition_type': {'key': 'conditionType', 'type': 'str'},
'field': {'key': 'field', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, condition_type=None, field=None, value=None):
super(RuleConditionModel, self).__init__()
self.condition_type = condition_type
self.field = field
self.value = value
class Section(Model):
"""Section.
:param groups:
:type groups: list of :class:`Group <work-item-tracking.v4_0.models.Group>`
:param id: The id for the layout node.
:type id: str
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
"""
_attribute_map = {
'groups': {'key': 'groups', 'type': '[Group]'},
'id': {'key': 'id', 'type': 'str'},
'overridden': {'key': 'overridden', 'type': 'bool'}
}
def __init__(self, groups=None, id=None, overridden=None):
super(Section, self).__init__()
self.groups = groups
self.id = id
self.overridden = overridden
class UpdateProcessModel(Model):
"""UpdateProcessModel.
:param description:
:type description: str
:param is_default:
:type is_default: bool
:param is_enabled:
:type is_enabled: bool
:param name:
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, is_default=None, is_enabled=None, name=None):
super(UpdateProcessModel, self).__init__()
self.description = description
self.is_default = is_default
self.is_enabled = is_enabled
self.name = name
class WitContribution(Model):
"""WitContribution.
:param contribution_id: The id for the contribution.
:type contribution_id: str
:param height: The height for the contribution.
:type height: int
:param inputs: A dictionary holding key value pairs for contribution inputs.
:type inputs: dict
    :param show_on_deleted_work_item: A value indicating if the contribution should be shown on a deleted work item.
:type show_on_deleted_work_item: bool
"""
_attribute_map = {
'contribution_id': {'key': 'contributionId', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'inputs': {'key': 'inputs', 'type': '{object}'},
'show_on_deleted_work_item': {'key': 'showOnDeletedWorkItem', 'type': 'bool'}
}
def __init__(self, contribution_id=None, height=None, inputs=None, show_on_deleted_work_item=None):
super(WitContribution, self).__init__()
self.contribution_id = contribution_id
self.height = height
self.inputs = inputs
self.show_on_deleted_work_item = show_on_deleted_work_item
class WorkItemBehavior(Model):
"""WorkItemBehavior.
:param abstract:
:type abstract: bool
:param color:
:type color: str
:param description:
:type description: str
:param fields:
:type fields: list of :class:`WorkItemBehaviorField <work-item-tracking.v4_0.models.WorkItemBehaviorField>`
:param id:
:type id: str
:param inherits:
:type inherits: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>`
:param name:
:type name: str
:param overriden:
:type overriden: bool
:param rank:
:type rank: int
:param url:
:type url: str
"""
_attribute_map = {
'abstract': {'key': 'abstract', 'type': 'bool'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'fields': {'key': 'fields', 'type': '[WorkItemBehaviorField]'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'WorkItemBehaviorReference'},
'name': {'key': 'name', 'type': 'str'},
'overriden': {'key': 'overriden', 'type': 'bool'},
'rank': {'key': 'rank', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, abstract=None, color=None, description=None, fields=None, id=None, inherits=None, name=None, overriden=None, rank=None, url=None):
super(WorkItemBehavior, self).__init__()
self.abstract = abstract
self.color = color
self.description = description
self.fields = fields
self.id = id
self.inherits = inherits
self.name = name
self.overriden = overriden
self.rank = rank
self.url = url
class WorkItemBehaviorField(Model):
"""WorkItemBehaviorField.
:param behavior_field_id:
:type behavior_field_id: str
:param id:
:type id: str
:param url:
:type url: str
"""
_attribute_map = {
'behavior_field_id': {'key': 'behaviorFieldId', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior_field_id=None, id=None, url=None):
super(WorkItemBehaviorField, self).__init__()
self.behavior_field_id = behavior_field_id
self.id = id
self.url = url
class WorkItemBehaviorReference(Model):
"""WorkItemBehaviorReference.
:param id:
:type id: str
:param url:
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, url=None):
super(WorkItemBehaviorReference, self).__init__()
self.id = id
self.url = url
class WorkItemStateResultModel(Model):
"""WorkItemStateResultModel.
:param color:
:type color: str
:param hidden:
:type hidden: bool
:param id:
:type id: str
:param name:
:type name: str
:param order:
:type order: int
:param state_category:
:type state_category: str
:param url:
:type url: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'hidden': {'key': 'hidden', 'type': 'bool'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'state_category': {'key': 'stateCategory', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, color=None, hidden=None, id=None, name=None, order=None, state_category=None, url=None):
super(WorkItemStateResultModel, self).__init__()
self.color = color
self.hidden = hidden
self.id = id
self.name = name
self.order = order
self.state_category = state_category
self.url = url
class WorkItemTypeBehavior(Model):
"""WorkItemTypeBehavior.
:param behavior:
:type behavior: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>`
:param is_default:
:type is_default: bool
:param url:
:type url: str
"""
_attribute_map = {
'behavior': {'key': 'behavior', 'type': 'WorkItemBehaviorReference'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior=None, is_default=None, url=None):
super(WorkItemTypeBehavior, self).__init__()
self.behavior = behavior
self.is_default = is_default
self.url = url
class WorkItemTypeModel(Model):
"""WorkItemTypeModel.
:param behaviors:
:type behaviors: list of :class:`WorkItemTypeBehavior <work-item-tracking.v4_0.models.WorkItemTypeBehavior>`
:param class_:
:type class_: object
:param color:
:type color: str
:param description:
:type description: str
:param icon:
:type icon: str
:param id:
:type id: str
:param inherits: Parent WIT Id/Internal ReferenceName that it inherits from
:type inherits: str
:param is_disabled:
:type is_disabled: bool
:param layout:
:type layout: :class:`FormLayout <work-item-tracking.v4_0.models.FormLayout>`
:param name:
:type name: str
:param states:
:type states: list of :class:`WorkItemStateResultModel <work-item-tracking.v4_0.models.WorkItemStateResultModel>`
:param url:
:type url: str
"""
_attribute_map = {
'behaviors': {'key': 'behaviors', 'type': '[WorkItemTypeBehavior]'},
'class_': {'key': 'class', 'type': 'object'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'icon': {'key': 'icon', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'layout': {'key': 'layout', 'type': 'FormLayout'},
'name': {'key': 'name', 'type': 'str'},
'states': {'key': 'states', 'type': '[WorkItemStateResultModel]'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behaviors=None, class_=None, color=None, description=None, icon=None, id=None, inherits=None, is_disabled=None, layout=None, name=None, states=None, url=None):
super(WorkItemTypeModel, self).__init__()
self.behaviors = behaviors
self.class_ = class_
self.color = color
self.description = description
self.icon = icon
self.id = id
self.inherits = inherits
self.is_disabled = is_disabled
self.layout = layout
self.name = name
self.states = states
self.url = url
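# Hypothetical usage sketch (not part of the generated file above): it shows how one of
# these models is constructed and how _attribute_map ties each Python attribute name to
# its camelCase wire key. The sample values are made up for illustration only.
if __name__ == "__main__":
    control = Control(control_type="FieldControl", label="Title", visible=True, read_only=False)
    print("label=%s visible=%s" % (control.label, control.visible))
    for attr, mapping in Control._attribute_map.items():
        # e.g. "control_type -> controlType (str)"
        print("%s -> %s (%s)" % (attr, mapping['key'], mapping['type']))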
avg_line_length: 33.316857 | max_line_length: 223 | alphanum_fraction: 0.608666 | count_classes: 25,604 | score_classes: 0.974018 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 14,909 | score_documentation: 0.567162
hexsha: 8a56af27e9c2f89056b41302d904e7b48a70d0c4 | size: 1,379 | ext: py | lang: Python
max_stars_repo_path: responder.py | max_stars_repo_name: ziggyzacks/pyrecs | max_stars_repo_head_hexsha: 06e760aad4b49a62322f1d46660c52c81eeb1b11 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2018-03-21T17:55:57.000Z | max_stars_repo_stars_event_max_datetime: 2018-03-21T17:56:00.000Z
max_issues_repo_path: responder.py | max_issues_repo_name: ziggyzacks/pyrecs | max_issues_repo_head_hexsha: 06e760aad4b49a62322f1d46660c52c81eeb1b11 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: responder.py | max_forks_repo_name: ziggyzacks/pyrecs | max_forks_repo_head_hexsha: 06e760aad4b49a62322f1d46660c52c81eeb1b11 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import abc
from utils import LogMixin
class Reponse(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def redis(self):
""" redis connection """
return
@abc.abstractmethod
def fetch(self, ids):
""" hydrate relevant ids with data """
return
class Movies(Reponse, LogMixin):
DEFAULT_FIELDS = ['title', 'year', 'genres']
def __init__(self, **kwargs):
super().__init__()
for key, value in kwargs.items():
setattr(self, key, value)
def fetch(self, movies, fields=None, from_index=False):
""" hydrates class ids with metadata, return redis pipeline that must be executed """
if fields is None:
fields = Movies.DEFAULT_FIELDS
if from_index:
movies = self.redis.mget(('inverse:index:movie:{}'.format(idx) for idx in movies))
response = []
for movie in movies:
values = self.redis.hmget('movie:{}'.format(movie), fields)
obj = dict(zip(fields, values))
if 'genres' in obj:
obj['genres'] = obj['genres'].split(',')
if 'year' in obj:
obj['year'] = int(obj['year'])
response.append(obj)
return response
def movie_to_index(self, movies):
return self.redis.mget(('index:movie:{}'.format(m) for m in movies))
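# Hypothetical usage sketch (not part of the original module): it assumes a local Redis
# instance already populated with "movie:<id>" hashes, the redis-py package, and that the
# repo's utils.LogMixin is importable. The movie ids and fields below are illustrative only.
if __name__ == "__main__":
    import redis
    client = redis.StrictRedis(decode_responses=True)
    movies = Movies(redis=client)
    # Hydrates each "movie:<id>" hash and re-types 'genres' (list) and 'year' (int).
    print(movies.fetch(["603", "604"], fields=["title", "year", "genres"]))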
avg_line_length: 28.729167 | max_line_length: 94 | alphanum_fraction: 0.57723 | count_classes: 1,335 | score_classes: 0.968093 | count_generators: 0 | score_generators: 0 | count_decorators: 195 | score_decorators: 0.141407 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 263 | score_documentation: 0.190718
hexsha: 8a56cef3b3f2ca89cec426bc77ad6809415c305d | size: 1,327 | ext: bzl | lang: Python
max_stars_repo_path: python/library.bzl | max_stars_repo_name: robfig/rules_proto | max_stars_repo_head_hexsha: 6a85b0e4c3eeddf8863890ef48f2daab7a524ab7 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: python/library.bzl | max_issues_repo_name: robfig/rules_proto | max_issues_repo_head_hexsha: 6a85b0e4c3eeddf8863890ef48f2daab7a524ab7 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python/library.bzl | max_forks_repo_name: robfig/rules_proto | max_forks_repo_head_hexsha: 6a85b0e4c3eeddf8863890ef48f2daab7a524ab7 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
load("//python:compile.bzl", "py_proto_compile", "py_grpc_compile")
load("@grpc_py_deps//:requirements.bzl", "all_requirements")
def py_proto_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
py_proto_compile(
name = name_pb,
deps = deps,
visibility = visibility,
verbose = verbose,
)
native.py_library(
name = name,
srcs = [name_pb],
deps = all_requirements, # fixme don't need grpc here
# This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH
imports = [name_pb],
visibility = visibility,
)
def py_grpc_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
py_grpc_compile(
name = name_pb,
deps = deps,
visibility = visibility,
verbose = verbose,
)
native.py_library(
name = name,
srcs = [name_pb],
deps = all_requirements,
# This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH
imports = [name_pb],
visibility = visibility,
) | 28.847826 | 82 | 0.609646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.272042 |
8a58853ac66bc8f5b8cfad78774a49e43b593fba | 2,786 | py | Python | src/mem/slicc/ast/TypeDeclAST.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 135 | 2016-10-21T03:31:49.000Z | 2022-03-25T01:22:20.000Z | src/mem/slicc/ast/TypeDeclAST.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 148 | 2018-07-20T00:58:36.000Z | 2021-11-16T01:52:33.000Z | src/mem/slicc/ast/TypeDeclAST.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 48 | 2016-12-08T12:03:13.000Z | 2022-02-16T09:16:13.000Z | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols.Type import Type
class TypeDeclAST(DeclAST):
def __init__(self, slicc, type_ast, pairs, field_asts):
super(TypeDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.field_asts = field_asts
def __repr__(self):
return "[TypeDecl: %r]" % (self.type_ast)
def files(self, parent=None):
if "external" in self:
return set()
if parent:
ident = "%s_%s" % (parent, self.type_ast.ident)
else:
ident = self.type_ast.ident
return set(("%s.hh" % ident, "%s.cc" % ident))
def generate(self):
ident = str(self.type_ast)
machine = self.symtab.state_machine
# Make the new type
new_type = Type(self.symtab, ident, self.location, self.pairs,
self.state_machine)
if machine:
machine.addType(new_type)
self.symtab.newSymbol(new_type)
self.symtab.pushFrame()
# Add all of the fields of the type to it
for field in self.field_asts:
field.generate(new_type)
self.symtab.popFrame()
avg_line_length: 39.8 | max_line_length: 72 | alphanum_fraction: 0.709261 | count_classes: 1,107 | score_classes: 0.397344 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,683 | score_documentation: 0.604092
hexsha: 8a59e89d09e32fec1b404a96ad1edf1ccd223adb | size: 8,871 | ext: py | lang: Python
max_stars_repo_path: tests/test_preempt_return.py | max_stars_repo_name: vpv11110000/pyss | max_stars_repo_head_hexsha: bc2226e2e66e0b551a09ae6ab6835b0bb6c7f32b | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_preempt_return.py | max_issues_repo_name: vpv11110000/pyss | max_issues_repo_head_hexsha: bc2226e2e66e0b551a09ae6ab6835b0bb6c7f32b | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2017-09-05T11:12:05.000Z | max_issues_repo_issues_event_max_datetime: 2017-09-07T19:23:15.000Z
max_forks_repo_path: tests/test_preempt_return.py | max_forks_repo_name: vpv11110000/pyss | max_forks_repo_head_hexsha: bc2226e2e66e0b551a09ae6ab6835b0bb6c7f32b | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# #!/usr/bin/python
# -*- coding: utf-8 -*-
# test_preempt_return.py
# pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals
import sys
import os
import random
import unittest
DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)
from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.storage import Storage
from pyss.advance import Advance
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.test import Test
from pyss.pyss_const import *
class TestPreemptReturn(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# @unittest.skip("testing skipping test_preempt_return_001")
def test_preempt_return_001(self):
"""Тест Preempt - Return
Формируется один транзакт в момент времени 1.
Прерывает работу устройства F_1 на 5 единиц времени.
Выходит из модели в момент времени 6.
"""
logger.info("--- test_preempt_return_001 ----------------------------------")
### MODEL ----------------------------------
m = PyssModel()
sgm = Segment(m)
#
m[OPTIONS].setAllFalse()
MAX_TIME = 20
#
list_all_transact = []
#
MAX_TIME = 20
#
F_1 = "F_1"
        # facility (single-channel device)
Facility(m, facilityName=F_1)
#
def funcTransactTo_list_all_transact(owner, transact):
            # stash the transacts in a list
list_all_transact.append(transact)
### SEGMENT ----------------------------
        # a single transact is generated at time 1
Generate(sgm, med_value=None,
modificatorFunc=None,
first_tx=1,
max_amount=1)
Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
Preempt(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))
#
Advance(sgm, meanTime=5, modificatorFunc=None)
GReturn(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:not self.assertNotIn(F_1, t[FACILITY]))
#
Terminate(sgm, deltaTerminate=0)
        # RUN ----------------------
m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # TESTS ----------------------
for t in list_all_transact:
self.assertEqual(t[TIME_CREATED], 1)
self.assertEqual(t[TERMINATED_TIME], 6)
print str(["%s:%s" % (k, t[k])
for k in t.keys() if k
in [TIME_CREATED, TERMINATED_TIME]])
# @unittest.skip("testing skipping test_preempt_return_002")
def test_preempt_return_002(self):
"""Тест Preempt - Return
Формируется транзакт A в момент времени 1.
Идёт на обработку устройством F_1 в течение 3 единиц времени.
Формируется транзакт B в момент времени 2.
Прерывает работу устройства на 5 единиц времени.
Транзакт B выходит из модели в момент времени 7.
Транзакт А выходит из модели в момент времени 9.
Обработка транзакта А была прервана с 2 по 7.
"""
logger.info("--- test_preempt_return_002 ----------------------------------")
### MODEL ----------------------------------
m = PyssModel()
sgm = Segment(m)
#
m[OPTIONS].setAllFalse()
MAX_TIME = 20
# CONSTS
TRANSACT_A = "A"
TRANSACT_B = "B"
#
list_all_transact = []
tA = []
tB = []
#
F_1 = "F_1"
        # facility (single-channel device)
facility_1 = Facility(m, facilityName=F_1)
#
def funcTransactTo_list_all_transact(owner, transact):
            # stash the transacts in a list
list_all_transact.append(transact)
def setTransactLabel(owner, transact):
if transact[NUM] == 1:
transact[LABEL] = TRANSACT_A
tA.append(transact)
elif transact[NUM] == 2:
transact[LABEL] = TRANSACT_B
tB.append(transact)
        # condition-check function
def checkTest(o):
t=m.getCurrentTransact()
if t[LABEL] == TRANSACT_B:
return False
return True
def printAllTransact(owner, transact):
print "Time=%s" % str(m.getCurTime())
print "\n".join([str(t) for t in list_all_transact])
print "tA=%s" % str(tA[0])
print "tB=%s" % str(tB[0])
### SEGMENT ----------------------------
        # transacts are generated starting at time 1 (A at 1, B at 2)
Generate(sgm,
med_value=1,
modificatorFunc=None,
first_tx=1,
max_amount=2)
        # auxiliary operations
Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
Handle(sgm, handlerFunc=setTransactLabel)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
        # the first transact passes through; the second is routed to the "to_preempt" label
Test(sgm, funcCondition=checkTest, move2block="to_preempt")
        # first transact only
Seize(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))
#
Advance(sgm, meanTime=3, modificatorFunc=None)
Release(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel("to_term"))
#---
        # second transact only
Preempt(sgm, facilityName=F_1, label="to_preempt")
# test
# .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None)))
Handle(sgm, handlerFunc=printAllTransact)
Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))
#
Handle(sgm, handlerFunc=printAllTransact)
Advance(sgm, meanTime=5, modificatorFunc=None)
GReturn(sgm, facilityName=F_1)
# test
Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2))
Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9))
Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))
#
Handle(sgm, handlerFunc=printAllTransact)
        # all transacts
Terminate(sgm, label="to_term", deltaTerminate=0)
        # RUN ----------------------
m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # TESTS ----------------------
for t in list_all_transact:
            # Transact A is generated at time 1.
            # It is served by facility F_1 for 3 time units.
            # Transact B is generated at time 2.
            # It preempts the facility for 5 time units.
            # Transact B leaves the model at time 7.
            # Transact A leaves the model at time 9.
            # Service of transact A is interrupted from time 2 to time 7.
print str(["%s:%s" % (k, t[k])
for k in t.keys() if k
in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]])
if t[LABEL] == TRANSACT_A:
self.assertEqual(t[TIME_CREATED], 1)
self.assertEqual(t[REMAIND_TIME], 2)
self.assertEqual(t[TERMINATED_TIME], 9)
self.assertListEqual(t[LIFE_TIME_LIST], [
{'start': 1, 'state': 'actived'},
{'start': 2, 'state': 'preempted'},
{'start': 7, 'state': 'actived'},
{'start': 9, 'state': 'deleted'}])
elif t[LABEL] == TRANSACT_B:
self.assertEqual(t[TIME_CREATED], 2)
self.assertEqual(t[TERMINATED_TIME], 7)
self.assertListEqual(t[LIFE_TIME_LIST], [
{'start': 2, 'state': 'actived'},
{'start': 7, 'state': 'deleted'}])
if __name__ == '__main__':
unittest.main(module="test_preempt_return")
avg_line_length: 35.342629 | max_line_length: 106 | alphanum_fraction: 0.578289 | count_classes: 8,675 | score_classes: 0.885023 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,404 | score_documentation: 0.347276
hexsha: 8a5ab5ed3f3ad80694d11c3e4b2aca3d095ca892 | size: 2,400 | ext: py | lang: Python
max_stars_repo_path: python/ray/rllib/ddpg2/ddpg_evaluator.py | max_stars_repo_name: songqing/ray | max_stars_repo_head_hexsha: 166000b089ee15d44635ebca00f12320f51ce587 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2018-06-25T08:00:51.000Z | max_stars_repo_stars_event_max_datetime: 2018-06-25T08:00:51.000Z
max_issues_repo_path: python/ray/rllib/ddpg2/ddpg_evaluator.py | max_issues_repo_name: songqing/ray | max_issues_repo_head_hexsha: 166000b089ee15d44635ebca00f12320f51ce587 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2018-01-26T05:11:04.000Z | max_issues_repo_issues_event_max_datetime: 2018-01-26T05:11:04.000Z
max_forks_repo_path: python/ray/rllib/ddpg2/ddpg_evaluator.py | max_forks_repo_name: songqing/ray | max_forks_repo_head_hexsha: 166000b089ee15d44635ebca00f12320f51ce587 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-10-16T08:42:32.000Z | max_forks_repo_forks_event_max_datetime: 2020-10-16T08:42:32.000Z
content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import ray
from ray.rllib.ddpg2.models import DDPGModel
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.optimizers import PolicyEvaluator
from ray.rllib.utils.filter import NoFilter
from ray.rllib.utils.process_rollout import process_rollout
from ray.rllib.utils.sampler import SyncSampler
class DDPGEvaluator(PolicyEvaluator):
def __init__(self, registry, env_creator, config):
self.env = ModelCatalog.get_preprocessor_as_wrapper(
registry, env_creator(config["env_config"]))
# contains model, target_model
self.model = DDPGModel(registry, self.env, config)
self.sampler = SyncSampler(
self.env, self.model.model, NoFilter(),
config["num_local_steps"], horizon=config["horizon"])
def sample(self):
"""Returns a batch of samples."""
rollout = self.sampler.get_data()
rollout.data["weights"] = np.ones_like(rollout.data["rewards"])
# since each sample is one step, no discounting needs to be applied;
# this does not involve config["gamma"]
samples = process_rollout(
rollout, NoFilter(),
gamma=1.0, use_gae=False)
return samples
def update_target(self):
"""Updates target critic and target actor."""
self.model.update_target()
def compute_gradients(self, samples):
"""Returns critic, actor gradients."""
return self.model.compute_gradients(samples)
def apply_gradients(self, grads):
"""Applies gradients to evaluator weights."""
self.model.apply_gradients(grads)
def compute_apply(self, samples):
grads, _ = self.compute_gradients(samples)
self.apply_gradients(grads)
def get_weights(self):
"""Returns model weights."""
return self.model.get_weights()
def set_weights(self, weights):
"""Sets model weights."""
self.model.set_weights(weights)
def get_completed_rollout_metrics(self):
"""Returns metrics on previously completed rollouts.
Calling this clears the queue of completed rollout metrics.
"""
return self.sampler.get_metrics()
RemoteDDPGEvaluator = ray.remote(DDPGEvaluator)
avg_line_length: 31.578947 | max_line_length: 77 | alphanum_fraction: 0.675417 | count_classes: 1,910 | score_classes: 0.795833 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 540 | score_documentation: 0.225
hexsha: 8a5d2dc08b304db2757537f331d99b9fccf16fe7 | size: 3,064 | ext: py | lang: Python
max_stars_repo_path: python/sysmap/graph.py | max_stars_repo_name: harryherold/sysmap | max_stars_repo_head_hexsha: 293e5f0dc22ed709c8fd5c170662e433c039eeab | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-05-08T13:55:31.000Z | max_stars_repo_stars_event_max_datetime: 2020-05-08T13:55:31.000Z
max_issues_repo_path: python/sysmap/graph.py | max_issues_repo_name: harryherold/sysmap | max_issues_repo_head_hexsha: 293e5f0dc22ed709c8fd5c170662e433c039eeab | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2020-01-16T10:30:28.000Z | max_issues_repo_issues_event_max_datetime: 2020-01-27T11:23:49.000Z
max_forks_repo_path: python/sysmap/graph.py | max_forks_repo_name: harryherold/sysmap | max_forks_repo_head_hexsha: 293e5f0dc22ed709c8fd5c170662e433c039eeab | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-01-16T09:08:14.000Z | max_forks_repo_forks_event_max_datetime: 2020-01-16T09:08:14.000Z
content:
from graphviz import Digraph
from collections import namedtuple
class NetworkGraph:
''' Representation of the network connections.
This class contains the entities in the network e.g. hosts or switches.
And the connections between them.
'''
Vertex = namedtuple('Vertexes', ['hosts', 'switches'])
_edges = []
def _sanitize_edge_connection(self, edge):
        ''' Update '_to' and '_from' fields of an edge.
:param edge: One edge connection.
:type edge: dict
:returns: Updated edge with _to and _from key.
:rtype: dict
'''
if edge['to_guid'].startswith('S'):
to_collection = 'switches/'
elif edge['to_guid'].startswith('H'):
to_collection = 'hosts/'
if edge['from_guid'].startswith('S'):
from_collection = 'switches/'
elif edge['from_guid'].startswith('H'):
from_collection = 'hosts/'
edge.update({
'_to': to_collection + edge['to_guid'],
'_from': from_collection + edge['from_guid']
})
return edge
def _sanitize_vertexes(self, vertex):
''' Update '_key' field of vertex to appropriate guid.
:param vertex: Vertex
:type vertex: dict
:returns: An updated dict, '_key' field with 'guid' value.
:rtype: dict
'''
vertex.update({'_key': vertex['guid']})
return vertex
def __init__(self, hsts=None, switches=None, connections=None):
self._vertexes = self.Vertex(hosts=[self._sanitize_vertexes(h) for h in hsts],
switches=[self._sanitize_vertexes(s) for s in switches])
self._edges = [self._sanitize_edge_connection(c) for c in connections]
@property
def vertexes(self):
''' Returns a concatenated list of all vertexes.
:returns: List of vertexes, contains of hosts and switches.
:rtype: List[dict]
'''
return self._vertexes.hosts + self._vertexes.switches
@property
def switches(self):
''' Returns a list of all 'switch' vertexes.
:returns: List of all switches.
:rtype: List[dict]
'''
return self._vertexes.switches
@property
def hosts(self):
''' Returns a list of all 'host' vertexes.
:returns: List of all hosts.
:rtype: List[dict]
'''
return self._vertexes.hosts
@property
def edges(self):
''' Return a list of all 'connection' edges.
:returns: List of all connections.
:rtype: List[dict]
'''
return self._edges
def to_graph(self, graphargs):
''' Draw a dot graph of the network graph.
        :param graphargs: Arguments to graphviz.Digraph.
:type graphargs: dict
'''
graph = Digraph(**graphargs)
        # iterate over all vertexes (hosts + switches) via the combined property
        for v in self.vertexes:
graph.node(v['guid'], v['description'])
for c in self._edges:
graph.edge(c['from_guid'], c['to_guid'])
graph.render()
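# Hypothetical usage sketch (not part of the original module): vertex dicts need a
# 'guid' and 'description', connection dicts need 'from_guid'/'to_guid', and guids are
# prefixed 'H' for hosts and 'S' for switches. Rendering requires Graphviz to be installed;
# all names and values below are made up for illustration.
if __name__ == "__main__":
    hosts = [{"guid": "H0001", "description": "compute node 1"}]
    switches = [{"guid": "S0001", "description": "edge switch 1"}]
    links = [{"from_guid": "H0001", "to_guid": "S0001"}]
    net = NetworkGraph(hsts=hosts, switches=switches, connections=links)
    # Writes and renders a dot graph of the single host-to-switch link.
    net.to_graph({"name": "network", "filename": "network.gv", "format": "svg"})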
avg_line_length: 29.461538 | max_line_length: 92 | alphanum_fraction: 0.582572 | count_classes: 2,997 | score_classes: 0.978133 | count_generators: 0 | score_generators: 0 | count_decorators: 852 | score_decorators: 0.278068 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,459 | score_documentation: 0.476175
hexsha: 8a5d63158988a4154bd4df2b897b694d5cad31f9 | size: 46,478 | ext: py | lang: Python
max_stars_repo_path: alembic/versions/1d092815507a_add_huawei_2g_managedobjects.py | max_stars_repo_name: bodastage/bts-database | max_stars_repo_head_hexsha: 96df7915621dd46daf55016eedf5cfc84dd0e3a2 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-08-30T01:20:14.000Z | max_stars_repo_stars_event_max_datetime: 2019-08-30T01:20:14.000Z
max_issues_repo_path: alembic/versions/1d092815507a_add_huawei_2g_managedobjects.py | max_issues_repo_name: bodastage/bts-database | max_issues_repo_head_hexsha: 96df7915621dd46daf55016eedf5cfc84dd0e3a2 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2018-05-30T09:29:24.000Z | max_issues_repo_issues_event_max_datetime: 2018-05-30T10:04:37.000Z
max_forks_repo_path: alembic/versions/1d092815507a_add_huawei_2g_managedobjects.py | max_forks_repo_name: bodastage/bts-database | max_forks_repo_head_hexsha: 96df7915621dd46daf55016eedf5cfc84dd0e3a2 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 3 | max_forks_repo_forks_event_min_datetime: 2018-03-10T23:29:30.000Z | max_forks_repo_forks_event_max_datetime: 2019-02-19T22:11:09.000Z
content:
"""Add Huawei 2G managedobjects
Revision ID: 1d092815507a
Revises: 3fa514f1b7a9
Create Date: 2018-02-13 01:38:59.965000
"""
from alembic import op
import sqlalchemy as sa
import datetime
# revision identifiers, used by Alembic.
revision = '1d092815507a'
down_revision = '3fa514f1b7a9'
branch_labels = None
depends_on = None
def upgrade():
managedobjects = sa.sql.table(
'managedobjects',
sa.Column('pk', sa.Integer, sa.Sequence('seq_managedobjects_pk', ), primary_key=True, nullable=False),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('notes', sa.Text),
sa.Column('label', sa.String(200)),
sa.Column('parent_pk', sa.Integer),
sa.Column('affect_level', sa.Integer),
sa.Column('tech_pk', sa.Integer),
sa.Column('vendor_pk', sa.Integer),
sa.Column('modified_by', sa.Integer),
sa.Column('added_by', sa.Integer),
sa.Column('date_added', sa.TIMESTAMP, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
sa.Column('date_modified', sa.TIMESTAMP, default=datetime.datetime.utcnow)
)
op.bulk_insert(managedobjects, [
{'name': 'BTSCABINET', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'AITFOTHPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'AITFREV', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALGCTRLPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMBLKPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMBLKSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMCAPACITY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMLVL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMML', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMOSCISW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMOSCITHRD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMSCRN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ALMSHLD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'APPCERT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ATESTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BFDPROTOSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BOXRPT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BRD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCAISS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCAITFTMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCBASIC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCDSTPA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCEXSOFTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCFCPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCJBF', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCNSPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCPCUTYPE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCPSGBPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCPSSOFTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCPSSTAT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCPSTCDSCPMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCPSUMPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCSIGTRC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCTESTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSCTMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSSGPPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BSSLS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSABISMUXFLOW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSABISPRIMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSABISTROP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSAISS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSALM', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSALMFLASHTHD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSALMFLASHTW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSALMPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSAPMUBP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSAPPCERT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSAUTODLDACTINFO', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSBAKPWR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSBBMODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSBINDLOCGRP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSBRD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSBREAKPOINT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSBWPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCELLPATCHPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCERTCHKTSK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCERTDEPLOY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCERTMK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCERTREQ', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCHNFALLBACK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCLK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCONNECT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCPRIPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCRC4', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCRLPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCTRLEX', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSCTRLLNK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSDEVIP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSDHCPSVRIP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSDHEUBP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSDSCPMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSE1T1BER', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSEAMRC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSENVALMPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSEQUIPMENT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSESN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSETHOAM', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSETHOAMAH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSETHPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSFALLBACK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSFLEXABISPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSFMUABP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSGTRANSPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSGUPWRSHRFP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSIDLETS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSIKECFG', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSINTRXUSPEC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSIP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSIPGUARD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSIPRT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSJBF', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSLAPDWS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSLLDPGLOBAL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSLNKBKATTR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSLOCGRP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSLOCKBCCH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSLR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSLSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSMNTMODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSMPGRP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSMPLNK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSOMLBACKUP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSOMLDETECT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSOMLTS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSOTHPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSPATCHPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSPINGSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSPLRALM', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSPSUFP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRELIALOGSWITCH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRET', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRETDEVICEDATA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRETSUBUNIT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRINGATTR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRSV', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRXU2LOCGRP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRXUBP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRXUBRD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSRXUCHAIN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSSHARING', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTEMPLATERSC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTHEFTALM', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTMA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTMADEVICEDATA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTMASUBUNIT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTRANS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTRCMPR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTRUSTCERT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSTRXBACKUP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSVLAN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSXFC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'BTSXMUFP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CAB', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CCGN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CELLBIND2BTS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CELLGLDSS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CERTCHKTSK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CERTMK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CERTREQ', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CLK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CLKMODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CLKSRC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CONNTYPE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'COPTLNK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CPUTHD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CRLPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'CSPRECTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DEVIP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DEVRSVDPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DSCPMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DSP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DSPLVDSMODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DXX', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DXXCONNECT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'DXXTSEXGRELATION', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'E1T1', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'EMSIP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ENVALMPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ETHIP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ETHPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ETHREDPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ETHSWITCH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FACFG', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FANSPEED', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FCCOMMPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'filefooter', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FTPCLTPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FTPSCLT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FTPSCLTDPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FTPSRVSPD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'FTPSSRV', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'G2GNCELL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'G3GARFCN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'G3GNCELL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GAFCALMPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GALLCELLBLKSTAT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GBSCREDGRP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELL2GBA1', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELL3GARFCN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLAMRQUL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLBASICPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0, 'affect_level': 1},
{'name': 'GCELLBTSSOFTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCCACCESS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCCAD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCCAMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCCBASIC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCCCH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCCTMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCCUTRANSYS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCHMGAD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCHMGBASIC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCONGACALGO', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLCSFBPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLDYNTURNOFF', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLEGPRSPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLEXTMSRPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLFREQ', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLFREQSCAN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLGPRS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLGSMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHO2GBA2', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOAD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOBASIC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOCTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOEDBPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOEMG', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOFAST', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOFDDBA2', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOFITPEN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOINTERRATLDB', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOIUO', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOPANT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOPTP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOTDDBA2', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOUTRANFDD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHOUTRANTDD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLHSRPLCUSRIDFMG', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLIBCAII', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLIDLEAD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLIDLEBASIC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLIDLEFDDBA1', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLIDLETDDBA1', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLLCS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLMAGRP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLMAIOPLAN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLMOCN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLNC2PARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLNCRESELECTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLNONSTANDARDBW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLNWCTRLMSRPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLOPTREV', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLOSPMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLOTHBASIC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLOTHEXT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLOTHPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPRACH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPRIEUTRANSYS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPRIVATEOPTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSABISPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSBASE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSCHM', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSCS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSDIFFSERVICE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSI1', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSOTHERPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSPWPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPSSMALLPKTRESBAL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0,
'added_by': 0},
{'name': 'GCELLPWR2', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPWR3', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLPWRBASIC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLRESELECTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLRESELECTUTRANTDD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0,
'added_by': 0},
{'name': 'GCELLRESELUTRANFDD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLRSVPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLSBC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLSERVPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLSOFT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLSON', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLSRVCC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLSTANDARDOPTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLTA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLTEMPLATERSC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLTMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLTRANPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLUNDPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLVAMOS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLVAMOSPWR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCELLWLAN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCNCFGALMTHD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCNNODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCNOPERATOR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCNOPERATORREV', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCSCHRCTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCSCHRSCOPE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GCSFILE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GDSSPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GEXT2GCELL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GEXT3GCELL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GEXTLTECELL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GFORCESWITCH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GHOSTSTATUS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GKPIALMTHD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GLOBALROUTESW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GLTENCELL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GMRCTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GMRSCOPE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GNODEREDCFGCTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GNODEREDUNDANCY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GPSCHRCTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GPSCHRSCOPE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GPSKPIALMTHD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GREDGRPHOSTPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GRSVPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRX', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXBASE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXCHAN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXCHANHOP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXDEV', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXFC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXHOP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXIUO', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXRLALM', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'GTRXRSVPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'G_ADJMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'G_ADJNODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'G_IPPATH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'HOSTLOGSPD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'IDRQTEST', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'INFBRDRESCFG', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'INTBRDPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'IPCHK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'IPGUARD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'IPLOGICPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'IPMUX', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'IPRT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ITWKPIALMTHD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'L2L3ROUTEPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'LDR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'LICALMTHD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'LICPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'LODCTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'LOGLIMIT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'M3DE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'M3LE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'M3LKS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'M3LNK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'M3RT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'MDTLCS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'MNTMODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'MOCNPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'MSGSOFTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'MSP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'MTP3TMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'N7DPC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'NRIMSCMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'NRISGSNMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'NSE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'NSVLLOCAL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OBJALMSHLD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OBJAUTHSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OMUCOMMSVCSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OMUETH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OMUPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OMUPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OPC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OPLOCK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OPSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OPT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OSPWDPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'OTHSOFTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PACKETFILTERALMPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PHBMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PORTFLOWCTRLPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PORTOSCCTRLPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PSPREFABISCONGCTRL', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PSUSRRESBIND', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PTPBVC', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PWDPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PWRALMSW', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'PWRPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'QUEUEMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'RSVRES', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'RULELIBVER', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SAUCENTER', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SCCPTMR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SCTPLNK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SCTPPROF', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SCTPSRVPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SCUPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SGSN', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SGSNNODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SNTPCLTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SNTPSRVINFO', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SRCONPATH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SS7PATCHSWITCH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SSLAUTHMODE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SSLCONF', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SSLCS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SUBNET', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SUBRACK', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SUBSESSION_NE', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SYNSWITCH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'SYS', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TCPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TCRSVPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TNALMPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TNLOADBALANCEPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TNRSVDPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TNSOFTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRANSPATCHPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRANSPHYLNKPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRANSRSVPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRCLOGSPD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRMFACTOR', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRMLOADTH', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRMMAP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRUSTCERT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TRXBIND2PHYBRD', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'TZ', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'UMTESTPARA', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'USEREVTRTNPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'USRRESBIND', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'VLANID', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'WEBLOGINPOLICY', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'XPUPORT', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
])
def downgrade():
op.execute("""DELETE FROM managedobjects WHERE vendor_pk = {0} AND tech_pk = {1}""".format(2, 1))
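# --- Added illustration (not part of the original migration) ---------------------
# Assumption: the row dicts above form the tail of an op.bulk_insert() call inside
# upgrade(), targeting a 'managedobjects' table; the helper below only sketches that
# pattern with a hypothetical table object and is never called in this migration.
def _bulk_insert_example(op, managedobjects_table):
    """Sketch: insert two vendor_pk=2 / tech_pk=1 managed-object rows."""
    op.bulk_insert(managedobjects_table, [
        {'name': 'BTSIP', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1,
         'modified_by': 0, 'added_by': 0},
        {'name': 'CAB', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1,
         'modified_by': 0, 'added_by': 0},
    ])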
8a5d6681ce10a3af268cfdb475c6d9aff87499c6 | 1,211 | py | Python | png/imageRecognition_Simple.py | tanthanadon/senior | 89fc24889b34860982b551e5ea5e0d3550505f65 | ["MIT"] | 5 | 2020-03-04T13:49:10.000Z | 2020-03-20T04:06:23.000Z
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt
image = io.imread("star.jpg")
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show()
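# --- Added summary sketch (not part of the original script) -----------------------
# Print how many blobs each detector found and their mean radius; uses only the
# blobs_list and titles variables defined above.
for blobs, title in zip(blobs_list, titles):
    radii = [b[2] for b in blobs]
    mean_radius = sum(radii) / len(radii) if radii else 0.0
    print(f"{title}: {len(radii)} blobs, mean radius {mean_radius:.2f}")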
8a5de963629a6bc23b3e927dcbf31f83ecc1590d | 171 | py | Python | indexof.py | gnuchev/homework | 4083d44561cc9738d3cd8da99f8ef91b69961b6c | ["MIT"] | null | null | null
def indexof(listofnames, value):
    if value in listofnames:
        value_index = listofnames.index(value)
        return (listofnames, value_index)
    else:
        return -1
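# --- Added usage example (not part of the original homework file) -----------------
# The function returns the original list together with the index when the value is
# present, and -1 otherwise.
if __name__ == "__main__":
    names = ["ada", "grace", "alan"]
    print(indexof(names, "grace"))   # (['ada', 'grace', 'alan'], 1)
    print(indexof(names, "linus"))   # -1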
8a5e3b1295194140be07e7851df9a2e6e39cc960 | 529 | py | Python | Day22_Pong/ball.py | syt1209/PythonProjects | 0409dbd3c0b0ddf00debc38875059c828eb31dec | ["MIT"] | 1 | 2021-02-16T00:59:29.000Z | 2021-02-16T00:59:29.000Z
from turtle import Turtle
SPEED = 10
class Ball(Turtle):
def __init__(self):
super().__init__()
self.penup()
self.color("white")
self.shape("circle")
        self.move_speed = 0.1   # speed setting restored by reset()
        self.y_bounce = 1       # +1 / -1 multiplier for the vertical step in move()
        self.x_bounce = 1       # +1 / -1 multiplier for the horizontal step in move()
def move(self):
new_x = self.xcor() + SPEED*self.x_bounce
new_y = self.ycor() + SPEED*self.y_bounce
self.goto(new_x, new_y)
def reset(self):
self.goto(0, 0)
self.move_speed = 0.1
self.x_bounce *= -1
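# --- Added usage sketch (not part of the original Day 22 project file) ------------
# Assumption: in the full Pong game a Screen and paddles drive this class; the loop
# below only shows how move(), the bounce flags and reset() fit together.
if __name__ == "__main__":
    from turtle import Screen
    screen = Screen()
    screen.setup(width=800, height=600)
    screen.bgcolor("black")
    screen.tracer(0)
    ball = Ball()
    for _ in range(500):
        ball.move()
        if abs(ball.ycor()) > 280:
            ball.y_bounce *= -1      # bounce off the top/bottom walls
        if abs(ball.xcor()) > 380:
            ball.reset()             # a player missed: restart from the centre
        screen.update()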
8a5eacf969c02364f5e4daefab7f03dd79ff6a0f | 447 | py | Python | programs/combine/jry2/treedef.py | lsrcz/SyGuS | 5aab1b2c324d8a3c20e51f8acb2866190a1431d3 | ["MIT"] | 1 | 2021-07-11T08:32:32.000Z | 2021-07-11T08:32:32.000Z
from jry2.semantics import Expr
class TreeNode:
pass
class TreeLeaf(TreeNode):
def __init__(self, term):
self.term = term
def getExpr(self):
return self.term
class TreeInnerNode(TreeNode):
def __init__(self, pred, left, right):
self.pred = pred
self.left = left
self.right = right
def getExpr(self):
return Expr('ite', self.pred, self.left.getExpr(), self.right.getExpr())
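# --- Added usage sketch (not part of the original SyGuS code) ---------------------
# A decision tree of guarded terms lowers to nested 'ite' expressions via getExpr().
def _example_tree(pred, then_term, else_term):
    """Sketch: TreeInnerNode(pred, TreeLeaf(a), TreeLeaf(b)) lowers to Expr('ite', pred, a, b)."""
    return TreeInnerNode(pred, TreeLeaf(then_term), TreeLeaf(else_term)).getExpr()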
8a5ec6dd61aef0b828a5fdf8e68715be0262b256 | 103,584 | py | Python | src/sage/modular/dirichlet.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | ["BSL-1.0"] | 1 | 2021-10-18T01:24:04.000Z | 2021-10-18T01:24:04.000Z
# -*- coding: utf-8 -*-
r"""
Dirichlet characters
A :class:`DirichletCharacter` is the extension of a homomorphism
.. MATH::
(\ZZ/N\ZZ)^* \to R^*,
for some ring `R`, to the map `\ZZ/N\ZZ \to R` obtained by sending
those `x\in\ZZ/N\ZZ` with `\gcd(N,x)>1` to `0`.
EXAMPLES::
sage: G = DirichletGroup(35)
sage: x = G.gens()
sage: e = x[0]*x[1]^2; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2 - 1
sage: e.order()
12
This illustrates a canonical coercion::
sage: e = DirichletGroup(5, QQ).0
sage: f = DirichletGroup(5,CyclotomicField(4)).0
sage: e*f
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
AUTHORS:
- William Stein (2005-09-02): Fixed bug in comparison of Dirichlet
characters. It was checking that their values were the same, but
not checking that they had the same level!
- William Stein (2006-01-07): added more examples
- William Stein (2006-05-21): added examples of everything; fix a
*lot* of tiny bugs and design problem that became clear when
creating examples.
- Craig Citro (2008-02-16): speed up __call__ method for
Dirichlet characters, miscellaneous fixes
- Julian Rueth (2014-03-06): use UniqueFactory to cache DirichletGroups
"""
# ****************************************************************************
# Copyright (C) 2004-2006 William Stein <[email protected]>
# Copyright (C) 2014 Julian Rueth <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
import sage.categories.all as cat
from sage.misc.all import prod
import sage.misc.prandom as random
import sage.modules.free_module as free_module
import sage.modules.free_module_element as free_module_element
import sage.rings.all as rings
import sage.rings.number_field.number_field as number_field
from sage.libs.pari import pari
from sage.categories.map import Map
from sage.rings.rational_field import is_RationalField
from sage.rings.complex_mpfr import is_ComplexField
from sage.rings.qqbar import is_AlgebraicField
from sage.rings.ring import is_Ring
from sage.misc.functional import round
from sage.misc.cachefunc import cached_method
from sage.misc.fast_methods import WithEqualityById
from sage.structure.element import MultiplicativeGroupElement
from sage.structure.gens_py import multiplicative_iterator
from sage.structure.parent import Parent
from sage.structure.sequence import Sequence
from sage.structure.factory import UniqueFactory
from sage.structure.richcmp import richcmp
from sage.arith.all import (binomial, bernoulli, kronecker, factor, gcd,
lcm, fundamental_discriminant, euler_phi, factorial, valuation)
def trivial_character(N, base_ring=rings.RationalField()):
r"""
Return the trivial character of the given modulus, with values in the given
base ring.
EXAMPLES::
sage: t = trivial_character(7)
sage: [t(x) for x in [0..20]]
[0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
sage: t(1).parent()
Rational Field
sage: trivial_character(7, Integers(3))(1).parent()
Ring of integers modulo 3
"""
return DirichletGroup(N, base_ring)(1)
TrivialCharacter = trivial_character
def kronecker_character(d):
"""
Return the quadratic Dirichlet character (d/.) of minimal
conductor.
EXAMPLES::
sage: kronecker_character(97*389*997^2)
Dirichlet character modulo 37733 of conductor 37733 mapping 1557 |--> -1, 37346 |--> -1
::
sage: a = kronecker_character(1)
sage: b = DirichletGroup(2401,QQ)(a) # NOTE -- over QQ!
sage: b.modulus()
2401
AUTHORS:
- Jon Hanke (2006-08-06)
"""
d = rings.Integer(d)
if d == 0:
raise ValueError("d must be nonzero")
D = fundamental_discriminant(d)
G = DirichletGroup(abs(D), rings.RationalField())
return G([kronecker(D,u) for u in G.unit_gens()])
def kronecker_character_upside_down(d):
"""
Return the quadratic Dirichlet character (./d) of conductor d, for
    d > 0.
EXAMPLES::
sage: kronecker_character_upside_down(97*389*997^2)
Dirichlet character modulo 37506941597 of conductor 37733 mapping 13533432536 |--> -1, 22369178537 |--> -1, 14266017175 |--> 1
AUTHORS:
- Jon Hanke (2006-08-06)
"""
d = rings.Integer(d)
if d <= 0:
raise ValueError("d must be positive")
G = DirichletGroup(d, rings.RationalField())
return G([kronecker(u.lift(),d) for u in G.unit_gens()])
def is_DirichletCharacter(x):
r"""
Return True if x is of type DirichletCharacter.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletCharacter
sage: is_DirichletCharacter(trivial_character(3))
True
sage: is_DirichletCharacter([1])
False
"""
return isinstance(x, DirichletCharacter)
class DirichletCharacter(MultiplicativeGroupElement):
"""
A Dirichlet character.
"""
def __init__(self, parent, x, check=True):
r"""
Create a Dirichlet character with specified values on
generators of `(\ZZ/n\ZZ)^*`.
INPUT:
- ``parent`` -- :class:`DirichletGroup`, a group of Dirichlet
characters
- ``x`` -- one of the following:
- tuple or list of ring elements: the values of the
Dirichlet character on the standard generators of
`(\ZZ/N\ZZ)^*` as returned by
:meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`.
- vector over `\ZZ/e\ZZ`, where `e` is the order of the
standard root of unity for ``parent``.
In both cases, the orders of the elements must divide the
orders of the respective generators of `(\ZZ/N\ZZ)^*`.
OUTPUT:
The Dirichlet character defined by `x` (type
:class:`DirichletCharacter`).
EXAMPLES::
sage: G.<e> = DirichletGroup(13)
sage: G
Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: loads(e.dumps()) == e
True
::
sage: G, x = DirichletGroup(35).objgens()
sage: e = x[0]*x[1]; e
Dirichlet character modulo 35 of conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2
sage: e.order()
12
sage: loads(e.dumps()) == e
True
TESTS::
sage: G = DirichletGroup(10)
sage: TestSuite(G[1]).run()
It is checked that the orders of the elements in `x` are
admissible (see :trac:`17283`)::
sage: k.<i> = CyclotomicField(4)
sage: G = DirichletGroup(192)
sage: G([i, -1, -1])
Traceback (most recent call last):
...
ValueError: values (= (zeta16^4, -1, -1)) must have multiplicative orders dividing (2, 16, 2), respectively
sage: from sage.modular.dirichlet import DirichletCharacter
sage: M = FreeModule(Zmod(16), 3)
sage: DirichletCharacter(G, M([4, 8, 8]))
Traceback (most recent call last):
...
ValueError: values (= (4, 8, 8) modulo 16) must have additive orders dividing (2, 16, 2), respectively
"""
MultiplicativeGroupElement.__init__(self, parent)
if check:
orders = parent.integers_mod().unit_group().gens_orders()
if len(x) != len(orders):
raise ValueError("wrong number of values (= {}) on generators (want {})".format(x, len(orders)))
if free_module_element.is_FreeModuleElement(x):
x = parent._module(x)
if any(u * v for u, v in zip(x, orders)):
raise ValueError("values (= {} modulo {}) must have additive orders dividing {}, respectively"
.format(x, parent.zeta_order(), orders))
self.element.set_cache(x)
else:
R = parent.base_ring()
x = tuple(map(R, x))
if R.is_exact() and any(u**v != 1 for u, v in zip(x, orders)):
raise ValueError("values (= {}) must have multiplicative orders dividing {}, respectively"
.format(x, orders))
self.values_on_gens.set_cache(x)
else:
if free_module_element.is_FreeModuleElement(x):
self.element.set_cache(x)
else:
self.values_on_gens.set_cache(x)
@cached_method
def __eval_at_minus_one(self):
r"""
Efficiently evaluate the character at -1 using knowledge of its
order. This is potentially much more efficient than computing the
value of -1 directly using dlog and a large power of the image root
of unity.
We use the following. Proposition: Suppose eps is a character mod
`p^n`, where `p` is a prime. Then
`\varepsilon(-1) = -1` if and only if `p = 2` and
the factor of eps at 4 is nontrivial or `p > 2` and 2 does
not divide `\phi(p^n)/\mbox{\rm ord}(\varepsilon)`.
EXAMPLES::
sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one()
-1
"""
D = self.decomposition()
val = self.base_ring()(1)
for e in D:
if e.modulus() % 2 == 0:
if e.modulus() % 4 == 0:
val *= e.values_on_gens()[0] # first gen is -1 for 2-power modulus
elif (euler_phi(e.parent().modulus()) / e.order()) % 2:
val *= -1
return val
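    # Added worked example (comment only, not original Sage code): modulo 5 we have
    # phi(5) = 4, so for the order-4 character chi, phi/ord = 1 is odd and the
    # proposition gives chi(-1) = -1 (indeed chi(4) = chi(2)^2 = zeta4^2 = -1), while
    # for the order-2 character chi^2, phi/ord = 2 is even and chi^2(-1) = 1.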
def __call__(self, m):
"""
Return the value of this character at the integer `m`.
.. warning::
A table of values of the character is made the first time
you call this (unless `m` equals -1)
EXAMPLES::
sage: G = DirichletGroup(60)
sage: e = prod(G.gens(), G(1))
sage: e
Dirichlet character modulo 60 of conductor 60 mapping 31 |--> -1, 41 |--> -1, 37 |--> zeta4
sage: e(-1)
-1
sage: e(2)
0
sage: e(7)
-zeta4
sage: Integers(60).unit_gens()
(31, 41, 37)
sage: e(31)
-1
sage: e(41)
-1
sage: e(37)
zeta4
sage: e(31*37)
-zeta4
sage: parent(e(31*37))
Cyclotomic Field of order 4 and degree 2
"""
N = self.modulus()
m = m % N
if self.values.is_in_cache() or m != N - 1:
return self.values()[m]
else:
return self.__eval_at_minus_one()
def change_ring(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: e = DirichletGroup(7, QQ).0
sage: f = e.change_ring(QuadraticField(3, 'a'))
sage: f.parent()
Group of Dirichlet characters modulo 7 with values in Number Field in a with defining polynomial x^2 - 3 with a = 1.732050807568878?
::
sage: e = DirichletGroup(13).0
sage: e.change_ring(QQ)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta12 to a rational
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: chi = DirichletGroup(5, K)[1]
sage: chi(2)
i
sage: f = K.complex_embeddings()[0]
sage: psi = chi.change_ring(f)
sage: psi(2)
-1.83697019872103e-16 - 1.00000000000000*I
"""
if self.base_ring() is R:
return self
G = self.parent().change_ring(R)
return G.element_class(G, [R(x) for x in self.values_on_gens()])
def _richcmp_(self, other, op):
"""
Compare ``self`` to ``other``.
.. NOTE::
Since there is no coercion between Dirichlet groups
of different moduli, characters of different moduli
compare as unequal, even if they define identical
functions on ``ZZ``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: f = e.restrict(8)
sage: e == e
True
sage: f == f
True
sage: e == f
False
sage: k = DirichletGroup(7)([-1])
sage: k == e
False
"""
return richcmp(self.values_on_gens(), other.values_on_gens(), op)
def __hash__(self):
"""
Return the hash of ``self``.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: hash(e) == hash((-1,1))
True
"""
return hash(self.values_on_gens())
def __invert__(self):
"""
Return the multiplicative inverse of self.
EXAMPLES::
sage: e = DirichletGroup(13).0
sage: f = ~e
sage: f*e
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = -self.element()
else:
x = tuple(~z for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _mul_(self, other):
"""
Return the product of self and other.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: b
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: a*b # indirect doctest
Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> zeta4
Multiplying elements whose parents have different zeta orders works::
sage: a = DirichletGroup(3, QQ, zeta=1, zeta_order=1)(1)
sage: b = DirichletGroup(3, QQ, zeta=-1, zeta_order=2)([-1])
sage: a * b # indirect doctest
Dirichlet character modulo 3 of conductor 3 mapping 2 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = self.element() + other.element()
else:
x = tuple(y * z for y, z in zip(self.values_on_gens(), other.values_on_gens()))
return G.element_class(G, x, check=False)
def __copy__(self):
"""
Return a (shallow) copy of this Dirichlet character.
EXAMPLES::
sage: G.<a> = DirichletGroup(11)
sage: b = copy(a)
sage: a is b
False
sage: a.element() is b.element()
False
sage: a.values_on_gens() is b.values_on_gens()
True
"""
# This method exists solely because of a bug in the cPickle module --
# see modsym/manin_symbols.py.
G = self.parent()
return G.element_class(G, self.values_on_gens(), check=False)
def __pow__(self, n):
"""
Return self raised to the power of n
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a^2
Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1
sage: b^2
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
"""
G = self.parent()
if G.zeta.is_in_cache():
x = n * self.element()
else:
x = tuple(z**n for z in self.values_on_gens())
return G.element_class(G, x, check=False)
def _repr_short_(self):
r"""
A short string representation of self, often used in string representations of modular forms
EXAMPLES::
sage: chi = DirichletGroup(24).0
sage: chi._repr_short_()
'[-1, 1, 1]'
"""
return str(list(self.values_on_gens()))
def _repr_(self):
"""
String representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: repr(a) # indirect doctest
'Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1'
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: DirichletGroup(1)[0]
Dirichlet character modulo 1 of conductor 1
sage: DirichletGroup(2)[0]
Dirichlet character modulo 2 of conductor 1
"""
s = 'Dirichlet character modulo %s of conductor %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += ' mapping '
for i in range(r):
if i != 0:
s += ', '
s += str(self.parent().unit_gens()[i]) + ' |--> ' + str(self.values_on_gens()[i])
return s
def _latex_(self):
r"""
LaTeX representation of self.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(16)
sage: latex(b) # indirect doctest
\hbox{Dirichlet character modulo } 16 \hbox{ of conductor } 16 \hbox{ mapping } 15 \mapsto 1,\ 5 \mapsto \zeta_{4}
TESTS:
Dirichlet characters modulo 1 and 2 are printed correctly (see
:trac:`17338`)::
sage: latex(DirichletGroup(1)[0])
\hbox{Dirichlet character modulo } 1 \hbox{ of conductor } 1
sage: latex(DirichletGroup(2)[0])
\hbox{Dirichlet character modulo } 2 \hbox{ of conductor } 1
"""
s = r'\hbox{Dirichlet character modulo } %s \hbox{ of conductor } %s' % (self.modulus(), self.conductor())
r = len(self.values_on_gens())
if r != 0:
s += r' \hbox{ mapping } '
for i in range(r):
if i != 0:
s += r',\ '
s += self.parent().unit_gens()[i]._latex_() + r' \mapsto ' + self.values_on_gens()[i]._latex_()
return s
def base_ring(self):
"""
Returns the base ring of this Dirichlet character.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: G.gen(0).base_ring()
Cyclotomic Field of order 10 and degree 4
sage: G = DirichletGroup(11, RationalField())
sage: G.gen(0).base_ring()
Rational Field
"""
return self.parent().base_ring()
def bar(self):
"""
Return the complex conjugate of this Dirichlet character.
EXAMPLES::
sage: e = DirichletGroup(5).0
sage: e
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4
sage: e.bar()
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
"""
return ~self
def bernoulli(self, k, algorithm='recurrence', cache=True, **opts):
r"""
Returns the generalized Bernoulli number `B_{k,eps}`.
INPUT:
- ``k`` -- a non-negative integer
- ``algorithm`` -- either ``'recurrence'`` (default) or
``'definition'``
- ``cache`` -- if True, cache answers
- ``**opts`` -- optional arguments; not used directly, but
passed to the :func:`bernoulli` function if this is called
OUTPUT:
Let `\varepsilon` be a (not necessarily primitive) character
of modulus `N`. This function returns the generalized
Bernoulli number `B_{k,\varepsilon}`, as defined by the
following identity of power series (see for example
[DI1995]_, Section 2.2):
.. MATH::
\sum_{a=1}^N \frac{\varepsilon(a) t e^{at}}{e^{Nt}-1}
            = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!} t^k.
ALGORITHM:
The ``'recurrence'`` algorithm computes generalized Bernoulli
numbers via classical Bernoulli numbers using the formula in
[Coh2007]_, Proposition 9.4.5; this is usually optimal. The
``definition`` algorithm uses the definition directly.
.. WARNING::
In the case of the trivial Dirichlet character modulo 1,
this function returns `B_{1,\varepsilon} = 1/2`, in
accordance with the above definition, but in contrast to
the value `B_1 = -1/2` for the classical Bernoulli number.
Some authors use an alternative definition giving
`B_{1,\varepsilon} = -1/2`; see the discussion in
[Coh2007]_, Section 9.4.1.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.bernoulli(5)
7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13
sage: eps = DirichletGroup(9).0
sage: eps.bernoulli(3)
10*zeta6 + 4
sage: eps.bernoulli(3, algorithm="definition")
10*zeta6 + 4
TESTS:
Check that :trac:`17586` is fixed::
sage: DirichletGroup(1)[0].bernoulli(1)
1/2
"""
if cache:
try:
self.__bernoulli
except AttributeError:
self.__bernoulli = {}
if k in self.__bernoulli:
return self.__bernoulli[k]
N = self.modulus()
K = self.base_ring()
if N == 1:
# By definition, the first Bernoulli number of the trivial
# character is 1/2, in contrast to the value B_1 = -1/2.
ber = K.one()/2 if k == 1 else K(bernoulli(k))
elif self(-1) != K((-1)**k):
ber = K.zero()
elif algorithm == "recurrence":
# The following code is pretty fast, at least compared to
# the other algorithm below. That said, I'm sure it could
# be sped up by a factor of 10 or more in many cases,
# especially since we end up computing all the Bernoulli
# numbers up to k, which should be done with power series
# instead of calls to the Bernoulli function. Likewise
# computing all binomial coefficients can be done much
# more efficiently.
v = self.values()
S = lambda n: sum(v[r] * r**n for r in range(1, N))
ber = K(sum(binomial(k,j) * bernoulli(j, **opts) *
N**(j-1) * S(k-j) for j in range(k+1)))
elif algorithm == "definition":
# This is better since it computes the same thing, but requires
# no arith in a poly ring over a number field.
prec = k+2
R = rings.PowerSeriesRing(rings.QQ, 't')
t = R.gen()
# g(t) = t/(e^{Nt}-1)
g = t/((N*t).exp(prec) - 1)
# h(n) = g(t)*e^{nt}
h = [0] + [g * ((n*t).exp(prec)) for n in range(1,N+1)]
ber = sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k)
else:
raise ValueError("algorithm = '%s' unknown"%algorithm)
if cache:
self.__bernoulli[k] = ber
return ber
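    # Added worked check (comment only, not original Sage code): for the nontrivial
    # character eps modulo N = 4 and k = 1, the recurrence above uses
    # S(n) = eps(1)*1^n + eps(3)*3^n, so S(1) = 1 - 3 = -2 and S(0) = 0, giving
    # B_{1,eps} = binomial(1,0)*B_0*4^(-1)*S(1) + binomial(1,1)*B_1*4^0*S(0)
    #           = (1/4)*(-2) + 0 = -1/2,
    # which matches the classical formula B_{1,eps} = (1/4)*(1*eps(1) + 3*eps(3)).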
def lfunction(self, prec=53, algorithm='pari'):
"""
Return the L-function of ``self``.
The result is a wrapper around a PARI L-function or around
the ``lcalc`` program.
INPUT:
- ``prec`` -- precision (default 53)
- ``algorithm`` -- 'pari' (default) or 'lcalc'
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: L = a.lfunction(); L
PARI L-function associated to Dirichlet character modulo 20
of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: L(4)
0.988944551741105
With the algorithm "lcalc"::
sage: a = a.primitive_character()
sage: L = a.lfunction(algorithm='lcalc'); L
L-function with complex Dirichlet coefficients
sage: L.value(4) # abs tol 1e-14
0.988944551741105 - 5.16608739123418e-18*I
"""
if algorithm is None:
algorithm = 'pari'
if algorithm == 'pari':
from sage.lfunctions.pari import lfun_character, LFunction
Z = LFunction(lfun_character(self), prec=prec)
Z.rename('PARI L-function associated to %s' % self)
return Z
elif algorithm == 'lcalc':
from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character
return Lfunction_from_character(self)
raise ValueError('algorithm must be "pari" or "lcalc"')
@cached_method
def conductor(self):
"""
Computes and returns the conductor of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.conductor()
4
sage: b.conductor()
5
sage: (a*b).conductor()
20
TESTS::
sage: G.<a, b> = DirichletGroup(20)
sage: type(G(1).conductor())
<type 'sage.rings.integer.Integer'>
"""
if self.modulus() == 1 or self.is_trivial():
return rings.Integer(1)
F = factor(self.modulus())
if len(F) > 1:
return prod([d.conductor() for d in self.decomposition()])
p = F[0][0]
# When p is odd, and x =/= 1, the conductor is the smallest p**r such that
# Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1).
# For a given r, whether or not the above divisibility holds
# depends only on the factor of p**(r-1) on the right hand side.
# Since p-1 is coprime to p, this smallest r such that the
# divisibility holds equals Valuation(Order(x),p)+1.
cond = p**(valuation(self.order(),p) + 1)
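# At p = 2 the unit group modulo 2^r (r >= 3) is generated by -1 and 5,
# so the formula above needs an extra factor of 2 whenever the character
# is nontrivial on the second generator.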
if p == 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1:
cond *= 2
return rings.Integer(cond)
@cached_method
def decomposition(self):
r"""
Return the decomposition of self as a product of Dirichlet
characters of prime power modulus, where the prime powers exactly
divide the modulus of this character.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: c = a*b
sage: d = c.decomposition(); d
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
sage: d[0].parent()
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2
sage: d[1].parent()
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
We can't multiply directly, since coercion of one element into the
other parent fails in both cases::
sage: d[0]*d[1] == c
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'
We can multiply if we're explicit about where we want the
multiplication to take place.
::
sage: G(d[0])*G(d[1]) == c
True
Conductors that are divisible by various powers of 2 present
some problems as the multiplicative group modulo `2^k` is
trivial for `k = 1` and non-cyclic for `k \ge 3`::
sage: (DirichletGroup(18).0).decomposition()
[Dirichlet character modulo 2 of conductor 1, Dirichlet character modulo 9 of conductor 9 mapping 2 |--> zeta6]
sage: (DirichletGroup(36).0).decomposition()
[Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
sage: (DirichletGroup(72).0).decomposition()
[Dirichlet character modulo 8 of conductor 4 mapping 7 |--> -1, 5 |--> 1, Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
"""
D = self.parent().decomposition()
vals = [[z] for z in self.values_on_gens()]
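# Regroup the values on generators so that there is one entry per
# prime-power factor: when 8 | N the 2-part accounts for two unit
# generators, and when N = 2 (mod 4) it accounts for none.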
if self.modulus() % 8 == 0: # 2 factors at 2.
vals[0].append(vals[1][0])
del vals[1]
elif self.modulus() % 4 == 2: # 0 factors at 2.
vals = [1] + vals
return [D[i](vals[i]) for i in range(len(D))]
def extend(self, M):
"""
Returns the extension of this character to a Dirichlet character
modulo the multiple M of the modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: H.<c> = DirichletGroup(4)
sage: c.extend(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: a
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: c.extend(20) == a
True
"""
if M % self.modulus() != 0:
raise ArithmeticError("M(=%s) must be a multiple of the modulus(=%s)"%(M,self.modulus()))
H = DirichletGroup(M, self.base_ring())
return H(self)
def _pari_conversion(self):
r"""
Prepare data for the conversion of the character to Pari.
OUTPUT:
pair ``(G, v)`` where ``G`` is `(\ZZ / N \ZZ)^*` in PARI form (`N` being the modulus)
and ``v`` is the list of exponents giving the values of this character on the
PARI generators of that group
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4._pari_conversion()
([[4, [0]], [2, [2], [3]], [[2]~, Vecsmall([2])],
[[4], [[1, matrix(0,2)]], Mat(1), [3], [2], [0]], Mat(1)], [1])
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi._pari_conversion()
([[24, [0]], [8, [2, 2, 2], [7, 13, 17]],
[[2, 2, 3]~, Vecsmall([3, 3, 1])],
[[8, 8, 3], [[1, matrix(0,2)], [1, matrix(0,2)], [2, Mat([2, 1])]],
[1, 0, 0; 0, 1, 0; 0, 0, 1], [7, 13, 17], [2, 2, 2], [0, 0, 0]],
[1, 0, 0; 0, 1, 0; 0, 0, 1]], [0, 1, 1])
"""
G = pari.znstar(self.modulus(), 1)
pari_orders = G[1][1]
pari_gens = G[1][2]
# one should use the following, but this does not work
# pari_orders = G.cyc()
# pari_gens = G.gen()
values_on_gens = (self(x) for x in pari_gens)
# now compute the input for pari (list of exponents)
P = self.parent()
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = [int(x.argument() / zeta_argument) for x in values_on_gens]
else:
dlog = P._zeta_dlog
v = [dlog[x] for x in values_on_gens]
m = P.zeta_order()
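# Rescale: v[i] is an exponent relative to a root of unity of order m,
# while PARI expects an exponent relative to the order of the i-th
# generator of (Z/NZ)^*.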
v = [(vi * oi) // m for vi, oi in zip(v, pari_orders)]
return (G, v)
def conrey_number(self):
r"""
Return the Conrey number for this character.
This is a positive integer coprime to q that identifies a
Dirichlet character of modulus q.
See https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey
EXAMPLES::
sage: chi4 = DirichletGroup(4).gen()
sage: chi4.conrey_number()
3
sage: chi = DirichletGroup(24)([1,-1,-1]); chi
Dirichlet character modulo 24 of conductor 24
mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
sage: chi.conrey_number()
5
sage: chi = DirichletGroup(60)([1,-1,I])
sage: chi.conrey_number()
17
sage: chi = DirichletGroup(420)([1,-1,-I,1])
sage: chi.conrey_number()
113
TESTS::
sage: eps1 = DirichletGroup(5)([-1])
sage: eps2 = DirichletGroup(5,QQ)([-1])
sage: eps1.conrey_number() == eps2.conrey_number()
True
"""
G, v = self._pari_conversion()
return pari.znconreyexp(G, v).sage()
def lmfdb_page(self):
r"""
Open the LMFDB web page of the character in a browser.
See https://www.lmfdb.org
EXAMPLES::
sage: E = DirichletGroup(4).gen()
sage: E.lmfdb_page() # optional -- webbrowser
"""
import webbrowser
lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}'
url = lmfdb_url.format(self.modulus(), self.conrey_number())
webbrowser.open(url)
def galois_orbit(self, sort=True):
r"""
Return the orbit of this character under the action of the absolute
Galois group of the prime subfield of the base ring.
EXAMPLES::
sage: G = DirichletGroup(30); e = G.1
sage: e.galois_orbit()
[Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> -zeta4,
Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> zeta4]
Another example::
sage: G = DirichletGroup(13)
sage: G.galois_orbits()
[
[Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1],
...,
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1]
]
sage: e = G.0
sage: e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^3 + zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^3 - zeta12,
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12]
sage: e = G.0^2; e
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2
sage: e.galois_orbit()
[Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2, Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]
A non-example::
sage: chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0
sage: chi.galois_orbit()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if not self.base_ring().is_integral_domain():
raise TypeError("Galois orbits only defined if base ring is an integral domain")
k = self.order()
if k <= 2:
return [self]
P = self.parent()
z = self.element()
o = int(z.additive_order())
Auts = set([m % o for m in P._automorphisms()])
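# Each automorphism acts by raising the character to the m-th power,
# which multiplies the exponent vector z by m.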
v = [P.element_class(P, m * z, check=False) for m in Auts]
if sort:
v.sort()
return v
def gauss_sum(self, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
FACTS: If the modulus is a prime `p` and the character is
nontrivial, then the Gauss sum has absolute value `\sqrt{p}`.
CACHING: Computed Gauss sums are *not* cached with this character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.gauss_sum(1)
2*zeta6 - 1
sage: e.gauss_sum(2)
-2*zeta6 + 1
sage: norm(e.gauss_sum())
3
::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.gauss_sum()
-zeta156^46 + zeta156^45 + zeta156^42 + zeta156^41 + 2*zeta156^40 + zeta156^37 - zeta156^36 - zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28 - zeta156^24 - zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 + zeta156^18 - zeta156^16 - zeta156^15 - 2*zeta156^14 - zeta156^10 + zeta156^8 + zeta156^7 + zeta156^6 + zeta156^5 - zeta156^4 - zeta156^2 - 1
sage: factor(norm(e.gauss_sum()))
13^24
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum()
-2.440133358345538? + 1.022618791871794?*I
Check that :trac:`19060` is fixed::
sage: K.<z> = CyclotomicField(8)
sage: G = DirichletGroup(13, K)
sage: chi = G([z^2])
sage: chi.gauss_sum()
zeta52^22 + zeta52^21 + zeta52^19 - zeta52^16 + zeta52^15 + zeta52^14 + zeta52^12 - zeta52^11 - zeta52^10 - zeta52^7 - zeta52^5 + zeta52^4
Check that :trac:`25127` is fixed::
sage: G = DirichletGroup(1)
sage: chi = G.one()
sage: chi.gauss_sum()
1
.. SEEALSO::
- :func:`sage.arith.misc.gauss_sum` for general finite fields
- :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version
"""
G = self.parent()
K = G.base_ring()
chi = self
m = G.modulus()
if is_ComplexField(K):
return self.gauss_sum_numerical(a=a)
elif is_AlgebraicField(K):
L = K
zeta = L.zeta(m)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
chi = chi.minimize_base_ring()
n = lcm(m, G.zeta_order())
L = rings.CyclotomicField(n)
zeta = L.gen(0) ** (n // m)
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = zeta ** a
g = L(chi(0))
z = L.one()
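# Accumulate g_a(chi) = sum_r chi(r) * zeta^(a*r): z runs through
# zeta^a, zeta^(2a), ... while c runs through chi(1), chi(2), ...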
for c in chi.values()[1:]:
z *= zeta
g += L(c)*z
return g
def gauss_sum_numerical(self, prec=53, a=1):
r"""
Return a Gauss sum associated to this Dirichlet character as an
approximate complex number with prec bits of precision.
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`gauss_sum`.
The Gauss sum associated to `\chi` is
.. MATH::
g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
sage: abs(e.gauss_sum_numerical())
1.7320508075...
sage: sqrt(3.0)
1.73205080756888
sage: e.gauss_sum_numerical(a=2)
-...e-15 - 1.7320508075...*I
sage: e.gauss_sum_numerical(a=2, prec=100)
4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I
sage: G = DirichletGroup(13)
sage: H = DirichletGroup(13, CC)
sage: e = G.0
sage: f = H.0
sage: e.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: f.gauss_sum_numerical()
-3.07497205... + 1.8826966926...*I
sage: abs(e.gauss_sum_numerical())
3.60555127546...
sage: abs(f.gauss_sum_numerical())
3.60555127546...
sage: sqrt(13.0)
3.60555127546399
TESTS:
The field of algebraic numbers is supported (:trac:`19056`)::
sage: G = DirichletGroup(7, QQbar)
sage: G[1].gauss_sum_numerical()
-2.44013335834554 + 1.02261879187179*I
"""
G = self.parent()
K = G.base_ring()
if is_ComplexField(K):
phi = lambda t : t
CC = K
elif is_AlgebraicField(K):
from sage.rings.complex_mpfr import ComplexField
CC = ComplexField(prec)
phi = CC.coerce_map_from(K)
elif number_field.is_CyclotomicField(K) or is_RationalField(K):
phi = K.complex_embedding(prec)
CC = phi.codomain()
else:
raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
zeta = CC.zeta(G.modulus()) ** a
g = phi(self(0))
z = CC.one()
for c in self.values()[1:]:
z *= zeta
g += phi(c)*z
return g
def jacobi_sum(self, char, check=True):
r"""
Return the Jacobi sum associated to these Dirichlet characters
(i.e., J(self,char)).
This is defined as
.. MATH::
J(\chi, \psi) = \sum_{a \in \ZZ / N\ZZ} \chi(a) \psi(1-a)
where `\chi` and `\psi` are both characters modulo `N`.
EXAMPLES::
sage: D = DirichletGroup(13)
sage: e = D.0
sage: f = D[-2]
sage: e.jacobi_sum(f)
3*zeta12^2 + 2*zeta12 - 3
sage: f.jacobi_sum(e)
3*zeta12^2 + 2*zeta12 - 3
sage: p = 7
sage: DP = DirichletGroup(p)
sage: f = DP.0
sage: e.jacobi_sum(f)
Traceback (most recent call last):
...
NotImplementedError: Characters must be from the same Dirichlet Group.
sage: all_jacobi_sums = [(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j]))
....: for i in range(p-1) for j in range(i, p-1)]
sage: for s in all_jacobi_sums:
....: print(s)
((1,), (1,), 5)
((1,), (zeta6,), -1)
((1,), (zeta6 - 1,), -1)
((1,), (-1,), -1)
((1,), (-zeta6,), -1)
((1,), (-zeta6 + 1,), -1)
((zeta6,), (zeta6,), -zeta6 + 3)
((zeta6,), (zeta6 - 1,), 2*zeta6 + 1)
((zeta6,), (-1,), -2*zeta6 - 1)
((zeta6,), (-zeta6,), zeta6 - 3)
((zeta6,), (-zeta6 + 1,), 1)
((zeta6 - 1,), (zeta6 - 1,), -3*zeta6 + 2)
((zeta6 - 1,), (-1,), 2*zeta6 + 1)
((zeta6 - 1,), (-zeta6,), -1)
((zeta6 - 1,), (-zeta6 + 1,), -zeta6 - 2)
((-1,), (-1,), 1)
((-1,), (-zeta6,), -2*zeta6 + 3)
((-1,), (-zeta6 + 1,), 2*zeta6 - 3)
((-zeta6,), (-zeta6,), 3*zeta6 - 1)
((-zeta6,), (-zeta6 + 1,), -2*zeta6 + 3)
((-zeta6 + 1,), (-zeta6 + 1,), zeta6 + 2)
Let's check that trivial sums are being calculated correctly::
sage: N = 13
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
11
sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])
11
And sums where exactly one character is nontrivial (see :trac:`6393`)::
sage: G = DirichletGroup(5); X=G.list(); Y=X[0]; Z=X[1]
sage: Y.jacobi_sum(Z)
-1
sage: Z.jacobi_sum(Y)
-1
Now let's take a look at a non-prime modulus::
sage: N = 9
sage: D = DirichletGroup(N)
sage: g = D(1)
sage: g.jacobi_sum(g)
3
We consider a sum with values in a finite field::
sage: g = DirichletGroup(17, GF(9,'a')).0
sage: g.jacobi_sum(g**2)
2*a
TESTS:
This shows that :trac:`6393` has been fixed::
sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]
sage: # Y is trivial and Z is quartic
sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)])
-1
sage: # The value -1 above is the correct value of the Jacobi sum J(Y, Z).
sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y)
-1
-1
"""
if check:
if self.parent() != char.parent():
raise NotImplementedError("Characters must be from the same Dirichlet Group.")
return sum([self(x) * char(1-x) for x in rings.IntegerModRing(self.modulus())])
def kloosterman_sum(self, a=1, b=0):
r"""
Return the "twisted" Kloosterman sum associated to this Dirichlet character.
This includes Gauss sums, classical Kloosterman sums, Salié sums, etc.
The Kloosterman sum associated to `\chi` and the integers a,b is
.. MATH::
K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar+br^{-1}},
where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m^{th}` root of unity. This reduces to the Gauss sum if `b=0`.
This method performs an exact calculation and returns an element of a
suitable cyclotomic field; see also :meth:`.kloosterman_sum_numerical`,
which gives an inexact answer (but is generally much quicker).
CACHING: Computed Kloosterman sums are *not* cached with this
character.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G([-1])
sage: e.kloosterman_sum(3,5)
-2*zeta6 + 1
sage: G = DirichletGroup(20)
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*zeta20^6 + 2*zeta20^4 + 4
TESTS::
sage: G = DirichletGroup(20, UniversalCyclotomicField())
sage: e = G([1 for u in G.unit_gens()])
sage: e.kloosterman_sum(7,17)
-2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4
sage: G = DirichletGroup(12, QQbar)
sage: e = G.gens()[0]
sage: e.kloosterman_sum(5,11)
Traceback (most recent call last):
...
NotImplementedError: Kloosterman sums not implemented over this ring
"""
G = self.parent()
zo = G.zeta_order()
m = G.modulus()
g = 0
L = rings.CyclotomicField(m.lcm(zo))
zeta = L.gen(0)
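# Make sure values of the character can be multiplied with elements of
# the cyclotomic field; otherwise the sum cannot be formed over this
# base ring.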
try:
self(1) * zeta**(a+b)
except TypeError:
raise NotImplementedError('Kloosterman sums not implemented '
'over this ring')
n = zeta.multiplicative_order()
zeta = zeta**(n // m)
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
g += self(c) * zeta**int(a*e + b*e**(-1))
return g
def kloosterman_sum_numerical(self, prec=53, a=1, b=0):
r"""
Return the Kloosterman sum associated to this Dirichlet character as
an approximate complex number with prec bits of precision.
See also :meth:`.kloosterman_sum`, which calculates the sum
exactly (which is generally slower).
INPUT:
- ``prec`` -- integer (default: 53), *bits* of precision
- ``a`` -- integer, as for :meth:`.kloosterman_sum`
- ``b`` -- integer, as for :meth:`.kloosterman_sum`.
EXAMPLES::
sage: G = DirichletGroup(3)
sage: e = G.0
The real component of the numerical value of e is near zero::
sage: v=e.kloosterman_sum_numerical()
sage: v.real() < 1.0e-15
True
sage: v.imag()
1.73205080756888
sage: G = DirichletGroup(20)
sage: e = G.1
sage: e.kloosterman_sum_numerical(53,3,11)
3.80422606518061 - 3.80422606518061*I
"""
G = self.parent()
K = G.base_ring()
if not (number_field.is_CyclotomicField(K) or is_RationalField(K)):
raise NotImplementedError("Kloosterman sums only currently implemented when the base ring is a cyclotomic field or QQ.")
phi = K.complex_embedding(prec)
CC = phi.codomain()
g = 0
m = G.modulus()
zeta = CC.zeta(m)
for c in m.coprime_integers(m):
e = rings.Mod(c, m)
z = zeta ** int(a*e + b*(e**(-1)))
g += phi(self(c))*z
return g
@cached_method
def is_even(self):
r"""
Return ``True`` if and only if `\varepsilon(-1) = 1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(13, CC)
sage: e = G.0
sage: e.is_even()
False
sage: e(-1)
-1.000000...
sage: [e.is_even() for e in G]
[True, False, True, False, True, False, True, False, True, False, True, False]
sage: G = DirichletGroup(100000, CC)
sage: G.1.is_even()
True
Note that ``is_even`` need not be the negation of
``is_odd``, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(1)) < 0.5
return self(-1) == R(1)
@cached_method
def is_odd(self):
r"""
Return ``True`` if and only if
`\varepsilon(-1) = -1`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(13, CC)
sage: e = G.0
sage: e.is_odd()
True
sage: [e.is_odd() for e in G]
[False, True, False, True, False, True, False, True, False, True, False, True]
sage: G = DirichletGroup(100000, CC)
sage: G.0.is_odd()
True
Note that ``is_even`` need not be the negation of
``is_odd``, e.g., in characteristic 2::
sage: G.<e> = DirichletGroup(13, GF(4,'a'))
sage: e.is_even()
True
sage: e.is_odd()
True
"""
R = self.base_ring()
# self(-1) is either +1 or -1
if not R.is_exact():
return abs(self(-1) - R(-1)) < 0.5
return self(-1) == R(-1)
@cached_method
def is_primitive(self):
"""
Return ``True`` if and only if this character is
primitive, i.e., its conductor equals its modulus.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
sage: G.<a,b> = DirichletGroup(20, CC)
sage: a.is_primitive()
False
sage: b.is_primitive()
False
sage: (a*b).is_primitive()
True
"""
return (self.conductor() == self.modulus())
@cached_method
def is_trivial(self):
r"""
Returns ``True`` if this is the trivial character,
i.e., has order 1.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.is_trivial()
False
sage: (a^2).is_trivial()
True
"""
if self.element.is_in_cache():
return not self.element()
one = self.base_ring().one()
return all(x == one for x in self.values_on_gens())
def kernel(self):
r"""
Return the kernel of this character.
OUTPUT: Currently the kernel is returned as a list. This may
change.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.kernel()
[1, 9, 13, 17]
sage: b.kernel()
[1, 11]
"""
one = self.base_ring().one()
return [x for x in range(self.modulus()) if self(x) == one]
def maximize_base_ring(self):
r"""
Let
.. MATH::
\varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)
be a Dirichlet character. This function returns an equal Dirichlet
character
.. MATH::
\chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_m)
where `m` is the least common multiple of `n` and
the exponent of `(\ZZ/N\ZZ)^*`.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20,QQ)
sage: b.maximize_base_ring()
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
sage: b.maximize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: DirichletGroup(20).base_ring()
Cyclotomic Field of order 4 and degree 2
"""
g = rings.IntegerModRing(self.modulus()).unit_group_exponent()
if g == 1:
g = 2
z = self.base_ring().zeta()
n = z.multiplicative_order()
m = lcm(g,n)
if n == m:
return self
K = rings.CyclotomicField(m)
return self.change_ring(K)
def minimize_base_ring(self):
r"""
Return a Dirichlet character that equals this one, but over as
small a subfield (or subring) of the base ring as possible.
.. note::
This function is currently only implemented when the base
ring is a number field. It's the identity function in
characteristic p.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: e = DirichletGroup(13).0
sage: e.base_ring()
Cyclotomic Field of order 12 and degree 4
sage: e.minimize_base_ring().base_ring()
Cyclotomic Field of order 12 and degree 4
sage: (e^2).minimize_base_ring().base_ring()
Cyclotomic Field of order 6 and degree 2
sage: (e^3).minimize_base_ring().base_ring()
Cyclotomic Field of order 4 and degree 2
sage: (e^12).minimize_base_ring().base_ring()
Rational Field
TESTS:
Check that :trac:`18479` is fixed::
sage: f = Newforms(Gamma1(25), names='a')[1]
sage: eps = f.character()
sage: eps.minimize_base_ring() == eps
True
A related bug (see :trac:`18086`)::
sage: K.<a,b>=NumberField([x^2 + 1, x^2 - 3])
sage: chi = DirichletGroup(7, K).0
sage: chi.minimize_base_ring()
Dirichlet character modulo 7 of conductor 7 mapping 3 |--> -1/2*b*a + 1/2
"""
R = self.base_ring()
if R.is_prime_field():
return self
p = R.characteristic()
if p:
K = rings.IntegerModRing(p)
elif self.order() <= 2:
K = rings.QQ
elif (isinstance(R, number_field.NumberField_generic)
and euler_phi(self.order()) < R.absolute_degree()):
K = rings.CyclotomicField(self.order())
else:
return self
try:
return self.change_ring(K)
except (TypeError, ValueError, ArithmeticError):
return self
def modulus(self):
"""
The modulus of this character.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.modulus()
100
sage: e.conductor()
4
"""
return self.parent().modulus()
def level(self):
"""
Synonym for modulus.
EXAMPLES::
sage: e = DirichletGroup(100, QQ).0
sage: e.level()
100
"""
return self.modulus()
@cached_method
def multiplicative_order(self):
"""
The order of this character.
EXAMPLES::
sage: e = DirichletGroup(100).1
sage: e.order() # same as multiplicative_order, since group is multiplicative
20
sage: e.multiplicative_order()
20
sage: e = DirichletGroup(100).0
sage: e.multiplicative_order()
2
"""
if self.parent().zeta.is_in_cache():
return self.element().additive_order()
return lcm([z.multiplicative_order() for z in self.values_on_gens()])
def primitive_character(self):
"""
Returns the primitive character associated to self.
EXAMPLES::
sage: e = DirichletGroup(100).0; e
Dirichlet character modulo 100 of conductor 4 mapping 51 |--> -1, 77 |--> 1
sage: e.conductor()
4
sage: f = e.primitive_character(); f
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: f.modulus()
4
"""
return self.restrict(self.conductor())
def restrict(self, M):
"""
Returns the restriction of this character to a Dirichlet character
modulo the divisor M of the modulus, which must also be a multiple
of the conductor of this character.
EXAMPLES::
sage: e = DirichletGroup(100).0
sage: e.modulus()
100
sage: e.conductor()
4
sage: e.restrict(20)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: e.restrict(4)
Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
sage: e.restrict(50)
Traceback (most recent call last):
...
ValueError: conductor(=4) must divide M(=50)
"""
M = int(M)
if self.modulus()%M != 0:
raise ValueError("M(=%s) must divide the modulus(=%s)"%(M,self.modulus()))
if M%self.conductor() != 0:
raise ValueError("conductor(=%s) must divide M(=%s)"%(self.conductor(),M))
H = DirichletGroup(M, self.base_ring())
return H(self)
@cached_method
def values(self):
"""
Return a list of the values of this character on each integer
between 0 and the modulus.
EXAMPLES::
sage: e = DirichletGroup(20)(1)
sage: e.values()
[0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
sage: e = DirichletGroup(20).gen(0)
sage: e.values()
[0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1]
sage: e = DirichletGroup(20).gen(1)
sage: e.values()
[0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1]
sage: e = DirichletGroup(21).gen(0) ; e.values()
[0, 1, -1, 0, 1, -1, 0, 0, -1, 0, 1, -1, 0, 1, 0, 0, 1, -1, 0, 1, -1]
sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values()
[0, 1, 36, 0, 1, 36, 0, 0, 36, 0, 1, 36, 0, 1, 0, 0, 1, 36, 0, 1, 36]
sage: e = DirichletGroup(21, base_ring=GF(3)).gen(0) ; e.values()
[0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 2]
::
sage: chi = DirichletGroup(100151, CyclotomicField(10)).0
sage: ls = chi.values() ; ls[0:10]
[0,
1,
-zeta10^3,
-zeta10,
-zeta10,
1,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10,
zeta10^3 - zeta10^2 + zeta10 - 1,
zeta10^2]
TESTS:
Test that :trac:`11783` and :trac:`14368` are fixed::
sage: chi = DirichletGroup(1).list()[0]
sage: chi.values()
[1]
sage: chi(1)
1
"""
G = self.parent()
R = G.base_ring()
mod = self.parent().modulus()
if mod == 1:
return [R.one()]
elif mod == 2:
return [R.zero(), R.one()]
result_list = [R.zero()] * mod
gens = G.unit_gens()
orders = G.integers_mod().unit_group().gens_orders()
R_values = G._zeta_powers
val_on_gen = self.element()
exponents = [0] * len(orders)
n = G.integers_mod().one()
value = val_on_gen.base_ring().zero()
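# Enumerate all invertible residues n as products of powers of the unit
# generators; `value` tracks the discrete logarithm of chi(n) with
# respect to zeta, so chi(n) itself is just a lookup in R_values.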
while True:
# record character value on n
result_list[n] = R_values[value]
# iterate:
# increase the exponent vector by 1,
# increase n accordingly, and increase value
i = 0
while True:
try:
exponents[i] += 1
except IndexError: # Done!
return result_list
value += val_on_gen[i]
n *= gens[i]
if exponents[i] < orders[i]:
break
exponents[i] = 0
i += 1
@cached_method(do_pickle=True)
def values_on_gens(self):
r"""
Return a tuple of the values of ``self`` on the standard
generators of `(\ZZ/N\ZZ)^*`, where `N` is the modulus.
EXAMPLES::
sage: e = DirichletGroup(16)([-1, 1])
sage: e.values_on_gens()
(-1, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work
properly; therefore these caches have to be stored when pickling an
instance of :class:`DirichletCharacter`.
"""
pows = self.parent()._zeta_powers
return tuple([pows[i] for i in self.element()])
@cached_method(do_pickle=True)
def element(self):
r"""
Return the underlying `\ZZ/n\ZZ`-module
vector of exponents.
.. warning::
Please do not change the entries of the returned vector;
this vector is mutable *only* because immutable vectors are
not implemented yet.
EXAMPLES::
sage: G.<a,b> = DirichletGroup(20)
sage: a.element()
(2, 0)
sage: b.element()
(0, 1)
.. NOTE::
The constructor of :class:`DirichletCharacter` sets the
cache of :meth:`element` or of :meth:`values_on_gens`. The cache of
one of these methods needs to be set for the other method to work
properly; therefore these caches have to be stored when pickling an
instance of :class:`DirichletCharacter`.
"""
P = self.parent()
M = P._module
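# Over an inexact complex field the exponents are recovered from the
# arguments of the values; over exact rings the discrete-log dictionary
# of the parent is used instead.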
if is_ComplexField(P.base_ring()):
zeta = P.zeta()
zeta_argument = zeta.argument()
v = M([int(round(x.argument() / zeta_argument))
for x in self.values_on_gens()])
else:
dlog = P._zeta_dlog
v = M([dlog[x] for x in self.values_on_gens()])
v.set_immutable()
return v
def __setstate__(self, state):
r"""
Restore a pickled element from ``state``.
TESTS::
sage: e = DirichletGroup(16)([-1, 1])
sage: loads(dumps(e)) == e
True
"""
# values_on_gens() used an explicit cache __values_on_gens in the past
# we need to set the cache of values_on_gens() from that if we encounter it in a pickle
values_on_gens_key = '_DirichletCharacter__values_on_gens'
values_on_gens = None
state_dict = state[1]
if values_on_gens_key in state_dict:
values_on_gens = state_dict[values_on_gens_key]
del state_dict[values_on_gens_key]
# element() used an explicit cache __element in the past
# we need to set the cache of element() from that if we encounter it in a pickle
element_key = '_DirichletCharacter__element'
element = None
if element_key in state_dict:
element = state_dict[element_key]
del state_dict[element_key]
super(DirichletCharacter, self).__setstate__(state)
if values_on_gens is not None:
self.values_on_gens.set_cache(values_on_gens)
if element is not None:
self.element.set_cache(element)
class DirichletGroupFactory(UniqueFactory):
r"""
Construct a group of Dirichlet characters modulo `N`.
INPUT:
- ``N`` -- positive integer
- ``base_ring`` -- commutative ring; the value ring for the
characters in this group (default: the cyclotomic field
`\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`)
- ``zeta`` -- (optional) root of unity in ``base_ring``
- ``zeta_order`` -- (optional) positive integer; this must be the
order of ``zeta`` if both are specified
- ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)``
notation works)
- ``integral`` -- boolean (default: ``False``); whether to replace
the default cyclotomic field by its rings of integers as the
base ring. This is ignored if ``base_ring`` is not ``None``.
OUTPUT:
The group of Dirichlet characters modulo `N` with values in a
subgroup `V` of the multiplicative group `R^*` of ``base_ring``.
This is the group of homomorphisms `(\ZZ/N\ZZ)^* \to V` with
pointwise multiplication. The group `V` is determined as follows:
- If both ``zeta`` and ``zeta_order`` are omitted, then `V` is
taken to be `R^*`, or equivalently its `n`-torsion subgroup,
where `n` is the exponent of `(\ZZ/N\ZZ)^*`. Many operations,
such as finding a set of generators for the group, are only
implemented if `V` is cyclic and a generator for `V` can be
found.
- If ``zeta`` is specified, then `V` is taken to be the cyclic
subgroup of `R^*` generated by ``zeta``. If ``zeta_order`` is
also given, it must be the multiplicative order of ``zeta``;
this is useful if the base ring is not exact or if the order of
``zeta`` is very large.
- If ``zeta`` is not specified but ``zeta_order`` is, then `V` is
taken to be the group of roots of unity of order dividing
``zeta_order`` in `R`. In this case, `R` must be a domain (so
`V` is cyclic), and `V` must have order ``zeta_order``.
Furthermore, a generator ``zeta`` of `V` is computed, and an
error is raised if such ``zeta`` cannot be found.
EXAMPLES:
The default base ring is a cyclotomic field of order the exponent
of `(\ZZ/N\ZZ)^*`::
sage: DirichletGroup(20)
Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2
We create the group of Dirichlet characters mod 20 with values in
the rational numbers::
sage: G = DirichletGroup(20, QQ); G
Group of Dirichlet characters modulo 20 with values in Rational Field
sage: G.order()
4
sage: G.base_ring()
Rational Field
The elements of ``G`` are printed by giving their values on the
generators of `(\ZZ/N\ZZ)^*`::
sage: list(G)
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1, Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1, Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1]
Next we construct the group of Dirichlet characters mod 20, but with
values in `\QQ(\zeta_n)`::
sage: G = DirichletGroup(20)
sage: G.1
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
We next compute several invariants of ``G``::
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
sage: G.unit_gens()
(11, 17)
sage: G.zeta()
zeta4
sage: G.zeta_order()
4
In this example we create a Dirichlet group with values in a
number field::
sage: R.<x> = PolynomialRing(QQ)
sage: K.<a> = NumberField(x^4 + 1)
sage: DirichletGroup(5, K)
Group of Dirichlet characters modulo 5 with values in Number Field in a with defining polynomial x^4 + 1
An example where we give ``zeta``, but not its order::
sage: G = DirichletGroup(5, K, a); G
Group of Dirichlet characters modulo 5 with values in the group of order 8 generated by a in Number Field in a with defining polynomial x^4 + 1
sage: G.list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> a^2, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -a^2]
We can also restrict the order of the characters, either with or
without specifying a root of unity::
sage: DirichletGroup(5, K, zeta=-1, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
sage: DirichletGroup(5, K, zeta_order=2)
Group of Dirichlet characters modulo 5 with values in the group of order 2 generated by -1 in Number Field in a with defining polynomial x^4 + 1
::
sage: G.<e> = DirichletGroup(13)
sage: loads(G.dumps()) == G
True
::
sage: G = DirichletGroup(19, GF(5))
sage: loads(G.dumps()) == G
True
We compute a Dirichlet group over a large prime field::
sage: p = next_prime(10^40)
sage: g = DirichletGroup(19, GF(p)); g
Group of Dirichlet characters modulo 19 with values in Finite Field of size 10000000000000000000000000000000000000121
Note that the root of unity has small order, i.e., it is not the
largest order root of unity in the field::
sage: g.zeta_order()
2
::
sage: r4 = CyclotomicField(4).ring_of_integers()
sage: G = DirichletGroup(60, r4)
sage: G.gens()
(Dirichlet character modulo 60 of conductor 4 mapping 31 |--> -1, 41 |--> 1, 37 |--> 1, Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1, Dirichlet character modulo 60 of conductor 5 mapping 31 |--> 1, 41 |--> 1, 37 |--> zeta4)
sage: val = G.gens()[2].values_on_gens()[2] ; val
zeta4
sage: parent(val)
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val)
17
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) * GF(29)(3)
22
sage: r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3
22
sage: parent(r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3)
Residue field of Fractional ideal (-2*zeta4 + 5)
::
sage: DirichletGroup(60, integral=True)
Group of Dirichlet characters modulo 60 with values in Gaussian Integers in Cyclotomic Field of order 4 and degree 2
sage: parent(DirichletGroup(60, integral=True).gens()[2].values_on_gens()[2])
Gaussian Integers in Cyclotomic Field of order 4 and degree 2
If the order of ``zeta`` cannot be determined automatically, we
can specify it using ``zeta_order``::
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6))
Traceback (most recent call last):
...
NotImplementedError: order of element not known
sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)
Group of Dirichlet characters modulo 7 with values in the group of order 6 generated by 0.500000000000000 + 0.866025403784439*I in Complex Field with 53 bits of precision
If the base ring is not a domain (in which case the group of roots
of unity is not necessarily cyclic), some operations still work,
such as creation of elements::
sage: G = DirichletGroup(5, Zmod(15)); G
Group of Dirichlet characters modulo 5 with values in Ring of integers modulo 15
sage: chi = G([13]); chi
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13
sage: chi^2
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 4
sage: chi.multiplicative_order()
4
Other operations only work if ``zeta`` is specified::
sage: G.gens()
Traceback (most recent call last):
...
NotImplementedError: factorization of polynomials over rings with composite characteristic is not implemented
sage: G = DirichletGroup(5, Zmod(15), zeta=2); G
Group of Dirichlet characters modulo 5 with values in the group of order 4 generated by 2 in Ring of integers modulo 15
sage: G.gens()
(Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 2,)
TESTS:
Dirichlet groups are cached, creating two groups with the same parameters
yields the same object::
sage: DirichletGroup(60) is DirichletGroup(60)
True
"""
def create_key(self, N, base_ring=None, zeta=None, zeta_order=None,
names=None, integral=False):
"""
Create a key that uniquely determines a Dirichlet group.
TESTS::
sage: DirichletGroup.create_key(60)
(Cyclotomic Field of order 4 and degree 2, 60, None, None)
An example to illustrate that ``base_ring`` is a part of the key::
sage: k = DirichletGroup.create_key(2, base_ring=QQ); k
(Rational Field, 2, None, None)
sage: l = DirichletGroup.create_key(2, base_ring=CC); l
(Complex Field with 53 bits of precision, 2, None, None)
sage: k == l
False
sage: G = DirichletGroup.create_object(None, k); G
Group of Dirichlet characters modulo 2 with values in Rational Field
sage: H = DirichletGroup.create_object(None, l); H
Group of Dirichlet characters modulo 2 with values in Complex Field with 53 bits of precision
sage: G == H
False
If ``base_ring`` were not a part of the key, the keys would compare
equal and the caching would be broken::
sage: k = k[1:]; k
(2, None, None)
sage: l = l[1:]; l
(2, None, None)
sage: k == l
True
sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2, base_ring=CC)
False
If the base ring is not an integral domain, an error will be
raised if only ``zeta_order`` is specified::
sage: DirichletGroup(17, Integers(15))
Group of Dirichlet characters modulo 17 with values in Ring of integers modulo 15
sage: DirichletGroup(17, Integers(15), zeta_order=4)
Traceback (most recent call last):
...
ValueError: base ring (= Ring of integers modulo 15) must be an integral domain if only zeta_order is specified
sage: G = DirichletGroup(17, Integers(15), zeta=7); G
Group of Dirichlet characters modulo 17 with values in the group of order 4 generated by 7 in Ring of integers modulo 15
sage: G.order()
4
sage: DirichletGroup(-33)
Traceback (most recent call last):
...
ValueError: modulus should be positive
"""
modulus = rings.Integer(N)
if modulus <= 0:
raise ValueError('modulus should be positive')
if base_ring is None:
if not (zeta is None and zeta_order is None):
raise ValueError("zeta and zeta_order must be None if base_ring not specified")
e = rings.IntegerModRing(modulus).unit_group_exponent()
base_ring = rings.CyclotomicField(e)
if integral:
base_ring = base_ring.ring_of_integers()
if not is_Ring(base_ring):
raise TypeError("base_ring (= %s) must be a ring" % base_ring)
# If either zeta or zeta_order is given, compute the other.
if zeta is not None:
zeta = base_ring(zeta)
if zeta_order is None:
zeta_order = zeta.multiplicative_order()
elif zeta_order is not None:
if not base_ring.is_integral_domain():
raise ValueError("base ring (= %s) must be an integral domain if only zeta_order is specified"
% base_ring)
zeta_order = rings.Integer(zeta_order)
zeta = base_ring.zeta(zeta_order)
return (base_ring, modulus, zeta, zeta_order)
def create_object(self, version, key, **extra_args):
"""
Create the object from the key (extra arguments are ignored). This is
only called if the object was not found in the cache.
TESTS::
sage: K = CyclotomicField(4)
sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4))
Group of Dirichlet characters modulo 60 with values in the group of order 4 generated by zeta4 in Cyclotomic Field of order 4 and degree 2
"""
base_ring, modulus, zeta, zeta_order = key
return DirichletGroup_class(base_ring, modulus, zeta, zeta_order)
DirichletGroup = DirichletGroupFactory("DirichletGroup")
def is_DirichletGroup(x):
"""
Returns True if x is a Dirichlet group.
EXAMPLES::
sage: from sage.modular.dirichlet import is_DirichletGroup
sage: is_DirichletGroup(DirichletGroup(11))
True
sage: is_DirichletGroup(11)
False
sage: is_DirichletGroup(DirichletGroup(11).0)
False
"""
return isinstance(x, DirichletGroup_class)
class DirichletGroup_class(WithEqualityById, Parent):
"""
Group of Dirichlet characters modulo `N` with values in a ring `R`.
"""
Element = DirichletCharacter
def __init__(self, base_ring, modulus, zeta, zeta_order):
"""
Create a Dirichlet group.
Not to be called directly (use the factory function ``DirichletGroup``).
The ``DirichletGroup`` factory ensures that either both
``zeta`` and ``zeta_order`` are specified, or that both are
``None``. In the former case, it also ensures that ``zeta``
is an element of ``base_ring`` and that ``zeta_order`` is an
element of ``ZZ``.
TESTS::
sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2) # indirect doctest
sage: TestSuite(G).run()
sage: G.base() # check that Parent.__init__ has been called
Ring of integers modulo 9
sage: DirichletGroup(13) == DirichletGroup(13)
True
sage: DirichletGroup(13) == DirichletGroup(13, QQ)
False
"""
from sage.categories.groups import Groups
category = Groups().Commutative()
if base_ring.is_integral_domain() or base_ring.is_finite():
# The group of n-th roots of unity in the base ring is
# finite, and hence this Dirichlet group is finite too.
# In particular, it is finitely generated; the added
# FinitelyGenerated() here means that the group has a
# distinguished set of generators.
category = category.Finite().FinitelyGenerated()
Parent.__init__(self, base_ring, category=category)
self._zeta = zeta
self._zeta_order = zeta_order
self._modulus = modulus
self._integers = rings.IntegerModRing(modulus)
def __setstate__(self, state):
"""
Used for unpickling old instances.
TESTS::
sage: G = DirichletGroup(9)
sage: loads(dumps(G)) is G
True
"""
self._set_element_constructor()
if '_zeta_order' in state:
state['_zeta_order'] = rings.Integer(state['_zeta_order'])
super(DirichletGroup_class, self).__setstate__(state)
@property
def _module(self):
"""
Return the free module used to represent Dirichlet characters.
TESTS::
sage: DirichletGroup(12)._module
Vector space of dimension 2 over Ring of integers modulo 2
"""
return free_module.FreeModule(rings.IntegerModRing(self.zeta_order()),
len(self.unit_gens()))
@property
def _zeta_powers(self):
"""
Return a list of powers of the distinguished root of unity.
TESTS::
sage: DirichletGroup(5)._zeta_powers
[1, zeta4, -1, -zeta4]
"""
R = self.base_ring()
a = R.one()
w = [a]
zeta = self.zeta()
zeta_order = self.zeta_order()
if is_ComplexField(R):
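# Over an inexact complex field, record the multiplicative order of each
# power explicitly, since it cannot be reliably recomputed from a
# floating-point value.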
for i in range(1, zeta_order):
a = a * zeta
a._set_multiplicative_order(zeta_order/gcd(zeta_order, i))
w.append(a)
else:
for i in range(1, zeta_order):
a = a * zeta
w.append(a)
return w
@property
def _zeta_dlog(self):
"""
Return a dictionary that can be used to compute discrete
logarithms in the value group of this Dirichlet group.
TESTS::
sage: DirichletGroup(5)._zeta_dlog
{-1: 2, -zeta4: 3, zeta4: 1, 1: 0}
"""
return {z: i for i, z in enumerate(self._zeta_powers)}
def change_ring(self, R, zeta=None, zeta_order=None):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a conversion map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
- ``zeta`` -- (optional) root of unity in ``R``
- ``zeta_order`` -- (optional) order of ``zeta``
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: G.change_ring(CyclotomicField(6))
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
TESTS:
We test the case where `R` is a map (:trac:`18072`)::
sage: K.<i> = QuadraticField(-1)
sage: f = K.complex_embeddings()[0]
sage: D = DirichletGroup(5, K)
sage: D.change_ring(f)
Group of Dirichlet characters modulo 5 with values in Complex Field with 53 bits of precision
"""
if zeta is None and self._zeta is not None:
# A root of unity was explicitly given; we use it over the
# new base ring as well.
zeta = self._zeta
if zeta_order is None:
# We reuse _zeta_order if we know that it stays the
# same; otherwise it will be recomputed as the order
# of R(zeta) by the DirichletGroup factory.
p = R.characteristic()
if p == 0 or p.gcd(self._zeta_order) == 1:
zeta_order = self._zeta_order
else:
# No root of unity specified; use the same zeta_order
# (which may still be None).
zeta_order = self._zeta_order
# Map zeta to the new parent
if zeta is not None:
zeta = R(zeta)
if isinstance(R, Map):
R = R.codomain()
return DirichletGroup(self.modulus(), R,
zeta=zeta,
zeta_order=zeta_order)
def base_extend(self, R):
"""
Return the base extension of ``self`` to ``R``.
INPUT:
- ``R`` -- either a ring admitting a *coercion* map from the
base ring of ``self``, or a ring homomorphism with the base
ring of ``self`` as its domain
EXAMPLES::
sage: G = DirichletGroup(7,QQ); G
Group of Dirichlet characters modulo 7 with values in Rational Field
sage: H = G.base_extend(CyclotomicField(6)); H
Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
Note that the root of unity can change::
sage: H.zeta()
zeta6
This method (in contrast to :meth:`change_ring`) requires a
coercion map to exist::
sage: G.base_extend(ZZ)
Traceback (most recent call last):
...
TypeError: no coercion map from Rational Field to Integer Ring is defined
Base-extended Dirichlet groups do not silently get roots of
unity with smaller order than expected (:trac:`6018`)::
sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4))
sage: H = DirichletGroup(10, CyclotomicField(4))
sage: G is H
True
sage: G3 = DirichletGroup(31, CyclotomicField(3))
sage: G5 = DirichletGroup(31, CyclotomicField(5))
sage: K30 = CyclotomicField(30)
sage: G3.gen(0).base_extend(K30) * G5.gen(0).base_extend(K30)
Dirichlet character modulo 31 of conductor 31 mapping 3 |--> -zeta30^7 + zeta30^5 + zeta30^4 + zeta30^3 - zeta30 - 1
When a root of unity is specified, base extension still works
if the new base ring is not an integral domain::
sage: f = DirichletGroup(17, ZZ, zeta=-1).0
sage: g = f.base_extend(Integers(15))
sage: g(3)
14
sage: g.parent().zeta()
14
"""
if not (isinstance(R, Map)
or R.has_coerce_map_from(self.base_ring())):
raise TypeError("no coercion map from %s to %s is defined"
% (self.base_ring(), R))
return self.change_ring(R)
def _element_constructor_(self, x):
"""
Construct a Dirichlet character from `x`.
EXAMPLES::
sage: G = DirichletGroup(13)
sage: K = G.base_ring()
sage: G(1)
Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
sage: G([-1])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1
sage: G([K.0])
Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
sage: G(0)
Traceback (most recent call last):
...
TypeError: cannot convert 0 to an element of Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
sage: G = DirichletGroup(6)
sage: G(DirichletGroup(3).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).0)
Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
sage: G(DirichletGroup(15).1)
Traceback (most recent call last):
...
TypeError: conductor must divide modulus
sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1)
Traceback (most recent call last):
...
TypeError: Unable to coerce zeta4 to a rational
"""
R = self.base_ring()
try:
if x == R.one():
x = [R.one()] * len(self.unit_gens())
except (TypeError, ValueError, ArithmeticError):
pass
if isinstance(x, list): # list of values on each unit generator
return self.element_class(self, x)
elif not isinstance(x, DirichletCharacter):
raise TypeError("cannot convert %s to an element of %s" % (x, self))
elif not x.conductor().divides(self.modulus()):
raise TypeError("conductor must divide modulus")
a = []
for u in self.unit_gens():
v = u.lift()
# have to do this, since e.g., unit gens mod 11 are not units mod 22.
while x.modulus().gcd(v) != 1:
v += self.modulus()
a.append(R(x(v)))
return self.element_class(self, a)
def _coerce_map_from_(self, X):
"""
Decide whether there is a coercion map from `X`.
There is conversion between Dirichlet groups of different
moduli, but no coercion. This implies that Dirichlet
characters of different moduli do not compare as equal.
TESTS::
sage: trivial_character(6) == trivial_character(3) # indirect doctest
False
sage: trivial_character(3) == trivial_character(9)
False
sage: trivial_character(3) == DirichletGroup(3, QQ).0^2
True
"""
return (isinstance(X, DirichletGroup_class) and
self.modulus() == X.modulus() and
self.base_ring().has_coerce_map_from(X.base_ring()) and
(self._zeta is None or
(X._zeta is not None and
self.base_ring()(X._zeta) in self._zeta_powers)))
def __len__(self):
"""
Return the number of elements of this Dirichlet group. This is the
same as self.order().
EXAMPLES::
sage: len(DirichletGroup(20))
8
sage: len(DirichletGroup(20, QQ))
4
sage: len(DirichletGroup(20, GF(5)))
8
sage: len(DirichletGroup(20, GF(2)))
1
sage: len(DirichletGroup(20, GF(3)))
4
"""
return self.order()
def _repr_(self):
"""
Return a print representation of this group, which can be renamed.
EXAMPLES::
sage: G = DirichletGroup(11)
sage: repr(G) # indirect doctest
'Group of Dirichlet characters modulo 11 with values in Cyclotomic Field of order 10 and degree 4'
sage: G.rename('Dir(11)')
sage: G
Dir(11)
"""
s = "Group of Dirichlet characters modulo %s with values in " % self.modulus()
if self._zeta is not None:
s += "the group of order %s generated by %s in " % (self._zeta_order, self._zeta)
s += str(self.base_ring())
return s
@cached_method
def decomposition(self):
r"""
Returns the Dirichlet groups of prime power modulus corresponding
to the primes dividing the modulus.
(Note that if the modulus is 2 mod 4, there will be a "factor" of
`(\ZZ/2\ZZ)^*`, which is the trivial group.)
EXAMPLES::
sage: DirichletGroup(20).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2,
Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
]
sage: DirichletGroup(20,GF(5)).decomposition()
[
Group of Dirichlet characters modulo 4 with values in Finite Field of size 5,
Group of Dirichlet characters modulo 5 with values in Finite Field of size 5
]
"""
R = self.base_ring()
return Sequence([DirichletGroup(p**r,R) for p, r \
in factor(self.modulus())],
cr=True,
universe = cat.Objects())
def exponent(self):
"""
Return the exponent of this group.
EXAMPLES::
sage: DirichletGroup(20).exponent()
4
sage: DirichletGroup(20,GF(3)).exponent()
2
sage: DirichletGroup(20,GF(2)).exponent()
1
sage: DirichletGroup(37).exponent()
36
"""
return self.zeta_order()
@cached_method
def _automorphisms(self):
"""
Compute the automorphisms of self. These are always given by raising to
a power, so the return value is a list of integers.
At present this is only implemented if the base ring has characteristic 0 or a prime.
EXAMPLES::
sage: DirichletGroup(17)._automorphisms()
[1, 3, 5, 7, 9, 11, 13, 15]
sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms()
[1, 11, 121, 1331]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()
Traceback (most recent call last):
...
NotImplementedError: Automorphisms for finite non-field base rings not implemented
"""
n = self.zeta_order()
R = self.base_ring()
p = R.characteristic()
if p == 0:
Auts = [e for e in range(1,n) if gcd(e,n) == 1]
else:
if not rings.ZZ(p).is_prime():
raise NotImplementedError("Automorphisms for finite non-field base rings not implemented")
# The automorphisms in characteristic p are
# k-th powering for
# k = 1, p, p^2, ..., p^(r-1),
# where p^r = 1 (mod n), so r is the multiplicative order of p modulo n.
r = rings.IntegerModRing(n)(p).multiplicative_order()
Auts = [p**m for m in range(0,r)]
return Auts
def galois_orbits(self, v=None, reps_only=False, sort=True, check=True):
"""
Return a list of the Galois orbits of Dirichlet characters in self,
or in v if v is not None.
INPUT:
- ``v`` - (optional) list of elements of self
- ``reps_only`` - (optional: default False) if True
only returns representatives for the orbits.
- ``sort`` - (optional: default True) whether to sort
the list of orbits and the orbits themselves (slightly faster if
False).
- ``check`` - (optional, default: True) whether or not
to explicitly coerce each element of v into self.
The Galois group is the absolute Galois group of the prime subfield
of Frac(R). If R is not a domain, an error will be raised.
EXAMPLES::
sage: DirichletGroup(20).galois_orbits()
[
[Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -1],
...,
[Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1]
]
sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits()
Traceback (most recent call last):
...
TypeError: Galois orbits only defined if base ring is an integral domain
"""
if v is None:
v = self.list()
else:
if check:
v = [self(x) for x in v]
G = []
seen_so_far = set([])
for x in v:
z = x.element()
e = tuple(z) # change when there are immutable vectors (and below)
if e in seen_so_far:
continue
orbit = x.galois_orbit(sort=sort)
if reps_only:
G.append(x)
else:
G.append(orbit)
for z in orbit:
seen_so_far.add(tuple(z.element()))
G = Sequence(G, cr=True)
if sort:
G.sort()
return G
def gen(self, n=0):
"""
Return the n-th generator of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gen(0)
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: G.gen(1)
Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
sage: G.gen(2)
Traceback (most recent call last):
...
IndexError: n(=2) must be between 0 and 1
::
sage: G.gen(-1)
Traceback (most recent call last):
...
IndexError: n(=-1) must be between 0 and 1
"""
n = int(n)
g = self.gens()
if n<0 or n>=len(g):
raise IndexError("n(=%s) must be between 0 and %s"%(n,len(g)-1))
return g[n]
@cached_method
def gens(self):
"""
Returns generators of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.gens()
(Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
"""
g = []
ord = self.zeta_order()
M = self._module
zero = M(0)
orders = self.integers_mod().unit_group().gens_orders()
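# The i-th generator character maps the i-th unit generator to
# zeta^(ord/gcd(ord, orders[i])), a root of unity of order
# gcd(ord, orders[i]), and maps all other unit generators to 1.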
for i in range(len(self.unit_gens())):
z = zero.__copy__()
z[i] = ord//gcd(ord, orders[i])
g.append(self.element_class(self, z, check=False))
return tuple(g)
def integers_mod(self):
r"""
Returns the group of integers `\ZZ/N\ZZ`
where `N` is the modulus of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.integers_mod()
Ring of integers modulo 20
"""
return self._integers
__iter__ = multiplicative_iterator
def list(self):
"""
Return a list of the Dirichlet characters in this group.
EXAMPLES::
sage: DirichletGroup(5).list()
[Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1,
Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4]
"""
return self._list_from_iterator()
def modulus(self):
"""
Returns the modulus of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.modulus()
20
"""
return self._modulus
def ngens(self):
"""
Returns the number of generators of self.
EXAMPLES::
sage: G = DirichletGroup(20)
sage: G.ngens()
2
"""
return len(self.gens())
@cached_method
def order(self):
"""
Return the number of elements of self. This is the same as
len(self).
EXAMPLES::
sage: DirichletGroup(20).order()
8
sage: DirichletGroup(37).order()
36
"""
ord = rings.Integer(1)
for g in self.gens():
ord *= int(g.order())
return ord
def random_element(self):
"""
Return a random element of self.
The element is computed by multiplying a random power of each
generator together, where the power is between 0 and the order of
the generator minus 1, inclusive.
EXAMPLES::
sage: DirichletGroup(37).random_element()
Dirichlet character modulo 37 of conductor 37 mapping 2 |--> zeta36^4
sage: DirichletGroup(20).random_element()
Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
sage: DirichletGroup(60).random_element()
Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1
"""
e = self(1)
for i in range(self.ngens()):
g = self.gen(i)
n = random.randrange(g.order())
e *= g**n
return e
def unit_gens(self):
r"""
Returns the minimal generators for the units of
`(\ZZ/N\ZZ)^*`, where `N` is the
modulus of self.
EXAMPLES::
sage: DirichletGroup(37).unit_gens()
(2,)
sage: DirichletGroup(20).unit_gens()
(11, 17)
sage: DirichletGroup(60).unit_gens()
(31, 41, 37)
sage: DirichletGroup(20,QQ).unit_gens()
(11, 17)
"""
return self._integers.unit_gens()
@cached_method
def zeta(self):
"""
Return the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(37).zeta()
zeta36
sage: DirichletGroup(20).zeta()
zeta4
sage: DirichletGroup(60).zeta()
zeta4
sage: DirichletGroup(60,QQ).zeta()
-1
sage: DirichletGroup(60, GF(25,'a')).zeta()
2
"""
zeta = self._zeta
if zeta is None:
R = self.base_ring()
e = self._integers.unit_group_exponent()
for d in reversed(e.divisors()):
try:
zeta = R.zeta(d)
break
except ValueError:
pass
self.zeta_order.set_cache(d)
return zeta
@cached_method
def zeta_order(self):
"""
Return the order of the chosen root of unity in the base ring.
EXAMPLES::
sage: DirichletGroup(20).zeta_order()
4
sage: DirichletGroup(60).zeta_order()
4
sage: DirichletGroup(60, GF(25,'a')).zeta_order()
4
sage: DirichletGroup(19).zeta_order()
18
"""
order = self._zeta_order
if order is None:
order = self.zeta().multiplicative_order()
return order
| 34.562563 | 378 | 0.545557 | 97,631 | 0.942521 | 0 | 0 | 21,038 | 0.203099 | 0 | 0 | 75,946 | 0.733176 |
8a5f3213a4f40ad41fa4289061fc1bfb9a560419 | 6,445 | py | Python | src/biotite/file.py | danijoo/biotite | 22072e64676e4e917236eac8493eed4c6a22cc33 | [
"BSD-3-Clause"
] | 208 | 2018-04-20T15:59:42.000Z | 2022-03-22T07:47:12.000Z | src/biotite/file.py | danielmuthama/biotite | cb238a8d8d7dc82b3bcea274d7d91d5c876badcd | [
"BSD-3-Clause"
] | 121 | 2017-11-15T14:52:07.000Z | 2022-03-30T16:31:41.000Z | src/biotite/file.py | danielmuthama/biotite | cb238a8d8d7dc82b3bcea274d7d91d5c876badcd | [
"BSD-3-Clause"
] | 49 | 2018-07-19T09:06:24.000Z | 2022-03-23T17:21:34.000Z | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite"
__author__ = "Patrick Kunzmann"
__all__ = ["File", "TextFile", "InvalidFileError"]
import abc
import io
import warnings
from .copyable import Copyable
import copy
class File(Copyable, metaclass=abc.ABCMeta):
"""
Base class for all file classes.
    The constructor creates an empty file that can be filled with data
    using the class-specific setter methods.
Conversely, the class method :func:`read()` reads a file from disk
(or a file-like object from other sources).
In order to write the instance content into a file the
:func:`write()` method is used.
"""
def __init__(self):
# Support for deprecated instance method 'read()':
# When creating an instance, the 'read()' class method is
# replaced by the instance method, so that subsequent
# 'read()' calls are delegated to the instance method
self.read = self._deprecated_read
@classmethod
@abc.abstractmethod
def read(cls, file):
"""
Parse a file (or file-like object).
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : File
An instance from the respective :class:`File` subclass
representing the parsed file.
"""
pass
def _deprecated_read(self, file, *args, **kwargs):
"""
Support for deprecated instance method :func:`read()`.
Internally this calls the :func:`read()` class method and
replaces the data in `self` with the data from the newly created
:class:`File` object
"""
warnings.warn(
"Instance method 'read()' is deprecated, "
"use class method instead",
DeprecationWarning
)
cls = type(self)
new_file = cls.read(file, *args, **kwargs)
self.__dict__.update(new_file.__dict__)
@abc.abstractmethod
def write(self, file):
"""
Write the contents of this :class:`File` object into a file.
Parameters
----------
file_name : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""
pass
class TextFile(File, metaclass=abc.ABCMeta):
"""
Base class for all line based text files.
When reading a file, the text content is saved as list of strings,
one for each line.
When writing a file, this list is written into the file.
Attributes
----------
lines : list
List of string representing the lines in the text file.
PROTECTED: Do not modify from outside.
"""
def __init__(self):
super().__init__()
self.lines = []
@classmethod
def read(cls, file, *args, **kwargs):
# File name
if isinstance(file, str):
with open(file, "r") as f:
lines = f.read().splitlines()
# File object
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
lines = file.read().splitlines()
file_object = cls(*args, **kwargs)
file_object.lines = lines
return file_object
@staticmethod
def read_iter(file):
"""
Create an iterator over each line of the given text file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Yields
------
line : str
The current line in the file.
"""
# File name
if isinstance(file, str):
with open(file, "r") as f:
while True:
line = f.readline()
if not line:
break
yield line
# File object
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
while True:
line = file.readline()
if not line:
break
yield line
def write(self, file):
"""
Write the contents of this object into a file
(or file-like object).
Parameters
----------
file_name : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""
if isinstance(file, str):
with open(file, "w") as f:
f.write("\n".join(self.lines) + "\n")
else:
if not is_text(file):
raise TypeError("A file opened in 'text' mode is required")
file.write("\n".join(self.lines) + "\n")
def __copy_fill__(self, clone):
super().__copy_fill__(clone)
clone.lines = copy.copy(self.lines)
def __str__(self):
return("\n".join(self.lines))
class InvalidFileError(Exception):
"""
Indicates that the file is not suitable for the requested action,
either because the file does not contain the required data or
because the file is malformed.
"""
pass
def wrap_string(text, width):
"""
A much simpler and hence much more efficient version of
`textwrap.wrap()`.
This function simply wraps the given `text` after `width`
characters, ignoring sentences, whitespaces, etc.
"""
lines = []
for i in range(0, len(text), width):
lines.append(text[i : i+width])
return lines
def is_binary(file):
if isinstance(file, io.BufferedIOBase):
return True
# for file wrappers, e.g. 'TemporaryFile'
elif hasattr(file, "file") and isinstance(file.file, io.BufferedIOBase):
return True
else:
return False
def is_text(file):
if isinstance(file, io.TextIOBase):
return True
# for file wrappers, e.g. 'TemporaryFile'
elif hasattr(file, "file") and isinstance(file.file, io.TextIOBase):
return True
else:
return False
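

# Usage sketch (added for illustration; not part of the original module).
# It relies only on what is defined above: TextFile.read()/write() operate on
# line-based files and keep the content in the `lines` attribute. Because the
# module uses relative imports, run it within the package, e.g. via
# `python -m biotite.file`.
if __name__ == "__main__":
    import os
    import tempfile

    demo_path = os.path.join(tempfile.mkdtemp(), "demo.txt")

    # Build a file object in memory and write it to disk
    out_file = TextFile()
    out_file.lines = ["first line", "second line", "third line"]
    out_file.write(demo_path)

    # Read it back with the class method and compare
    in_file = TextFile.read(demo_path)
    assert in_file.lines == out_file.lines
    print(in_file)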
| 29.162896 | 76 | 0.565244 | 5,214 | 0.808999 | 930 | 0.144298 | 2,266 | 0.35159 | 0 | 0 | 3,504 | 0.543677 |
8a5fa0d5b2ac5b94aac410a26a9a516f09e6dcbd | 4,918 | py | Python | src/cms/views/push_notifications/push_notification_sender.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | null | null | null | src/cms/views/push_notifications/push_notification_sender.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | 5 | 2021-02-10T02:41:20.000Z | 2022-03-12T00:56:56.000Z | src/cms/views/push_notifications/push_notification_sender.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | null | null | null | """
Module for sending Push Notifications
"""
import logging
import requests
from django.conf import settings
from ...models import PushNotificationTranslation
from ...models import Region
from ...constants import push_notifications as pnt_const
logger = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class PushNotificationSender:
"""
Sends push notifications via FCM HTTP API.
Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json
"""
fcm_url = "https://fcm.googleapis.com/fcm/send"
def __init__(self, push_notification):
"""
Load relevant push notification translations and prepare content for sending
:param push_notification: the push notification that should be sent
:type push_notification: ~cms.models.push_notifications.push_notification.PushNotification
"""
self.push_notification = push_notification
self.prepared_pnts = []
self.primary_pnt = PushNotificationTranslation.objects.get(
push_notification=push_notification,
language=push_notification.region.default_language,
)
if len(self.primary_pnt.title) > 0:
self.prepared_pnts.append(self.primary_pnt)
self.load_secondary_pnts()
self.auth_key = self.get_auth_key()
def load_secondary_pnts(self):
"""
Load push notification translations in other languages
"""
secondary_pnts = PushNotificationTranslation.objects.filter(
push_notification=self.push_notification
).exclude(id=self.primary_pnt.id)
for secondary_pnt in secondary_pnts:
if (
secondary_pnt.title == ""
and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode
):
secondary_pnt.title = self.primary_pnt.title
secondary_pnt.text = self.primary_pnt.text
self.prepared_pnts.append(secondary_pnt)
if len(secondary_pnt.title) > 0:
self.prepared_pnts.append(secondary_pnt)
def is_valid(self):
"""
Check if all data for sending push notifications is available
:return: all prepared push notification translations are valid
:rtype: bool
"""
if self.auth_key is None:
return False
for pnt in self.prepared_pnts:
if not pnt.title:
logger.debug("%r has no title", pnt)
return False
return True
@staticmethod
def get_auth_key():
"""
Get FCM API auth key
:return: FCM API auth key
:rtype: str
"""
fcm_auth_config_key = "fcm_auth_key"
auth_key = settings.FCM_KEY
if auth_key.exists():
logger.debug("Got fcm_auth_key from database")
return auth_key.first().value
logger.warning(
"Could not get %r from configuration database", fcm_auth_config_key
)
return None
def send_pn(self, pnt):
"""
Send single push notification translation
:param pnt: the prepared push notification translation to be sent
:type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation
:return: Response of the :mod:`requests` library
:rtype: ~requests.Response
"""
if settings.DEBUG:
region_slug = Region.objects.get(
id=settings.TEST_BLOG_ID
            ).slug  # Test environment - prevent sending PNs to actual users in development
else:
region_slug = self.push_notification.region.slug
payload = {
"to": f"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}",
"notification": {"title": pnt.title, "body": pnt.text},
"data": {
"lanCode": pnt.language.slug,
"city": self.push_notification.region.slug,
},
}
headers = {"Authorization": f"key={self.auth_key}"}
return requests.post(self.fcm_url, json=payload, headers=headers)
# pylint: disable=too-many-arguments
def send_all(self):
"""
Send all prepared push notification translations
:return: Success status
:rtype: bool
"""
status = True
for pnt in self.prepared_pnts:
res = self.send_pn(pnt)
if res.status_code == 200:
logger.info("%r sent, FCM id: %r", pnt, res.json()["message_id"])
else:
status = False
logger.warning(
"Received invalid response from FCM for %r, status: %r, body: %r",
pnt,
res.status_code,
res.text,
)
return status
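

# Illustrative sketch (not part of the original module): the payload shape that
# send_pn() posts to the FCM endpoint above. All values below are made-up
# placeholders; a real topic name is built from the region slug, the language
# slug and the notification channel. Shown under a __main__ guard purely as a
# sketch - the module itself is meant to be used from Django.
if __name__ == "__main__":
    example_payload = {
        "to": "/topics/<region-slug>-<language-slug>-<channel>",
        "notification": {"title": "<title>", "body": "<text>"},
        "data": {"lanCode": "<language-slug>", "city": "<region-slug>"},
    }
    example_headers = {"Authorization": "key=<FCM server key>"}
    print(example_payload)
    print(example_headers)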
| 34.391608 | 110 | 0.610207 | 4,588 | 0.9329 | 0 | 0 | 491 | 0.099837 | 0 | 0 | 1,867 | 0.379626 |
8a5fbb70b61ec5fc6c7b862f0da3b78b40dc8aa0 | 984 | py | Python | tests/functional/index/create/test_03.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/functional/index/create/test_03.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/functional/index/create/test_03.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: functional.index.create.03
# title: CREATE ASC INDEX
# description: CREATE ASC INDEX
#
# Dependencies:
# CREATE DATABASE
# CREATE TABLE
# SHOW INDEX
# tracker_id:
# min_versions: []
# versions: 1.0
# qmid: functional.index.create.create_index_03
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 1.0
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE t( a INTEGER);
commit;"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """CREATE ASC INDEX test ON t(a);
SHOW INDEX test;"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """TEST INDEX ON T(A)"""
@pytest.mark.version('>=1.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 23.428571 | 70 | 0.654472 | 0 | 0 | 0 | 0 | 183 | 0.185976 | 0 | 0 | 520 | 0.528455 |
8a6044d55abd530531d11b4f87fb12061cb65199 | 370 | py | Python | app/logic/httpcommon/Page.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | 10 | 2017-01-13T06:28:04.000Z | 2020-11-18T13:00:26.000Z | app/logic/httpcommon/Page.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | null | null | null | app/logic/httpcommon/Page.py | imvu/bluesteel | ab52133249a693b3cd2d8593c5d47408a3b0fce6 | [
"MIT"
] | 2 | 2018-03-29T14:10:53.000Z | 2019-11-20T08:21:57.000Z | """ Page object file """
class Page():
""" Page object, it contains information about the pare we are refering, index, items per page, etc. """
page_index = 0
items_per_page = 0
def __init__(self, items_per_page, page_index):
""" Creates the page """
self.page_index = int(page_index)
self.items_per_page = int(items_per_page)
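

# Usage sketch (illustrative, not part of the original file). A Page would
# typically be built from request query parameters, e.g. 25 items per page,
# third page (index 2).
if __name__ == "__main__":
    page = Page(items_per_page=25, page_index=2)
    print(page.page_index, page.items_per_page)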
| 28.461538 | 108 | 0.648649 | 343 | 0.927027 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.410811 |
8a60852354e6415290eaf2e5371028a21ee46376 | 1,004 | py | Python | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res18_market1501_176_80_1.1G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | 1 | 2020-12-18T14:49:19.000Z | 2020-12-18T14:49:19.000Z | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
import glob
import re
from os import path as osp
from .market1501 import Market1501
__factory = {
'market1501': Market1501
}
def get_names():
return list(__factory.keys())
def init_dataset(name, *args, **kwargs):
if name not in __factory.keys():
raise KeyError("Unknown datasets: {}".format(name))
return __factory[name](*args, **kwargs)
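

# Usage sketch (illustrative, not part of the original module; run it within
# the package because of the relative import above). init_dataset() simply
# looks the name up in the registry; constructing Market1501 is assumed to
# require its image data on disk, so that call is only shown commented out.
if __name__ == "__main__":
    print(get_names())  # ['market1501']
    # dataset = init_dataset('market1501')  # needs the Market1501 data present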
| 27.888889 | 74 | 0.737052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.593625 |
8a61523d34a63b6c1b5541a6127f60a7a5d5ec7e | 4,684 | py | Python | PyBank/.ipynb_checkpoints/Pymain-checkpoint.py | yash5OG/PythonChallengeW3-Y5 | 4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67 | [
"MIT"
] | null | null | null | PyBank/.ipynb_checkpoints/Pymain-checkpoint.py | yash5OG/PythonChallengeW3-Y5 | 4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67 | [
"MIT"
] | null | null | null | PyBank/.ipynb_checkpoints/Pymain-checkpoint.py | yash5OG/PythonChallengeW3-Y5 | 4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67 | [
"MIT"
] | null | null | null | {
"cells": [
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [],
"source": [
"# Import libraries\n",
"import os, csv"
]
},
{
"cell_type": "code",
"execution_count": 65,
"metadata": {},
"outputs": [],
"source": [
"#variables for the script\n",
"months = [] #list of months\n",
"pl =[] #list of monthly PL\n",
"pl_changes = [] #list of P&L Changes\n",
"n_months = 0 #count of months\n",
"pl_total = 0 #total of P&L\n",
"plc = 0 #variable to track PL changes\n",
"avg_pl_change = 0 #average of changes in PL\n",
"maxpl = 0 #maximum increase in profits\n",
"minpl = 0 #maximum decrease in losses\n",
"max_i = 0 #index for max pl\n",
"min_i = 0 #index for min pl\n",
"\n",
"#read the resource file\n",
"bankcsv = os.path.join(\".\", \"Resources\", \"budget_data.csv\") #set path\n",
"\n",
"\n",
"#read file\n",
"with open(bankcsv, 'r') as csv_file:\n",
" csv_reader = csv.reader(csv_file,delimiter=\",\")\n",
" header = next(csv_reader)\n",
" \n",
" #for loop to update the counters and lists\n",
" for row in csv_reader:\n",
" n_months += 1\n",
" pl_total += int(row[1])\n",
" pl.append(row[1])\n",
" months.append(row[0])"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"# loop to track the PL change values\n",
"pl_changes = [] \n",
"plc = int(pl[0])\n",
"for i in range(1, len(pl)):\n",
" pl_changes.append(int(pl[i]) - plc)\n",
" plc = int(pl[i])\n",
" i += 1\n",
"#print(pl_changes)"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [],
"source": [
"#calculate the average PL Changes, max and min\n",
"avg_pl_change = sum(pl_changes) / len(pl_changes)\n",
"maxpl = max(pl_changes)\n",
"minpl = min(pl_changes)\n",
"#print(avg_pl_change, maxpl, minpl)\n",
"#print(pl_changes.index(maxpl))\n",
"#print(len(pl_changes))"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Financial Analysis\n",
"---------------------------------------------------------------------\n",
"Total Months: 86\n",
"Total: $38382578\n",
"Average Change: $-2315.12\n",
"Greatest Increase in Profits: Feb-2012 ($1926159)\n",
"Greatest Decrease in Profits: Sep-2013 ($-2196167)\n"
]
}
],
"source": [
"#find dates for max and min PL changes\n",
"max_i = pl_changes.index(maxpl) +1 #adding +1 since the changes are calculated one row above\n",
"min_i = pl_changes.index(minpl) +1\n",
"\n",
"maxmonth = months[max_i]\n",
"minmonth = months[min_i]\n",
"\n",
"#print output to the terminal\n",
"\n",
"print(\"Financial Analysis\")\n",
"print(\"-\"*69)\n",
"print(f\"Total Months: {n_months}\")\n",
"print(f\"Total: ${round(pl_total,2)}\")\n",
"print(f\"Average Change: ${round(avg_pl_change,2)}\")\n",
"print(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\")\n",
"print(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\")\n"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [],
"source": [
"# write summary to txt file\n",
"output = os.path.join(\".\",\"Analysis\", \"summary.txt\")\n",
"\n",
"# use \"\\n\" to create a new line\n",
"with open(output, 'w') as output:\n",
" output.write(\"Financial Analysis\\n\")\n",
" output.write(\"-\"*69 + \"\\n\")\n",
" output.write(f\"Total Months: {n_months}\\n\")\n",
" output.write(f\"Total: ${round(pl_total,2)}\\n\")\n",
" output.write(f\"Average Change: ${round(avg_pl_change,2)}\\n\")\n",
" output.write(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\n\")\n",
" output.write(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\\n\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 29.093168 | 104 | 0.51281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,674 | 0.784372 |
8a6266df7a1375925ee79de0d3567238f763ecfa | 165 | py | Python | xlib/api/win32/oleaut32/oleaut32.py | jkennedyvz/DeepFaceLive | 274c20808da089eb7fc0fc0e8abe649379a29ffe | [
"MIT"
] | null | null | null | xlib/api/win32/oleaut32/oleaut32.py | jkennedyvz/DeepFaceLive | 274c20808da089eb7fc0fc0e8abe649379a29ffe | [
"MIT"
] | null | null | null | xlib/api/win32/oleaut32/oleaut32.py | jkennedyvz/DeepFaceLive | 274c20808da089eb7fc0fc0e8abe649379a29ffe | [
"MIT"
] | null | null | null | from ctypes import POINTER, Structure
from ..wintypes import VARIANT, dll_import
@dll_import('OleAut32')
def VariantInit( pvarg : POINTER(VARIANT) ) -> None: ...
| 20.625 | 56 | 0.739394 | 0 | 0 | 0 | 0 | 80 | 0.484848 | 0 | 0 | 10 | 0.060606 |
8a62e622419e3b5175ed6a324e076188b956be4c | 2,313 | py | Python | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import Consumer
from .models import ConsumerAction
from .models import Event
from .models import EventTypeDescriptor
from .models import ExternalConfigurationDescriptor
from .models import FormattedEventMessage
from .models import IdentityRef
from .models import InputDescriptor
from .models import InputFilter
from .models import InputFilterCondition
from .models import InputValidation
from .models import InputValue
from .models import InputValues
from .models import InputValuesError
from .models import InputValuesQuery
from .models import Notification
from .models import NotificationDetails
from .models import NotificationResultsSummaryDetail
from .models import NotificationsQuery
from .models import NotificationSummary
from .models import Publisher
from .models import PublisherEvent
from .models import PublishersQuery
from .models import ReferenceLinks
from .models import ResourceContainer
from .models import SessionToken
from .models import Subscription
from .models import SubscriptionsQuery
from .models import VersionedResource
__all__ = [
'Consumer',
'ConsumerAction',
'Event',
'EventTypeDescriptor',
'ExternalConfigurationDescriptor',
'FormattedEventMessage',
'IdentityRef',
'InputDescriptor',
'InputFilter',
'InputFilterCondition',
'InputValidation',
'InputValue',
'InputValues',
'InputValuesError',
'InputValuesQuery',
'Notification',
'NotificationDetails',
'NotificationResultsSummaryDetail',
'NotificationsQuery',
'NotificationSummary',
'Publisher',
'PublisherEvent',
'PublishersQuery',
'ReferenceLinks',
'ResourceContainer',
'SessionToken',
'Subscription',
'SubscriptionsQuery',
'VersionedResource',
]
| 33.042857 | 94 | 0.685257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,056 | 0.45655 |
8a643abfeb244244f7979e846782a5b379e9f35e | 935 | py | Python | pizdyuk/pzd_logging.py | DeathAdder1999/Pizdyuk | 3fd7c71508c79b36e3cc801d78cd1a87eee5aa0b | [
"Apache-2.0"
] | 1 | 2021-05-06T20:23:08.000Z | 2021-05-06T20:23:08.000Z | pizdyuk/pzd_logging.py | aufdnb/Pizdyuk | 75096ffa54df831eb05360d7b39f49000d466f80 | [
"Apache-2.0"
] | null | null | null | pizdyuk/pzd_logging.py | aufdnb/Pizdyuk | 75096ffa54df831eb05360d7b39f49000d466f80 | [
"Apache-2.0"
] | null | null | null | import datetime as date
from pzd_utils import datetime_to_str
class PizdyukLogger:
__logger = None
def __init__(self):
global __logger
if self.__logger:
raise RuntimeError("Logger instance already exists")
@staticmethod
def get_logger():
global __logger
if not PizdyukLogger._PizdyukLogger__logger:
PizdyukLogger._PizdyukLogger__logger = PizdyukLogger()
return PizdyukLogger._PizdyukLogger__logger
def log_info(self, msg):
self.__log(msg, "INFO")
def log_warning(self, warning):
self.__log(warning, "WARNING")
def log_error(self, error):
self.__log(error, "ERROR")
def log_fatal(self, fatal):
self.__log(fatal, "FATAL")
def __log(self, msg, lvl):
date_str = datetime_to_str(date.datetime.now())
log = "[{0}] [{1}] {2}".format(lvl, date_str, msg)
print(log)
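

# Usage sketch (illustrative, not part of the original module): the logger is
# meant to be obtained through the class-level accessor, which always hands
# back the same instance.
if __name__ == "__main__":
    logger = PizdyukLogger.get_logger()
    logger.log_info("hello from the logger")
    logger.log_error("something went wrong")
    assert logger is PizdyukLogger.get_logger()  # singleton: same object again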
| 25.27027 | 66 | 0.632086 | 863 | 0.922995 | 0 | 0 | 232 | 0.248128 | 0 | 0 | 78 | 0.083422 |
8a64487109643353c0e84bbee6dfb1cf09044927 | 834 | py | Python | beta_reconstruction/crystal_relations.py | LightForm-group/beta-reconstruction | 67584f75ee08690226595c5f9dc75dfd164a11a0 | [
"MIT"
] | null | null | null | beta_reconstruction/crystal_relations.py | LightForm-group/beta-reconstruction | 67584f75ee08690226595c5f9dc75dfd164a11a0 | [
"MIT"
] | 1 | 2020-01-07T12:41:26.000Z | 2020-01-07T12:50:40.000Z | beta_reconstruction/crystal_relations.py | LightForm-group/beta-reconstruction | 67584f75ee08690226595c5f9dc75dfd164a11a0 | [
"MIT"
] | null | null | null | import numpy as np
from defdap.quat import Quat
hex_syms = Quat.symEqv("hexagonal")
# subset of hexagonal symmetries that give unique orientations when the
# Burgers transformation is applied
unq_hex_syms = [
hex_syms[0],
hex_syms[5],
hex_syms[4],
hex_syms[2],
hex_syms[10],
hex_syms[11]
]
cubic_syms = Quat.symEqv("cubic")
# subset of cubic symmetries that give unique orientations when the
# Burgers transformation is applied
unq_cub_syms = [
cubic_syms[0],
cubic_syms[7],
cubic_syms[9],
cubic_syms[1],
cubic_syms[22],
cubic_syms[16],
cubic_syms[12],
cubic_syms[15],
cubic_syms[4],
cubic_syms[8],
cubic_syms[21],
cubic_syms[20]
]
# HCP -> BCC
burg_eulers = np.array([135, 90, 354.74]) * np.pi / 180
burg_trans = Quat.fromEulerAngles(*burg_eulers).conjugate
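

# Quick check sketch (illustrative, not part of the original module): the
# reduced symmetry sets keep 6 hexagonal and 12 cubic operators, and
# burg_trans holds the Burgers orientation relation as a defdap Quat.
if __name__ == "__main__":
    print(len(unq_hex_syms), len(unq_cub_syms))  # 6 12
    print(burg_trans)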
| 22.540541 | 71 | 0.689448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.285372 |
8a64819227bba93979e4413095e01b50e7c00dec | 13 | py | Python | a2.py | Changhong-Jiang/test | b907b984cbd9703711f52c9f497cf36b5b4e8752 | [
"MIT"
] | null | null | null | a2.py | Changhong-Jiang/test | b907b984cbd9703711f52c9f497cf36b5b4e8752 | [
"MIT"
] | 1 | 2020-02-28T08:15:58.000Z | 2020-02-28T08:16:41.000Z | a2.py | Changhong-Jiang/test | b907b984cbd9703711f52c9f497cf36b5b4e8752 | [
"MIT"
] | null | null | null | print('222')
| 6.5 | 12 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.384615 |
8a65b0ad04c9a2a75abc3c11ac9fc679788fe298 | 4,318 | py | Python | app/api/v1/views/auth_views.py | emdeechege/Questionaire-API | 1dd05dc25f96ea8ecdce82fe28449ea684991251 | [
"MIT"
] | null | null | null | app/api/v1/views/auth_views.py | emdeechege/Questionaire-API | 1dd05dc25f96ea8ecdce82fe28449ea684991251 | [
"MIT"
] | 20 | 2019-01-08T19:16:45.000Z | 2019-08-22T10:21:43.000Z | app/api/v1/views/auth_views.py | emdeechege/Questioner-API | 1dd05dc25f96ea8ecdce82fe28449ea684991251 | [
"MIT"
] | 1 | 2019-01-11T10:13:42.000Z | 2019-01-11T10:13:42.000Z | from flask import jsonify, Blueprint, request, json, make_response
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
from ..utils.validators import Validation
from ..models.auth_models import Users
v1_auth_blueprint = Blueprint('auth', __name__, url_prefix='/api/v1')
USER = Users()
VALIDATOR = Validation()
@v1_auth_blueprint.route('/signup', methods=['POST'])
def signup():
"""View that controls creation of new users"""
try:
data = request.get_json()
except:
return jsonify({
"status": 400,
"message": "Invalid input"
}), 400
firstname = data.get('firstname')
lastname = data.get('lastname')
othername = data.get('othername')
email = data.get('email')
phone_number = data.get('phone_number')
username = data.get('username')
is_admin = data.get('is_admin')
password = data.get('password')
if not firstname or not firstname.split():
return make_response(jsonify({
"status": 400,
"message": "Firstname is required"
})), 400
if not lastname or not lastname.split():
return make_response(jsonify({
"status": 400,
"message": "Lastname is required"
})), 400
if not email or not email.split():
return make_response(jsonify({
"status": 400,
"message": "Email is required"
})), 400
if not phone_number:
return make_response(jsonify({
"status": 400,
"message": "Phone number is required"
})), 400
if not username or not username.split():
return make_response(jsonify({
"status": 400,
"message": "Username is required"
})), 400
if not password or not password.split():
return make_response(jsonify({
"status": 400,
"message": "Password is required"
})), 400
if not VALIDATOR.validate_phone_number(phone_number):
return jsonify({
"status": 400,
"message": "Please input valid phone number"
}), 400
if VALIDATOR.validate_password(password):
return jsonify({
"status": 400,
"message": "Password not valid"
}), 400
if not VALIDATOR.validate_email(email):
return jsonify({
"status": 400,
"message": "Invalid email"
}), 400
if VALIDATOR.username_exists(username):
return jsonify({
"status": 400,
"message": "Username exists"
}), 400
if VALIDATOR.email_exists(email):
return jsonify({
"status": 400,
"message": "Email exists"
}), 400
password = generate_password_hash(
password, method='pbkdf2:sha256', salt_length=8)
res = USER.signup(
firstname, lastname, othername, email, phone_number, username, is_admin, password)
return jsonify({
"status": 201,
"data": [{
"firstname": firstname,
"lastname": lastname,
"othername": othername,
"email": email,
"phone_number": phone_number,
"username": username,
"is_admin": is_admin
}]
}), 201
@v1_auth_blueprint.route('/login', methods=['POST'])
def login():
""" A view to control users login """
try:
data = request.get_json()
except:
return make_response(jsonify({
"status": 400,
"message": "Wrong input"
})), 400
username = data.get('username')
password = data.get('password')
if not username:
return make_response(jsonify({
"status": 400,
"message": "Username is required"
})), 400
if not password:
return make_response(jsonify({
"status": 400,
"message": "Password is required"
})), 400
if not VALIDATOR.username_exists(username):
return jsonify({
"status": 404,
"message": "User does not exist"
}), 404
    auth_token = USER.generate_auth_token(username)
return make_response(jsonify({
"status": 200,
"message": 'Logged in successfuly',
"token": auth_token
})), 200
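

# Usage sketch (illustrative only; the base URL and all field values are made
# up, and the blueprint must be registered on a running Flask app). It
# exercises the two endpoints above with the `requests` library.
if __name__ == "__main__":
    import requests

    base = "http://localhost:5000/api/v1"
    signup_body = {
        "firstname": "Jane", "lastname": "Doe", "othername": "J",
        "email": "jane@example.com", "phone_number": "0712345678",
        "username": "jane", "is_admin": False, "password": "Str0ngPass1",
    }
    print(requests.post(base + "/signup", json=signup_body).json())
    login_body = {"username": "jane", "password": "Str0ngPass1"}
    print(requests.post(base + "/login", json=login_body).json())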
| 28.596026 | 90 | 0.568782 | 0 | 0 | 0 | 0 | 3,947 | 0.914081 | 0 | 0 | 977 | 0.226262 |
8a66a4e65b6c15a92cb15d2436631fabac501551 | 4,314 | py | Python | pint/testsuite/test_definitions.py | s-avni/pint | 4e33d44437991bf7c5e30977643f42ebd6ed40da | [
"BSD-3-Clause"
] | null | null | null | pint/testsuite/test_definitions.py | s-avni/pint | 4e33d44437991bf7c5e30977643f42ebd6ed40da | [
"BSD-3-Clause"
] | null | null | null | pint/testsuite/test_definitions.py | s-avni/pint | 4e33d44437991bf7c5e30977643f42ebd6ed40da | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
from pint.util import (UnitsContainer)
from pint.converters import (ScaleConverter, OffsetConverter)
from pint.definitions import (Definition, PrefixDefinition, UnitDefinition,
DimensionDefinition, AliasDefinition)
from pint.testsuite import BaseTestCase
class TestDefinition(BaseTestCase):
def test_invalid(self):
self.assertRaises(ValueError, Definition.from_string, 'x = [time] * meter')
self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter')
def test_prefix_definition(self):
for definition in ('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'):
x = Definition.from_string(definition)
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'm')
self.assertEqual(x.aliases, ())
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(0.001), 1)
self.assertEqual(str(x), 'm')
x = Definition.from_string('kilo- = 1e-3 = k-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ())
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
x = Definition.from_string('kilo- = 1e-3 = k- = anotherk-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ('anotherk', ))
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
def test_baseunit_definition(self):
x = Definition.from_string('meter = [length]')
self.assertIsInstance(x, UnitDefinition)
self.assertTrue(x.is_base)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1}))
def test_unit_definition(self):
x = Definition.from_string('coulomb = ampere * second')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 1)
self.assertEqual(x.reference, UnitsContainer(ampere=1, second=1))
x = Definition.from_string('faraday = 96485.3399 * coulomb')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 96485.3399)
self.assertEqual(x.reference, UnitsContainer(coulomb=1))
x = Definition.from_string('degF = 9 / 5 * kelvin; offset: 255.372222')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, OffsetConverter)
self.assertEqual(x.converter.scale, 9/5)
self.assertEqual(x.converter.offset, 255.372222)
self.assertEqual(x.reference, UnitsContainer(kelvin=1))
x = Definition.from_string('turn = 6.28 * radian = _ = revolution = = cycle = _')
self.assertIsInstance(x, UnitDefinition)
self.assertEqual(x.name, 'turn')
self.assertEqual(x.aliases, ('revolution', 'cycle'))
self.assertEqual(x.symbol, 'turn')
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 6.28)
self.assertEqual(x.reference, UnitsContainer(radian=1))
def test_dimension_definition(self):
x = DimensionDefinition('[time]', '', (), converter='')
self.assertTrue(x.is_base)
self.assertEqual(x.name, '[time]')
x = Definition.from_string('[speed] = [length]/[time]')
self.assertIsInstance(x, DimensionDefinition)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1}))
def test_alias_definition(self):
x = Definition.from_string("@alias meter = metro = metr")
self.assertIsInstance(x, AliasDefinition)
self.assertEqual(x.name, "meter")
self.assertEqual(x.aliases, ("metro", "metr"))
| 44.474227 | 89 | 0.660176 | 3,917 | 0.907974 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.119147 |
8a678b6dfe1f80688ee851169cd059181b03b309 | 5,922 | py | Python | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 5,905 | 2015-01-02T17:05:36.000Z | 2022-03-29T07:28:29.000Z | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 6,097 | 2015-01-01T21:20:25.000Z | 2022-03-31T23:55:01.000Z | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 2,202 | 2015-01-02T18:31:25.000Z | 2022-03-28T15:35:03.000Z | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import dns
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from .logging import get_logger
_logger = get_logger(__name__)
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def _check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise Exception('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def _get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise Exception("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = _check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = _get_and_validate(ns, url, rtype)
validated = True
except Exception as e:
_logger.info(f"DNSSEC error: {repr(e)}")
out = dns.resolver.resolve(url, rtype)
validated = False
return out, validated
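

# Usage sketch (illustrative, not part of the original module): resolve a
# record and report whether the DNSSEC chain validated. Needs network access,
# and because of the relative import above it should be run as a module,
# e.g. `python -m electrum.dnssec`. The hostname is an arbitrary example.
if __name__ == "__main__":
    out, validated = query("example.com", "A")
    print("DNSSEC validated:", validated)
    for rr in out:
        print(rr)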
| 39.218543 | 418 | 0.700777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,763 | 0.466565 |
8a681bd50a01e317584f76158f59adbe05396fb6 | 61,870 | py | Python | specs/d3d11.py | ds-hwang/apitrace | b74347ebae0d033a013c4de3efb0e9165e9cea8f | [
"MIT"
] | 1 | 2017-06-07T15:28:36.000Z | 2017-06-07T15:28:36.000Z | specs/d3d11.py | jciehl/apitrace | 0e01acc36de14e9ca7c0ced258767ffb99ac96ea | [
"MIT"
] | null | null | null | specs/d3d11.py | jciehl/apitrace | 0e01acc36de14e9ca7c0ced258767ffb99ac96ea | [
"MIT"
] | 1 | 2021-05-21T18:27:29.000Z | 2021-05-21T18:27:29.000Z | ##########################################################################
#
# Copyright 2012 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from dxgi import *
from d3dcommon import *
from d3d11sdklayers import *
HRESULT = MAKE_HRESULT([
"D3D11_ERROR_FILE_NOT_FOUND",
"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS",
"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS",
"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD",
"D3DERR_INVALIDCALL",
"D3DERR_WASSTILLDRAWING",
])
ID3D11DepthStencilState = Interface("ID3D11DepthStencilState", ID3D11DeviceChild)
ID3D11BlendState = Interface("ID3D11BlendState", ID3D11DeviceChild)
ID3D11RasterizerState = Interface("ID3D11RasterizerState", ID3D11DeviceChild)
ID3D11Resource = Interface("ID3D11Resource", ID3D11DeviceChild)
ID3D11Buffer = Interface("ID3D11Buffer", ID3D11Resource)
ID3D11Texture1D = Interface("ID3D11Texture1D", ID3D11Resource)
ID3D11Texture2D = Interface("ID3D11Texture2D", ID3D11Resource)
ID3D11Texture3D = Interface("ID3D11Texture3D", ID3D11Resource)
ID3D11View = Interface("ID3D11View", ID3D11DeviceChild)
ID3D11ShaderResourceView = Interface("ID3D11ShaderResourceView", ID3D11View)
ID3D11RenderTargetView = Interface("ID3D11RenderTargetView", ID3D11View)
ID3D11DepthStencilView = Interface("ID3D11DepthStencilView", ID3D11View)
ID3D11UnorderedAccessView = Interface("ID3D11UnorderedAccessView", ID3D11View)
ID3D11VertexShader = Interface("ID3D11VertexShader", ID3D11DeviceChild)
ID3D11HullShader = Interface("ID3D11HullShader", ID3D11DeviceChild)
ID3D11DomainShader = Interface("ID3D11DomainShader", ID3D11DeviceChild)
ID3D11GeometryShader = Interface("ID3D11GeometryShader", ID3D11DeviceChild)
ID3D11PixelShader = Interface("ID3D11PixelShader", ID3D11DeviceChild)
ID3D11ComputeShader = Interface("ID3D11ComputeShader", ID3D11DeviceChild)
ID3D11InputLayout = Interface("ID3D11InputLayout", ID3D11DeviceChild)
ID3D11SamplerState = Interface("ID3D11SamplerState", ID3D11DeviceChild)
ID3D11Asynchronous = Interface("ID3D11Asynchronous", ID3D11DeviceChild)
ID3D11Query = Interface("ID3D11Query", ID3D11Asynchronous)
ID3D11Predicate = Interface("ID3D11Predicate", ID3D11Query)
ID3D11Counter = Interface("ID3D11Counter", ID3D11Asynchronous)
ID3D11ClassInstance = Interface("ID3D11ClassInstance", ID3D11DeviceChild)
ID3D11ClassLinkage = Interface("ID3D11ClassLinkage", ID3D11DeviceChild)
ID3D11CommandList = Interface("ID3D11CommandList", ID3D11DeviceChild)
ID3D11Device = Interface("ID3D11Device", IUnknown)
D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [
"D3D11_INPUT_PER_VERTEX_DATA",
"D3D11_INPUT_PER_INSTANCE_DATA",
])
D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [
"D3D11_APPEND_ALIGNED_ELEMENT",
])
D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [
(LPCSTR, "SemanticName"),
(UINT, "SemanticIndex"),
(DXGI_FORMAT, "Format"),
(UINT, "InputSlot"),
(D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, "AlignedByteOffset"),
(D3D11_INPUT_CLASSIFICATION, "InputSlotClass"),
(UINT, "InstanceDataStepRate"),
])
D3D11_FILL_MODE = Enum("D3D11_FILL_MODE", [
"D3D11_FILL_WIREFRAME",
"D3D11_FILL_SOLID",
])
D3D11_PRIMITIVE_TOPOLOGY = Enum("D3D11_PRIMITIVE_TOPOLOGY", [
"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED",
"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST",
"D3D11_PRIMITIVE_TOPOLOGY_LINELIST",
"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP",
"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST",
])
D3D11_PRIMITIVE = Enum("D3D11_PRIMITIVE", [
"D3D11_PRIMITIVE_UNDEFINED",
"D3D11_PRIMITIVE_POINT",
"D3D11_PRIMITIVE_LINE",
"D3D11_PRIMITIVE_TRIANGLE",
"D3D11_PRIMITIVE_LINE_ADJ",
"D3D11_PRIMITIVE_TRIANGLE_ADJ",
"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH",
])
D3D11_CULL_MODE = Enum("D3D11_CULL_MODE", [
"D3D11_CULL_NONE",
"D3D11_CULL_FRONT",
"D3D11_CULL_BACK",
])
D3D11_SO_DECLARATION_ENTRY = Struct("D3D11_SO_DECLARATION_ENTRY", [
(UINT, "Stream"),
(LPCSTR, "SemanticName"),
(UINT, "SemanticIndex"),
(BYTE, "StartComponent"),
(BYTE, "ComponentCount"),
(BYTE, "OutputSlot"),
])
D3D11_VIEWPORT = Struct("D3D11_VIEWPORT", [
(FLOAT, "TopLeftX"),
(FLOAT, "TopLeftY"),
(FLOAT, "Width"),
(FLOAT, "Height"),
(FLOAT, "MinDepth"),
(FLOAT, "MaxDepth"),
])
D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [
"D3D11_RESOURCE_DIMENSION_UNKNOWN",
"D3D11_RESOURCE_DIMENSION_BUFFER",
"D3D11_RESOURCE_DIMENSION_TEXTURE1D",
"D3D11_RESOURCE_DIMENSION_TEXTURE2D",
"D3D11_RESOURCE_DIMENSION_TEXTURE3D",
])
D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [
"D3D11_SRV_DIMENSION_UNKNOWN",
"D3D11_SRV_DIMENSION_BUFFER",
"D3D11_SRV_DIMENSION_TEXTURE1D",
"D3D11_SRV_DIMENSION_TEXTURE1DARRAY",
"D3D11_SRV_DIMENSION_TEXTURE2D",
"D3D11_SRV_DIMENSION_TEXTURE2DARRAY",
"D3D11_SRV_DIMENSION_TEXTURE2DMS",
"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY",
"D3D11_SRV_DIMENSION_TEXTURE3D",
"D3D11_SRV_DIMENSION_TEXTURECUBE",
"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY",
"D3D11_SRV_DIMENSION_BUFFEREX",
])
D3D11_DSV_DIMENSION = Enum("D3D11_DSV_DIMENSION", [
"D3D11_DSV_DIMENSION_UNKNOWN",
"D3D11_DSV_DIMENSION_TEXTURE1D",
"D3D11_DSV_DIMENSION_TEXTURE1DARRAY",
"D3D11_DSV_DIMENSION_TEXTURE2D",
"D3D11_DSV_DIMENSION_TEXTURE2DARRAY",
"D3D11_DSV_DIMENSION_TEXTURE2DMS",
"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY",
])
D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [
"D3D11_RTV_DIMENSION_UNKNOWN",
"D3D11_RTV_DIMENSION_BUFFER",
"D3D11_RTV_DIMENSION_TEXTURE1D",
"D3D11_RTV_DIMENSION_TEXTURE1DARRAY",
"D3D11_RTV_DIMENSION_TEXTURE2D",
"D3D11_RTV_DIMENSION_TEXTURE2DARRAY",
"D3D11_RTV_DIMENSION_TEXTURE2DMS",
"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY",
"D3D11_RTV_DIMENSION_TEXTURE3D",
])
D3D11_UAV_DIMENSION = Enum("D3D11_UAV_DIMENSION", [
"D3D11_UAV_DIMENSION_UNKNOWN",
"D3D11_UAV_DIMENSION_BUFFER",
"D3D11_UAV_DIMENSION_TEXTURE1D",
"D3D11_UAV_DIMENSION_TEXTURE1DARRAY",
"D3D11_UAV_DIMENSION_TEXTURE2D",
"D3D11_UAV_DIMENSION_TEXTURE2DARRAY",
"D3D11_UAV_DIMENSION_TEXTURE3D",
])
D3D11_USAGE = Enum("D3D11_USAGE", [
"D3D11_USAGE_DEFAULT",
"D3D11_USAGE_IMMUTABLE",
"D3D11_USAGE_DYNAMIC",
"D3D11_USAGE_STAGING",
])
D3D11_BIND_FLAG = Flags(UINT, [
"D3D11_BIND_VERTEX_BUFFER",
"D3D11_BIND_INDEX_BUFFER",
"D3D11_BIND_CONSTANT_BUFFER",
"D3D11_BIND_SHADER_RESOURCE",
"D3D11_BIND_STREAM_OUTPUT",
"D3D11_BIND_RENDER_TARGET",
"D3D11_BIND_DEPTH_STENCIL",
"D3D11_BIND_UNORDERED_ACCESS",
])
D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
"D3D11_CPU_ACCESS_WRITE",
"D3D11_CPU_ACCESS_READ",
])
D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
"D3D11_RESOURCE_MISC_GENERATE_MIPS",
"D3D11_RESOURCE_MISC_SHARED",
"D3D11_RESOURCE_MISC_TEXTURECUBE",
"D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS",
"D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
"D3D11_RESOURCE_MISC_BUFFER_STRUCTURED",
"D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
"D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX",
"D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])
D3D11_MAP = Enum("D3D11_MAP", [
"D3D11_MAP_READ",
"D3D11_MAP_WRITE",
"D3D11_MAP_READ_WRITE",
"D3D11_MAP_WRITE_DISCARD",
"D3D11_MAP_WRITE_NO_OVERWRITE",
])
D3D11_MAP_FLAG = Flags(UINT, [
"D3D11_MAP_FLAG_DO_NOT_WAIT",
])
D3D11_RAISE_FLAG = Flags(UINT, [
"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR",
])
D3D11_CLEAR_FLAG = Flags(UINT, [
"D3D11_CLEAR_DEPTH",
"D3D11_CLEAR_STENCIL",
])
D3D11_RECT = Alias("D3D11_RECT", RECT)
D3D11_BOX = Struct("D3D11_BOX", [
(UINT, "left"),
(UINT, "top"),
(UINT, "front"),
(UINT, "right"),
(UINT, "bottom"),
(UINT, "back"),
])
ID3D11DeviceChild.methods += [
StdMethod(Void, "GetDevice", [Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice")]),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
]
D3D11_COMPARISON_FUNC = Enum("D3D11_COMPARISON_FUNC", [
"D3D11_COMPARISON_NEVER",
"D3D11_COMPARISON_LESS",
"D3D11_COMPARISON_EQUAL",
"D3D11_COMPARISON_LESS_EQUAL",
"D3D11_COMPARISON_GREATER",
"D3D11_COMPARISON_NOT_EQUAL",
"D3D11_COMPARISON_GREATER_EQUAL",
"D3D11_COMPARISON_ALWAYS",
])
D3D11_DEPTH_WRITE_MASK = Enum("D3D11_DEPTH_WRITE_MASK", [
"D3D11_DEPTH_WRITE_MASK_ZERO",
"D3D11_DEPTH_WRITE_MASK_ALL",
])
D3D11_STENCIL_OP = Enum("D3D11_STENCIL_OP", [
"D3D11_STENCIL_OP_KEEP",
"D3D11_STENCIL_OP_ZERO",
"D3D11_STENCIL_OP_REPLACE",
"D3D11_STENCIL_OP_INCR_SAT",
"D3D11_STENCIL_OP_DECR_SAT",
"D3D11_STENCIL_OP_INVERT",
"D3D11_STENCIL_OP_INCR",
"D3D11_STENCIL_OP_DECR",
])
D3D11_DEPTH_STENCILOP_DESC = Struct("D3D11_DEPTH_STENCILOP_DESC", [
(D3D11_STENCIL_OP, "StencilFailOp"),
(D3D11_STENCIL_OP, "StencilDepthFailOp"),
(D3D11_STENCIL_OP, "StencilPassOp"),
(D3D11_COMPARISON_FUNC, "StencilFunc"),
])
D3D11_DEPTH_STENCIL_DESC = Struct("D3D11_DEPTH_STENCIL_DESC", [
(BOOL, "DepthEnable"),
(D3D11_DEPTH_WRITE_MASK, "DepthWriteMask"),
(D3D11_COMPARISON_FUNC, "DepthFunc"),
(BOOL, "StencilEnable"),
(UINT8, "StencilReadMask"),
(UINT8, "StencilWriteMask"),
(D3D11_DEPTH_STENCILOP_DESC, "FrontFace"),
(D3D11_DEPTH_STENCILOP_DESC, "BackFace"),
])
ID3D11DepthStencilState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), "pDesc")]),
]
D3D11_BLEND = Enum("D3D11_BLEND", [
"D3D11_BLEND_ZERO",
"D3D11_BLEND_ONE",
"D3D11_BLEND_SRC_COLOR",
"D3D11_BLEND_INV_SRC_COLOR",
"D3D11_BLEND_SRC_ALPHA",
"D3D11_BLEND_INV_SRC_ALPHA",
"D3D11_BLEND_DEST_ALPHA",
"D3D11_BLEND_INV_DEST_ALPHA",
"D3D11_BLEND_DEST_COLOR",
"D3D11_BLEND_INV_DEST_COLOR",
"D3D11_BLEND_SRC_ALPHA_SAT",
"D3D11_BLEND_BLEND_FACTOR",
"D3D11_BLEND_INV_BLEND_FACTOR",
"D3D11_BLEND_SRC1_COLOR",
"D3D11_BLEND_INV_SRC1_COLOR",
"D3D11_BLEND_SRC1_ALPHA",
"D3D11_BLEND_INV_SRC1_ALPHA",
])
D3D11_BLEND_OP = Enum("D3D11_BLEND_OP", [
"D3D11_BLEND_OP_ADD",
"D3D11_BLEND_OP_SUBTRACT",
"D3D11_BLEND_OP_REV_SUBTRACT",
"D3D11_BLEND_OP_MIN",
"D3D11_BLEND_OP_MAX",
])
D3D11_COLOR_WRITE_ENABLE = Enum("D3D11_COLOR_WRITE_ENABLE", [
"D3D11_COLOR_WRITE_ENABLE_ALL",
"D3D11_COLOR_WRITE_ENABLE_RED",
"D3D11_COLOR_WRITE_ENABLE_GREEN",
"D3D11_COLOR_WRITE_ENABLE_BLUE",
"D3D11_COLOR_WRITE_ENABLE_ALPHA",
])
D3D11_RENDER_TARGET_BLEND_DESC = Struct("D3D11_RENDER_TARGET_BLEND_DESC", [
(BOOL, "BlendEnable"),
(D3D11_BLEND, "SrcBlend"),
(D3D11_BLEND, "DestBlend"),
(D3D11_BLEND_OP, "BlendOp"),
(D3D11_BLEND, "SrcBlendAlpha"),
(D3D11_BLEND, "DestBlendAlpha"),
(D3D11_BLEND_OP, "BlendOpAlpha"),
(UINT8, "RenderTargetWriteMask"),
])
D3D11_BLEND_DESC = Struct("D3D11_BLEND_DESC", [
(BOOL, "AlphaToCoverageEnable"),
(BOOL, "IndependentBlendEnable"),
(Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), "RenderTarget"),
])
ID3D11BlendState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BLEND_DESC), "pDesc")]),
]
D3D11_RASTERIZER_DESC = Struct("D3D11_RASTERIZER_DESC", [
(D3D11_FILL_MODE, "FillMode"),
(D3D11_CULL_MODE, "CullMode"),
(BOOL, "FrontCounterClockwise"),
(INT, "DepthBias"),
(FLOAT, "DepthBiasClamp"),
(FLOAT, "SlopeScaledDepthBias"),
(BOOL, "DepthClipEnable"),
(BOOL, "ScissorEnable"),
(BOOL, "MultisampleEnable"),
(BOOL, "AntialiasedLineEnable"),
])
ID3D11RasterizerState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RASTERIZER_DESC), "pDesc")]),
]
D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
(OpaquePointer(Const(Void)), "pSysMem"),
(UINT, "SysMemPitch"),
(UINT, "SysMemSlicePitch"),
])
D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
(OpaquePointer(Void), "pData"),
(UINT, "RowPitch"),
(UINT, "DepthPitch"),
])
ID3D11Resource.methods += [
StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")]),
StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
StdMethod(UINT, "GetEvictionPriority", []),
]
D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
(UINT, "ByteWidth"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
(UINT, "StructureByteStride"),
])
ID3D11Buffer.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")]),
]
D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [
(UINT, "Width"),
(UINT, "MipLevels"),
(UINT, "ArraySize"),
(DXGI_FORMAT, "Format"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture1D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")]),
]
D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "MipLevels"),
(UINT, "ArraySize"),
(DXGI_FORMAT, "Format"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture2D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]),
]
D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Depth"),
(UINT, "MipLevels"),
(DXGI_FORMAT, "Format"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture3D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")]),
]
D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [
"D3D11_TEXTURECUBE_FACE_POSITIVE_X",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_X",
"D3D11_TEXTURECUBE_FACE_POSITIVE_Y",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y",
"D3D11_TEXTURECUBE_FACE_POSITIVE_Z",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z",
])
ID3D11View.methods += [
StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]),
]
D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [
(Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
(Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [
"D3D11_BUFFEREX_SRV_FLAG_RAW",
])
D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [
(UINT, "FirstElement"),
(UINT, "NumElements"),
(D3D11_BUFFEREX_SRV_FLAG, "Flags"),
])
D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "First2DArrayFace"),
(UINT, "NumCubes"),
])
D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_SRV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_SRV, "Buffer"),
(D3D11_TEX1D_SRV, "Texture1D"),
(D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"),
(D3D11_TEX2D_SRV, "Texture2D"),
(D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"),
(D3D11_TEX2DMS_SRV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
(D3D11_TEX3D_SRV, "Texture3D"),
(D3D11_TEXCUBE_SRV, "TextureCube"),
(D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"),
(D3D11_BUFFEREX_SRV, "BufferEx"),
]), None),
])
ID3D11ShaderResourceView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")]),
]
D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
(Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
(Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [
(UINT, "MipSlice"),
])
D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstWSlice"),
(UINT, "WSize"),
])
D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_RTV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_RTV, "Buffer"),
(D3D11_TEX1D_RTV, "Texture1D"),
(D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
(D3D11_TEX2D_RTV, "Texture2D"),
(D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
(D3D11_TEX2DMS_RTV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
(D3D11_TEX3D_RTV, "Texture3D"),
]), None),
])
ID3D11RenderTargetView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]),
]
D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [
(UINT, "MipSlice"),
])
D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_DSV_FLAG = Flags(UINT, [
"D3D11_DSV_READ_ONLY_DEPTH",
"D3D11_DSV_READ_ONLY_STENCIL",
])
D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_DSV_DIMENSION, "ViewDimension"),
(D3D11_DSV_FLAG, "Flags"),
(Union(None, [
(D3D11_TEX1D_DSV, "Texture1D"),
(D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"),
(D3D11_TEX2D_DSV, "Texture2D"),
(D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"),
(D3D11_TEX2DMS_DSV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
]), None),
])
ID3D11DepthStencilView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")]),
]
D3D11_BUFFER_UAV_FLAG = Flags(UINT, [
"D3D11_BUFFER_UAV_FLAG_RAW",
"D3D11_BUFFER_UAV_FLAG_APPEND",
"D3D11_BUFFER_UAV_FLAG_COUNTER",
])
D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [
(UINT, "FirstElement"),
(UINT, "NumElements"),
(D3D11_BUFFER_UAV_FLAG, "Flags"),
])
D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [
(UINT, "MipSlice"),
])
D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstWSlice"),
(UINT, "WSize"),
])
D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_UAV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_UAV, "Buffer"),
(D3D11_TEX1D_UAV, "Texture1D"),
(D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"),
(D3D11_TEX2D_UAV, "Texture2D"),
(D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"),
(D3D11_TEX3D_UAV, "Texture3D"),
]), None),
])
ID3D11UnorderedAccessView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")]),
]
D3D11_FILTER = Enum("D3D11_FILTER", [
"D3D11_FILTER_MIN_MAG_MIP_POINT",
"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_ANISOTROPIC",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_ANISOTROPIC",
])
D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", [
"D3D11_FILTER_TYPE_POINT",
"D3D11_FILTER_TYPE_LINEAR",
])
D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [
"D3D11_TEXTURE_ADDRESS_WRAP",
"D3D11_TEXTURE_ADDRESS_MIRROR",
"D3D11_TEXTURE_ADDRESS_CLAMP",
"D3D11_TEXTURE_ADDRESS_BORDER",
"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE",
])
D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [
(D3D11_FILTER, "Filter"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressU"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressV"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressW"),
(FLOAT, "MipLODBias"),
(UINT, "MaxAnisotropy"),
(D3D11_COMPARISON_FUNC, "ComparisonFunc"),
(Array(FLOAT, 4), "BorderColor"),
(FLOAT, "MinLOD"),
(FLOAT, "MaxLOD"),
])
ID3D11SamplerState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")]),
]
D3D11_FORMAT_SUPPORT = Flags(UINT, [
"D3D11_FORMAT_SUPPORT_BUFFER",
"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER",
"D3D11_FORMAT_SUPPORT_SO_BUFFER",
"D3D11_FORMAT_SUPPORT_TEXTURE1D",
"D3D11_FORMAT_SUPPORT_TEXTURE2D",
"D3D11_FORMAT_SUPPORT_TEXTURE3D",
"D3D11_FORMAT_SUPPORT_TEXTURECUBE",
"D3D11_FORMAT_SUPPORT_SHADER_LOAD",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
"D3D11_FORMAT_SUPPORT_MIP",
"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN",
"D3D11_FORMAT_SUPPORT_RENDER_TARGET",
"D3D11_FORMAT_SUPPORT_BLENDABLE",
"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL",
"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
"D3D11_FORMAT_SUPPORT_DISPLAY",
"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
"D3D11_FORMAT_SUPPORT_SHADER_GATHER",
"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST",
"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW",
"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON",
])
D3D11_FORMAT_SUPPORT2 = Enum("D3D11_FORMAT_SUPPORT2", [
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX",
"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD",
"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE",
])
ID3D11Asynchronous.methods += [
StdMethod(UINT, "GetDataSize", []),
]
D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [
"D3D11_ASYNC_GETDATA_DONOTFLUSH",
])
D3D11_QUERY = Enum("D3D11_QUERY", [
"D3D11_QUERY_EVENT",
"D3D11_QUERY_OCCLUSION",
"D3D11_QUERY_TIMESTAMP",
"D3D11_QUERY_TIMESTAMP_DISJOINT",
"D3D11_QUERY_PIPELINE_STATISTICS",
"D3D11_QUERY_OCCLUSION_PREDICATE",
"D3D11_QUERY_SO_STATISTICS",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE",
"D3D11_QUERY_SO_STATISTICS_STREAM0",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0",
"D3D11_QUERY_SO_STATISTICS_STREAM1",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1",
"D3D11_QUERY_SO_STATISTICS_STREAM2",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2",
"D3D11_QUERY_SO_STATISTICS_STREAM3",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3",
])
D3D11_QUERY_MISC_FLAG = Flags(UINT, [
"D3D11_QUERY_MISC_PREDICATEHINT",
])
D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [
(D3D11_QUERY, "Query"),
(D3D11_QUERY_MISC_FLAG, "MiscFlags"),
])
ID3D11Query.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")]),
]
D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [
(UINT64, "Frequency"),
(BOOL, "Disjoint"),
])
D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [
(UINT64, "IAVertices"),
(UINT64, "IAPrimitives"),
(UINT64, "VSInvocations"),
(UINT64, "GSInvocations"),
(UINT64, "GSPrimitives"),
(UINT64, "CInvocations"),
(UINT64, "CPrimitives"),
(UINT64, "PSInvocations"),
(UINT64, "HSInvocations"),
(UINT64, "DSInvocations"),
(UINT64, "CSInvocations"),
])
D3D11_QUERY_DATA_SO_STATISTICS = Struct("D3D11_QUERY_DATA_SO_STATISTICS", [
(UINT64, "NumPrimitivesWritten"),
(UINT64, "PrimitivesStorageNeeded"),
])
D3D11_COUNTER = Enum("D3D11_COUNTER", [
"D3D11_COUNTER_DEVICE_DEPENDENT_0",
])
D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [
"D3D11_COUNTER_TYPE_FLOAT32",
"D3D11_COUNTER_TYPE_UINT16",
"D3D11_COUNTER_TYPE_UINT32",
"D3D11_COUNTER_TYPE_UINT64",
])
D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [
(D3D11_COUNTER, "Counter"),
(UINT, "MiscFlags"),
])
D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [
(D3D11_COUNTER, "LastDeviceDependentCounter"),
(UINT, "NumSimultaneousCounters"),
(UINT8, "NumDetectableParallelUnits"),
])
ID3D11Counter.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")]),
]
D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum("D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", [
"D3D11_STANDARD_MULTISAMPLE_PATTERN",
"D3D11_CENTER_MULTISAMPLE_PATTERN",
])
D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [
"D3D11_DEVICE_CONTEXT_IMMEDIATE",
"D3D11_DEVICE_CONTEXT_DEFERRED",
])
D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [
(UINT, "InstanceId"),
(UINT, "InstanceIndex"),
(UINT, "TypeId"),
(UINT, "ConstantBuffer"),
(UINT, "BaseConstantBufferOffset"),
(UINT, "BaseTexture"),
(UINT, "BaseSampler"),
(BOOL, "Created"),
])
ID3D11ClassInstance.methods += [
StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")]),
StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")]),
StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")]),
]
ID3D11ClassLinkage.methods += [
StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
]
ID3D11CommandList.methods += [
StdMethod(UINT, "GetContextFlags", []),
]
D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [
(BOOL, "DriverConcurrentCreates"),
(BOOL, "DriverCommandLists"),
])
D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [
(BOOL, "DoublePrecisionFloatShaderOps"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [
(DXGI_FORMAT, "InFormat"),
(D3D11_FORMAT_SUPPORT, "OutFormatSupport"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [
(DXGI_FORMAT, "InFormat"),
(D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"),
])
D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [
(BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"),
])
D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [
("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)),
("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)),
("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)),
("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),
("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)),
], Blob(Void, "FeatureSupportDataSize"), False)
ID3D11DeviceContext.methods += [
StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]),
StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Pointer(Const(UINT)), "pStrides"), (Pointer(Const(UINT)), "pOffsets")]),
StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]),
StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")]),
StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]),
StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]),
StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Pointer(Const(UINT)), "pOffsets")]),
StdMethod(Void, "DrawAuto", []),
StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]),
StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]),
StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]),
StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]),
StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]),
StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]),
StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (OpaquePointer(Const(Void)), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]),
StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]),
StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]),
StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]),
StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]),
StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")]),
StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]),
StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Pointer(UINT), "pStrides"), Out(Pointer(UINT), "pOffsets")]),
StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")]),
StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), (Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]),
StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]),
StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]),
StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]),
StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "ClearState", []),
StdMethod(Void, "Flush", []),
StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", []),
StdMethod(UINT, "GetContextFlags", []),
StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]),
]
D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [
"D3D11_CREATE_DEVICE_SINGLETHREADED",
"D3D11_CREATE_DEVICE_DEBUG",
"D3D11_CREATE_DEVICE_SWITCH_TO_REF",
"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS",
"D3D11_CREATE_DEVICE_BGRA_SUPPORT",
])
ID3D11Device.methods += [
StdMethod(HRESULT, "CreateBuffer", [(Pointer(Const(D3D11_BUFFER_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Buffer)), "ppBuffer")]),
StdMethod(HRESULT, "CreateTexture1D", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture1D)), "ppTexture1D")]),
StdMethod(HRESULT, "CreateTexture2D", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture2D)), "ppTexture2D")]),
StdMethod(HRESULT, "CreateTexture3D", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture3D)), "ppTexture3D")]),
StdMethod(HRESULT, "CreateShaderResourceView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), "ppSRView")]),
StdMethod(HRESULT, "CreateUnorderedAccessView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), "ppUAView")]),
StdMethod(HRESULT, "CreateRenderTargetView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), "ppRTView")]),
StdMethod(HRESULT, "CreateDepthStencilView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
StdMethod(HRESULT, "CreateInputLayout", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), "NumElements"), "pInputElementDescs"), (UINT, "NumElements"), (Blob(Const(Void), "BytecodeLength"), "pShaderBytecodeWithInputSignature"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
StdMethod(HRESULT, "CreateVertexShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader")]),
StdMethod(HRESULT, "CreateGeometryShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
StdMethod(HRESULT, "CreateGeometryShaderWithStreamOutput", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), "NumEntries"), "pSODeclaration"), (UINT, "NumEntries"), (Array(Const(UINT), "NumStrides"), "pBufferStrides"), (UINT, "NumStrides"), (UINT, "RasterizedStream"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
StdMethod(HRESULT, "CreatePixelShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader")]),
StdMethod(HRESULT, "CreateHullShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader")]),
StdMethod(HRESULT, "CreateDomainShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader")]),
StdMethod(HRESULT, "CreateComputeShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader")]),
StdMethod(HRESULT, "CreateClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
StdMethod(HRESULT, "CreateBlendState", [(Pointer(Const(D3D11_BLEND_DESC)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState")]),
StdMethod(HRESULT, "CreateDepthStencilState", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), "pDepthStencilDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState")]),
StdMethod(HRESULT, "CreateRasterizerState", [(Pointer(Const(D3D11_RASTERIZER_DESC)), "pRasterizerDesc"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
StdMethod(HRESULT, "CreateSamplerState", [(Pointer(Const(D3D11_SAMPLER_DESC)), "pSamplerDesc"), Out(Pointer(ObjPointer(ID3D11SamplerState)), "ppSamplerState")]),
StdMethod(HRESULT, "CreateQuery", [(Pointer(Const(D3D11_QUERY_DESC)), "pQueryDesc"), Out(Pointer(ObjPointer(ID3D11Query)), "ppQuery")]),
StdMethod(HRESULT, "CreatePredicate", [(Pointer(Const(D3D11_QUERY_DESC)), "pPredicateDesc"), Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate")]),
StdMethod(HRESULT, "CreateCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pCounterDesc"), Out(Pointer(ObjPointer(ID3D11Counter)), "ppCounter")]),
StdMethod(HRESULT, "CreateDeferredContext", [(UINT, "ContextFlags"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppDeferredContext")]),
StdMethod(HRESULT, "OpenSharedResource", [(HANDLE, "hResource"), (REFIID, "ReturnedInterface"), Out(Pointer(ObjPointer(Void)), "ppResource")]),
StdMethod(HRESULT, "CheckFormatSupport", [(DXGI_FORMAT, "Format"), Out(Pointer(D3D11_FORMAT_SUPPORT), "pFormatSupport")]),
StdMethod(HRESULT, "CheckMultisampleQualityLevels", [(DXGI_FORMAT, "Format"), (UINT, "SampleCount"), Out(Pointer(UINT), "pNumQualityLevels")]),
StdMethod(Void, "CheckCounterInfo", [Out(Pointer(D3D11_COUNTER_INFO), "pCounterInfo")]),
StdMethod(HRESULT, "CheckCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pDesc"), Out(Pointer(D3D11_COUNTER_TYPE), "pType"), Out(Pointer(UINT), "pActiveCounters"), Out(LPSTR, "szName"), Out(Pointer(UINT), "pNameLength"), Out(LPSTR, "szUnits"), Out(Pointer(UINT), "pUnitsLength"), Out(LPSTR, "szDescription"), Out(Pointer(UINT), "pDescriptionLength")]),
StdMethod(HRESULT, "CheckFeatureSupport", [(D3D11_FEATURE, "Feature"), Out(D3D11_FEATURE_DATA, "pFeatureSupportData"), (UINT, "FeatureSupportDataSize")]),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
StdMethod(D3D_FEATURE_LEVEL, "GetFeatureLevel", []),
StdMethod(D3D11_CREATE_DEVICE_FLAG, "GetCreationFlags", []),
StdMethod(HRESULT, "GetDeviceRemovedReason", []),
StdMethod(Void, "GetImmediateContext", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
StdMethod(HRESULT, "SetExceptionMode", [(D3D11_RAISE_FLAG, "RaiseFlags")]),
StdMethod(UINT, "GetExceptionMode", []),
]
d3d11 = API("d3d11")
d3d11.addFunctions([
StdFunction(HRESULT, "D3D11CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
StdFunction(HRESULT, "D3D11CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
# XXX: Undocumented functions, called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is set
StdFunction(HRESULT, "D3D11CoreRegisterLayers", [LPCVOID, DWORD], internal=True),
StdFunction(SIZE_T, "D3D11CoreGetLayeredDeviceSize", [LPCVOID, DWORD], internal=True),
StdFunction(HRESULT, "D3D11CoreCreateLayeredDevice", [LPCVOID, DWORD, LPCVOID, (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppvObj")], internal=True),
StdFunction(HRESULT, "D3D11CoreCreateDevice", [DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True),
])
d3d11.addInterfaces([
IDXGIAdapter1,
IDXGIDevice1,
IDXGIResource,
ID3D11Debug,
ID3D11InfoQueue,
ID3D11SwitchToRef,
])
| 50.016168 | 596 | 0.739227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29,628 | 0.478875 |
8a682a5b6be55bf5cb429b4f53cde390f56c0458 | 1,244 | py | Python | day08.py | Pil0u/adventofcode2020 | 97a6c291fc1653bcb1ea7abd7f38e71e2c0458f8 | [
"MIT"
] | null | null | null | day08.py | Pil0u/adventofcode2020 | 97a6c291fc1653bcb1ea7abd7f38e71e2c0458f8 | [
"MIT"
] | null | null | null | day08.py | Pil0u/adventofcode2020 | 97a6c291fc1653bcb1ea7abd7f38e71e2c0458f8 | [
"MIT"
] | null | null | null | from copy import deepcopy
def boot(seq):
index = 0
played_indices = set()
acc = 0
while True:
if index == len(seq):
return True, acc
if index in played_indices:
return False, acc
played_indices.add(index)
line = seq[index].split()
op = line[0]
value = int(line[1])
if op == 'nop':
index += 1
if op == 'acc':
acc += value
index += 1
if op == 'jmp':
index += value
def generate_sequences(list_):
all_seqs = []
for idx, value in enumerate(list_):
if value[:3] == 'nop':
seq = deepcopy(list_)
seq[idx] = 'jmp' + value[3:]
all_seqs.append(seq)
if value[:3] == 'jmp':
seq = deepcopy(list_)
seq[idx] = 'nop' + value[3:]
all_seqs.append(seq)
return all_seqs
def result(input_):
# Part 1
part_one = boot(input_)[1]
# Part 2
all_sequences = generate_sequences(input_)
    for sequence in all_sequences:
        terminated, acc = boot(sequence)
        if terminated:
            part_two = acc
            break
    return part_one, part_two
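# Added usage sketch (not part of the original solution): it assumes the puzzle
# input is a plain-text file with one instruction per line; the file name
# 'input08.txt' is only an example.
if __name__ == '__main__':
    with open('input08.txt') as puzzle_file:
        print(result(puzzle_file.read().splitlines()))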
| 19.4375 | 46 | 0.498392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.040997 |
8a685db25a2acacd77798f8f41ad85739a6b001d | 3,825 | py | Python | train_fcn.py | onlyNata/segModel | 7a823e096b3ed7f554a331c5fba39e24c9e0d8bf | [
"MIT"
] | 3 | 2018-07-02T06:15:36.000Z | 2019-06-10T06:26:18.000Z | train_fcn.py | onlyNata/segModel | 7a823e096b3ed7f554a331c5fba39e24c9e0d8bf | [
"MIT"
] | null | null | null | train_fcn.py | onlyNata/segModel | 7a823e096b3ed7f554a331c5fba39e24c9e0d8bf | [
"MIT"
] | 1 | 2018-10-19T08:07:59.000Z | 2018-10-19T08:07:59.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 16:34:21 2018
@author: LiHongWang
"""
import os
import tensorflow as tf
from model import fcn_vgg
from model import fcn_mobile
from model import fcn_resnet_v2
from data import input_data
slim = tf.contrib.slim
def main():
num_classes=2
    tfrecord_dir = 'D:/dataSet/kitti/road/sub_um_lane_tra66.tfrecord'
train_dir = './fm2/'
if not os.path.exists(train_dir):
os.makedirs(train_dir)
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
tf.logging.set_verbosity(tf.logging.INFO)
with tf.device("/cpu:0"):
            samples=input_data.get_images_labels(tfrecord_dir,num_classes,66,
crop_size=[224,224],
batch_size=4)
batch_queue = slim.prefetch_queue.prefetch_queue(samples,
capacity=128 )
tra_batch = batch_queue.dequeue()
logit,prediction=fcn_mobile.fcn_mobv1(tra_batch['image'],num_classes)
# logit,prediction=fcn_vgg.fcn_vgg16(tra_batch['image'],num_classes)
# logit,prediction=fcn_resnet_v2.fcn_res101(tra_batch['image'],num_classes)
cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit,
labels=tf.squeeze(tra_batch['label'], squeeze_dims=[3]),name="entropy")
loss = tf.reduce_mean(cross_entropy,name='loss')
slim.losses.add_loss(loss)
total_loss = slim.losses.get_total_loss()
# print("image", tra_batch['image'])
# print("label", tf.cast(tra_batch['label']*255, tf.uint8))
# print("prediction", tf.cast(prediction*255, tf.uint8))
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total_Loss', total_loss)
tf.summary.image("image", tra_batch['image'], max_outputs=4)
tf.summary.image("label", tf.cast(tra_batch['label']*255, tf.uint8), max_outputs=4)
tf.summary.image("prediction", tf.cast(prediction*255, tf.uint8), max_outputs=4)
lr = tf.train.exponential_decay(0.001,
global_step,
10000,
0.8,
staircase=True)
#lr = tf.constant(0.001, tf.float32)
tf.summary.scalar('learning_rate', lr)
for variable in slim.get_model_variables():
tf.summary.histogram(variable.op.name, variable)
# Specify the optimizer and create the train op:
optimizer = tf.train.RMSPropOptimizer(lr,0.9)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
config=tf.ConfigProto(gpu_options=gpu_options)
final_loss = slim.learning.train(train_op,
logdir=train_dir,
log_every_n_steps=100,
save_summaries_secs=20,
save_interval_secs=1800,
init_fn=None,#fcn_mobile.get_init_fn(),
session_config=config,
number_of_steps=65000)
print('Finished training. Last batch loss %f' % final_loss)
if __name__=='__main__':
main() | 34.151786 | 99 | 0.539608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 836 | 0.218562 |
8a69c6a560d7f1d6a12a9bb69281971b56733693 | 1,637 | py | Python | setup.py | xbabka01/filetype.py | faba42b86988bd21a50d5b20919ecff0c6a84957 | [
"MIT"
] | null | null | null | setup.py | xbabka01/filetype.py | faba42b86988bd21a50d5b20919ecff0c6a84957 | [
"MIT"
] | null | null | null | setup.py | xbabka01/filetype.py | faba42b86988bd21a50d5b20919ecff0c6a84957 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import find_packages, setup
setup(
name='filetype',
version='1.0.7',
description='Infer file type and MIME type of any file/buffer. '
'No external dependencies.',
long_description=codecs.open('README.rst', 'r',
encoding='utf-8', errors='ignore').read(),
keywords='file libmagic magic infer numbers magicnumbers discovery mime '
'type kind',
url='https://github.com/h2non/filetype.py',
download_url='https://github.com/h2non/filetype.py/tarball/master',
author='Tomas Aparicio',
author_email='[email protected]',
license='MIT',
license_files=['LICENSE'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System',
'Topic :: System :: Filesystems',
'Topic :: Utilities'],
platforms=['any'],
packages=find_packages(exclude=['dist', 'build', 'docs', 'tests',
'examples']),
package_data={'filetype': ['LICENSE', '*.md']},
zip_safe=True)
| 38.069767 | 77 | 0.588882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 967 | 0.590715 |
8a69d4b012c5607f3bca22996d7b21d1f2aed261 | 2,049 | py | Python | demos/netmiko_textfsm.py | ryanaa08/NPA | 45173efa60713858bb8b1d884fe12c50fe69920c | [
"BSD-Source-Code"
] | 4 | 2019-01-15T16:15:26.000Z | 2021-12-05T16:03:15.000Z | demos/netmiko_textfsm.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | [
"BSD-Source-Code"
] | null | null | null | demos/netmiko_textfsm.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | [
"BSD-Source-Code"
] | 2 | 2019-07-04T16:38:19.000Z | 2020-01-31T15:38:27.000Z | # make sure templates are present and netmiko knows about them
# git clone https://github.com/networktocode/ntc-templates
# export NET_TEXTFSM=/home/ntc/ntc-templates/templates/
# see https://github.com/networktocode/ntc-templates/tree/master/templates
# for list of templates
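# the same setting can also be made from Python before connecting; this is only
# a sketch reusing the lab path from the comments above, adjust it to your setup:
# import os
# os.environ["NET_TEXTFSM"] = "/home/ntc/ntc-templates/templates/"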
from netmiko import ConnectHandler
import json
user = 'ntc'
pwd = 'ntc123'
d_type = 'cisco_ios'
csr1 = ConnectHandler(ip='csr1', username=user, password=pwd, device_type=d_type)
sh_ip_int_br = csr1.send_command("show ip int brief", use_textfsm=True)
# [{'status': 'up', 'intf': 'GigabitEthernet1', 'ipaddr': '10.0.0.51', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet2', 'ipaddr': 'unassigned', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet3', 'ipaddr': 'unassigned', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet4', 'ipaddr': '5.12.1.1', 'proto': 'up'}, {'status': 'up', 'intf': 'Loopback100', 'ipaddr': '10.200.1.20', 'proto': 'up'}]
# is type list
print (type(sh_ip_int_br))
# list of dicts
print (type(sh_ip_int_br[0]))
for each_dict in sh_ip_int_br:
    print("\n")
    for key in each_dict.keys():
        print(key)
for each_dict in sh_ip_int_br:
    print("\n")
    for key, value in each_dict.items():
        print(key + " is " + value)
sh_ver_ios = csr1.send_command("show version", use_textfsm=True)
# [{'running_image': 'packages.conf', 'hostname': 'csr1', 'uptime': '6 hours, 59 minutes', 'config_register': '0x2102', 'hardware': ['CSR1000V'], 'version': '16.6.2', 'serial': ['9KIBQAQ3OPE'], 'rommon': 'IOS-XE'}]
# print the json nicely
print (json.dumps(sh_ver_ios, indent=4))
print(sh_ver_ios)
# list
print(type(sh_ver_ios))
# each item is a dict
print(type(sh_ver_ios[0]))
# list of dicts with some nested lists with the dicts
for each_dict in sh_ver_ios:
    print("\n")
    for key, value in each_dict.items():
        if type(value) is list:
            print(key + " is ")
            for list_entry in value:
                print(list_entry)
        if type(value) is str:
            print(key + " is " + value)
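# Added sketch (not part of the original demo): because the output is already
# structured, ordinary Python works on it directly.  The keys used below
# ('intf', 'ipaddr', 'version') come from the sample output in the comments above.
intf_to_ip = {entry['intf']: entry['ipaddr'] for entry in sh_ip_int_br}
print(json.dumps(intf_to_ip, indent=4))
print(sh_ver_ios[0]['version'])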
| 35.947368 | 420 | 0.660322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,131 | 0.551977 |
8a69e368874ca389ea7a44e379f62b44b8a60c98 | 4,411 | py | Python | iap/validate_jwt.py | spitfire55/python-docs-samples | b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed | [
"Apache-2.0"
] | 4 | 2018-12-23T18:17:14.000Z | 2020-01-05T19:13:58.000Z | iap/validate_jwt.py | spitfire55/python-docs-samples | b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed | [
"Apache-2.0"
] | 16 | 2019-06-15T00:02:56.000Z | 2021-03-25T23:22:38.000Z | iap/validate_jwt.py | spitfire55/python-docs-samples | b8fe0d1c5c9f7f5d27965fa3367117af7b1f0aed | [
"Apache-2.0"
] | 4 | 2018-06-03T14:43:25.000Z | 2019-11-24T04:05:18.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample showing how to validate the Identity-Aware Proxy (IAP) JWT.
This code should be used by applications in Google Compute Engine-based
environments (such as Google App Engine flexible environment, Google
Compute Engine, or Google Container Engine) to provide an extra layer
of assurance that a request was authorized by IAP.
For applications running in the App Engine standard environment, use
App Engine's Users API instead.
"""
# [START iap_validate_jwt]
import jwt
import requests
def validate_iap_jwt_from_app_engine(iap_jwt, cloud_project_number,
cloud_project_id):
"""Validate a JWT passed to your App Engine app by Identity-Aware Proxy.
Args:
iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
cloud_project_number: The project *number* for your Google Cloud project.
This is returned by 'gcloud projects describe $PROJECT_ID', or
in the Project Info card in Cloud Console.
cloud_project_id: The project *ID* for your Google Cloud project.
Returns:
(user_id, user_email, error_str).
"""
expected_audience = '/projects/{}/apps/{}'.format(
cloud_project_number, cloud_project_id)
return _validate_iap_jwt(iap_jwt, expected_audience)
def validate_iap_jwt_from_compute_engine(iap_jwt, cloud_project_number,
backend_service_id):
"""Validate an IAP JWT for your (Compute|Container) Engine service.
Args:
iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
cloud_project_number: The project *number* for your Google Cloud project.
This is returned by 'gcloud projects describe $PROJECT_ID', or
in the Project Info card in Cloud Console.
backend_service_id: The ID of the backend service used to access the
application. See
https://cloud.google.com/iap/docs/signed-headers-howto
for details on how to get this value.
Returns:
(user_id, user_email, error_str).
"""
expected_audience = '/projects/{}/global/backendServices/{}'.format(
cloud_project_number, backend_service_id)
return _validate_iap_jwt(iap_jwt, expected_audience)
def _validate_iap_jwt(iap_jwt, expected_audience):
try:
key_id = jwt.get_unverified_header(iap_jwt).get('kid')
if not key_id:
return (None, None, '**ERROR: no key ID**')
key = get_iap_key(key_id)
decoded_jwt = jwt.decode(
iap_jwt, key,
algorithms=['ES256'],
audience=expected_audience)
return (decoded_jwt['sub'], decoded_jwt['email'], '')
except (jwt.exceptions.InvalidTokenError,
requests.exceptions.RequestException) as e:
return (None, None, '**ERROR: JWT validation error {}**'.format(e))
def get_iap_key(key_id):
"""Retrieves a public key from the list published by Identity-Aware Proxy,
re-fetching the key file if necessary.
"""
key_cache = get_iap_key.key_cache
key = key_cache.get(key_id)
if not key:
# Re-fetch the key file.
resp = requests.get(
'https://www.gstatic.com/iap/verify/public_key')
if resp.status_code != 200:
raise Exception(
'Unable to fetch IAP keys: {} / {} / {}'.format(
resp.status_code, resp.headers, resp.text))
key_cache = resp.json()
get_iap_key.key_cache = key_cache
key = key_cache.get(key_id)
if not key:
raise Exception('Key {!r} not found'.format(key_id))
return key
# Used to cache the Identity-Aware Proxy public keys. This code only
# refetches the file when a JWT is signed with a key not present in
# this cache.
get_iap_key.key_cache = {}
# [END iap_validate_jwt]
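# --- Editor's addition: a minimal usage sketch, not part of the original sample. ---
# The project number, project id and backend service id below are hypothetical
# placeholders; real values come from your own Cloud project / IAP setup.
def _example_validate_request(request):
    iap_jwt = request.headers.get('X-Goog-IAP-JWT-Assertion', '')
    # App Engine app behind IAP:
    user_id, user_email, error_str = validate_iap_jwt_from_app_engine(
        iap_jwt, '123456789012', 'my-sample-project')
    # For a Compute/Container Engine backend service, use instead:
    # validate_iap_jwt_from_compute_engine(iap_jwt, '123456789012', '4567890123456789')
    return user_id, user_email, error_str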
| 38.692982 | 79 | 0.682838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,743 | 0.621854 |
8a6b4f25018fb455967003872eafa0810ca93675 | 1,995 | py | Python | examples/calc.py | manatlan/htag | cf085077adf04bec8a2b059497efedb210c59936 | [
"MIT"
] | 1 | 2022-03-12T09:42:13.000Z | 2022-03-12T09:42:13.000Z | examples/calc.py | manatlan/thag | 0c57a103a8dbdbf9e1f09c759f1c35c1c3eff359 | [
"MIT"
] | null | null | null | examples/calc.py | manatlan/thag | 0c57a103a8dbdbf9e1f09c759f1c35c1c3eff359 | [
"MIT"
] | null | null | null | import os,sys; sys.path.insert(0,os.path.dirname(os.path.dirname(__file__)))
from htag import Tag
"""
This example shows you how to make a "Calc App"
(with physical buttons + keyboard events)
There is no work for rendering the layout ;-)
Can't be simpler !
"""
class Calc(Tag.div):
statics=[Tag.H.style("""
.mycalc *,button {font-size:2em;font-family: monospace}
""")]
def init(self):
self.txt=""
self.aff = Tag.Div(" ",_style="border:1px solid black")
self["class"]="mycalc"
self <= self.aff
self <= Tag.button("C", _onclick=self.bind( self.clean) )
self <= [Tag.button(i, _onclick=self.bind( self.press, i) ) for i in "0123456789+-x/."]
self <= Tag.button("=", _onclick=self.bind( self.compute ) )
#-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ with real keyboard
self["onkeyup"] = self.bind( self.presskey, b"event.key" )
def presskey(self,key):
if key in "0123456789+-*/.":
self.press(key)
elif key=="Enter":
self.compute()
elif key in ["Delete","Backspace"]:
self.clean()
#-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/
def press(self,val):
self.txt += val
self.aff.set( self.txt )
def compute(self):
try:
self.txt = str(eval(self.txt.replace("x","*")))
self.aff.set( self.txt )
except:
self.txt = ""
self.aff.set( "Error" )
def clean(self):
self.txt=""
self.aff.set(" ")
if __name__=="__main__":
# import logging
# logging.basicConfig(format='[%(levelname)-5s] %(name)s: %(message)s',level=logging.DEBUG)
# logging.getLogger("htag.tag").setLevel( logging.INFO )
# and execute it in a pywebview instance
from htag.runners import *
# here is another runner, in a simple browser (thru ajax calls)
BrowserHTTP( Calc ).run()
# PyWebWiew( Calc ).run()
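# --- Editor's note (added): the htag idioms used in this example, in brief. ---
# - "self <= child" is htag's overloaded operator for appending a child tag.
# - self.bind(method, ...) wires a browser-side event to the Python method; a bytes
#   argument such as b"event.key" is evaluated as JavaScript and its value is
#   passed to the callback.
# - A runner (BrowserHTTP above, or the commented PyWebWiew line) serves the root
#   Tag as an application.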
| 28.913043 | 96 | 0.543358 | 1,318 | 0.660652 | 0 | 0 | 0 | 0 | 0 | 0 | 834 | 0.418045 |
8a6c2e5a6d6baef647e0e3b1e7b605691b398cfe | 188 | py | Python | res/example1.py | tghira16/Giraphics | 74265c4c0220c677e0fa3e5e65fd0b7087401106 | [
"MIT"
] | 1 | 2021-03-24T10:09:57.000Z | 2021-03-24T10:09:57.000Z | res/example1.py | tghira16/Giraphics | 74265c4c0220c677e0fa3e5e65fd0b7087401106 | [
"MIT"
] | null | null | null | res/example1.py | tghira16/Giraphics | 74265c4c0220c677e0fa3e5e65fd0b7087401106 | [
"MIT"
] | null | null | null | from giraphics.graphing.graph import Graph
def func(x):
return (x-3)*(x+2)*x*0.2
g = Graph(800,600,8,6, 'example1.svg')
g.bg()
g.grid()
g.axes()
g.graph(func)
g.save()
g.display() | 12.533333 | 42 | 0.632979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.074468 |
8a6c4e202130d51c730ab01bd3f2f21e5ec32862 | 758 | py | Python | tools/data.py | seanys/2D-Irregular-Packing-Algorithm | cc10edff2bc2631fcbcb47acf7bb3215e5c5023c | [
"MIT"
] | 29 | 2020-02-07T06:41:25.000Z | 2022-03-16T18:04:07.000Z | tools/data.py | seanys/2D-Irregular-Packing-Algorithm | cc10edff2bc2631fcbcb47acf7bb3215e5c5023c | [
"MIT"
] | 6 | 2020-04-27T01:36:27.000Z | 2022-01-31T11:59:05.000Z | tools/data.py | seanys/2D-Irregular-Packing-Algorithm | cc10edff2bc2631fcbcb47acf7bb3215e5c5023c | [
"MIT"
] | 12 | 2020-05-05T05:34:06.000Z | 2022-03-26T07:32:46.000Z | from tools.geofunc import GeoFunc
import pandas as pd
import json
def getData(index):
'''Datasets that raise errors (hollow polygons): han, jakobs1, jakobs2 '''
'''Too many shapes, not handled yet: shapes, shirt, swim, trousers'''
name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
print("开始处理",name[index],"数据集")
'''Width is not considered for now; everything is expressed by scaling'''
scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
print("缩放",scale[index],"倍")
df = pd.read_csv("data/"+name[index]+".csv")
polygons=[]
for i in range(0,df.shape[0]):
for j in range(0,df['num'][i]):
poly=json.loads(df['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.append(poly)
return polygons
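# --- Editor's addition: how getData is meant to be called (index 1 -> "albano"). ---
# The CSV is expected under data/ with "num" and "polygon" columns, as read above.
def _example_load_dataset():
    polygons = getData(1)
    print("loaded", len(polygons), "scaled polygons")
    return polygons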
| 36.095238 | 141 | 0.60686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.420746 |
8a6c803544f7e0d285bc37ff4aefd197349a5940 | 456 | py | Python | src/trw/reporting/__init__.py | civodlu/trw | b9a1cf045f61d6df9c65c014ef63b4048972dcdc | [
"MIT"
] | 3 | 2019-07-04T01:20:41.000Z | 2020-01-27T02:36:12.000Z | src/trw/reporting/__init__.py | civodlu/trw | b9a1cf045f61d6df9c65c014ef63b4048972dcdc | [
"MIT"
] | null | null | null | src/trw/reporting/__init__.py | civodlu/trw | b9a1cf045f61d6df9c65c014ef63b4048972dcdc | [
"MIT"
] | 2 | 2020-10-19T13:46:06.000Z | 2021-12-27T02:18:10.000Z | #from trw.utils import collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \
# safe_lookup, len_batch
from .export import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image
from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows
from .reporting_bokeh import report, create_default_reporting_options
from .reporting_bokeh_samples import PanelDataSamplesTabular
| 65.142857 | 118 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.317982 |
8a6d51f8a422fff8bc79749ffb6d71189dc006bc | 2,509 | py | Python | vframe_cli/commands/templates/image-mp.py | julescarbon/vframe | 0798841fa9eb7e1252e4cdf71d68d991c26acab8 | [
"MIT"
] | 1 | 2021-05-15T11:06:39.000Z | 2021-05-15T11:06:39.000Z | vframe_cli/commands/templates/image-mp.py | julescarbon/vframe | 0798841fa9eb7e1252e4cdf71d68d991c26acab8 | [
"MIT"
] | null | null | null | vframe_cli/commands/templates/image-mp.py | julescarbon/vframe | 0798841fa9eb7e1252e4cdf71d68d991c26acab8 | [
"MIT"
] | null | null | null | #############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command('')
@click.option('-i', '--input', 'opt_dir_in', required=True)
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True)
@click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True,
help='Glob extension')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('-t', '--threads', 'opt_threads', default=None)
@click.pass_context
def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads):
"""Multiprocessor image template"""
# ------------------------------------------------
# imports
from os.path import join
from pathlib import Path
from dataclasses import asdict
import numpy as np
import cv2 as cv
from tqdm import tqdm
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
from vframe.settings import app_cfg
from vframe.settings.modelzoo_cfg import modelzoo
from vframe.models.dnn import DNN
from vframe.image.dnn_factory import DNNFactory
from vframe.utils import file_utils
from vframe.utils.video_utils import FileVideoStream, mediainfo
log = app_cfg.LOG
# set N threads
if not opt_threads:
opt_threads = cpu_count() # maximum
# glob items
fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive)
if any(opt_slice):
fp_items = fp_items[opt_slice[0]:opt_slice[1]]
log.info(f'Processing: {len(fp_items):,} files')
# -----------------------------------------------------------
# start pool worker
def pool_worker(pool_item):
# init threaded video reader
fp = pool_item['fp']
result = {'fp': fp}
# add media metadata
im = cv.imread(fp)
for i in range(20):
im = cv.blur(im, (35,35))
return result
# end pool worker
# -----------------------------------------------------------
# convert file list into pool work items (one dict per file path)
pool_items = [{'fp': fp} for fp in fp_items]
# init processing pool iterator
# use imap instead of map via @hkyi Stack Overflow 41920124
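# Editor's note (added): imap is lazy and yields results in order as workers
# finish, so tqdm can update per item; map would only return after the whole
# batch completed.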
desc = f'image-mp x{opt_threads}'
with Pool(opt_threads) as p:
pool_results = list(tqdm(p.imap(pool_worker, pool_items), total=len(fp_items), desc=desc)) | 28.83908 | 94 | 0.610602 | 0 | 0 | 0 | 0 | 2,244 | 0.89438 | 0 | 0 | 955 | 0.38063 |
8a6d637336ee5d703603ebc196b3672612c215ab | 1,976 | py | Python | src/learndash/api_resources/user.py | MarkMacDon/learndash-python | a3fbfc45567a524b80c732d735f2ae101119f2e4 | [
"MIT"
] | null | null | null | src/learndash/api_resources/user.py | MarkMacDon/learndash-python | a3fbfc45567a524b80c732d735f2ae101119f2e4 | [
"MIT"
] | 1 | 2021-05-06T19:01:24.000Z | 2021-05-06T19:01:24.000Z | src/learndash/api_resources/user.py | MarkMacDon/learndash-python | a3fbfc45567a524b80c732d735f2ae101119f2e4 | [
"MIT"
] | 2 | 2021-05-05T22:45:04.000Z | 2021-07-24T08:47:02.000Z | import learndash
from learndash.api_resources.abstract import ListableAPIResource
from learndash.api_resources.abstract import RetrievableAPIResource
from learndash.api_resources.abstract import UpdateableAPIResource
from learndash.api_resources.abstract import NestedAPIResource
from learndash.api_resources.typing import UserDict
from learndash.api_resources.typing import UserCourseProgressDict
from learndash.api_resources.typing import UserCourseDict
from learndash.api_resources.typing import UserGroupDict
from learndash.api_resources.typing import UserQuizProgressDict
class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]):
api_path = learndash.path_users
def course_progress(self, id=None):
return UserCourseProgress(id, parent=self)
def courses(self, id=None):
return UserCourse(id, parent=self)
def groups(self, id=None):
return UserGroup(id, parent=self)
def quiz_progress(self, id=None):
return UserQuizProgress(id, parent=self)
class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource):
api_path = learndash.path_user_course_progress
# class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource):
class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable
api_path = learndash.path_user_courses
def instance_url(self):
# This endpoint accepts updates and deletions at its base endpoint
return self.class_url()
class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deleteable
api_path = learndash.path_user_groups
def instance_url(self):
# This endpoint accepts updates and deletions at its base endpoint
return self.class_url()
class UserQuizProgress(ListableAPIResource[UserQuizProgressDict], NestedAPIResource):
api_path = learndash.path_user_quiz_progress
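# --- Editor's addition: a small wiring sketch. The id is hypothetical, and the ---
# --- actual fetch/list behavior lives in the abstract base classes, which are ---
# --- not part of this module. ---
def _example_user_wiring():
    user = User(123)  # assumes the resource constructor takes an id, as the nested classes above do
    return user.courses(), user.groups(), user.quiz_progress()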
| 35.927273 | 113 | 0.802632 | 1,305 | 0.660425 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.120951 |
8a6dd286ad198b0a16465871a4cd84854d419ad0 | 1,824 | py | Python | lib/galaxy/tool_util/deps/container_resolvers/__init__.py | sneumann/galaxy | f6011bab5b8adbabae4986a45849bb9158ffc8bb | [
"CC-BY-3.0"
] | 1 | 2019-07-27T19:30:55.000Z | 2019-07-27T19:30:55.000Z | lib/galaxy/tool_util/deps/container_resolvers/__init__.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | 4 | 2021-02-08T20:28:34.000Z | 2022-03-02T02:52:55.000Z | lib/galaxy/tool_util/deps/container_resolvers/__init__.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | 1 | 2018-05-30T07:38:54.000Z | 2018-05-30T07:38:54.000Z | """The module defines the abstract interface for resolving container images for tool execution."""
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
import six
from galaxy.util.dictifiable import Dictifiable
@six.python_2_unicode_compatible
@six.add_metaclass(ABCMeta)
class ContainerResolver(Dictifiable):
"""Description of a technique for resolving container images for tool execution."""
# Keys for dictification.
dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies']
can_uninstall_dependencies = False
def __init__(self, app_info=None, **kwds):
"""Default initializer for ``ContainerResolver`` subclasses."""
self.app_info = app_info
self.resolver_kwds = kwds
def _get_config_option(self, key, default=None):
"""Look in resolver-specific settings for option and then fallback to
global settings.
"""
if self.app_info and hasattr(self.app_info, key):
return getattr(self.app_info, key)
else:
return default
@abstractmethod
def resolve(self, enabled_container_types, tool_info, **kwds):
"""Find a container matching all supplied requirements for tool.
The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description
of the tool and its requirements.
"""
@abstractproperty
def resolver_type(self):
"""Short label for the type of container resolution."""
def _container_type_enabled(self, container_description, enabled_container_types):
"""Return a boolean indicating if the specified container type is enabled."""
return container_description.type in enabled_container_types
def __str__(self):
return "%s[]" % self.__class__.__name__
| 33.777778 | 98 | 0.707237 | 1,525 | 0.836075 | 0 | 0 | 1,586 | 0.869518 | 0 | 0 | 773 | 0.423794 |
8a6e9d6c995b4c34ef5a6722c4973c2c7fb333f1 | 1,065 | py | Python | projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | 1 | 2020-01-22T15:35:29.000Z | 2020-01-22T15:35:29.000Z | #!/usr/bin/env python3
import glob
import os
import pandas as pd
import dfs
SRC_DIR = f"{dfs.get_data_dir()}/adhd_sin_orig"
OUT_DIR = f"{dfs.get_data_dir()}/adhd_sin"
if __name__ == '__main__':
files = glob.glob(f"{SRC_DIR}/*.csv")
file_names = list(map(os.path.basename, files))
for file_name in file_names:
df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[
['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()
df.columns = ['t', 'x', 'y', 'dl', 'dr']
# fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill
df = df.apply(lambda x: x.interpolate().fillna(method="bfill").fillna(method="ffill")).fillna(0)
df['x'] = df['x'] / 1920
df['y'] = df['y'] / 1080
df['d'] = (df['dl'] + df['dr']) / 2
# start with t=0, and set unit to ms
df['t'] = (df['t'] - df['t'].min()) / 1000
df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')
df.to_csv(f'{OUT_DIR}/{file_name}')
print(f'Processed: {file_name}')
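# Editor's note (added): the rewritten CSVs are indexed by t (ms from the first
# sample), with x/y normalized against a 1920x1080 screen and d the mean of the
# left/right pupil diameters.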
| 35.5 | 107 | 0.613146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.433803 |
8a6f626dba5ce35c66724326d654b9ba19117497 | 4,322 | py | Python | dataProcessing.py | TauferLab/PENGUIN | af789163078310f2504b8a0163df4395ccf119f1 | [
"Apache-2.0"
] | null | null | null | dataProcessing.py | TauferLab/PENGUIN | af789163078310f2504b8a0163df4395ccf119f1 | [
"Apache-2.0"
] | null | null | null | dataProcessing.py | TauferLab/PENGUIN | af789163078310f2504b8a0163df4395ccf119f1 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import matplotlib.pyplot as plt
import CurveFit
import shutil
#find all DIRECTORIES containing non-hidden files ending in FILENAME
def getDataDirectories(DIRECTORY, FILENAME="valLoss.txt"):
directories=[]
for directory in os.scandir(DIRECTORY):
for item in os.scandir(directory):
if item.name.endswith(FILENAME) and not item.name.startswith("."):
directories.append(directory.path)
return directories
#get all non-hidden data files in DIRECTORY with extension EXT
def getDataFiles(DIRECTORY, EXT='txt'):
datafiles=[]
for item in os.scandir(DIRECTORY):
if item.name.endswith("."+EXT) and not item.name.startswith("."):
datafiles.append(item.path)
return datafiles
#checking if loss ever doesn't decrease for numEpochs epochs in a row.
def stopsDecreasing(loss, epoch, numEpochs):
minLoss=np.inf
epochMin=0
for i in range(0,loss.size):
if loss[i] < minLoss:
minLoss=loss[i]
epochMin=epoch[i]
elif (epoch[i]-epochMin) >= numEpochs:
return i, minLoss
return i, minLoss
#dirpath is where the accuracy and loss files are stored. want to move the files into the same format expected by grabNNData.
def createFolders(SEARCHDIR, SAVEDIR):
for item in os.scandir(SEARCHDIR):
name=str(item.name)
files=name.split('-')
SAVEFULLDIR=SAVEDIR+str(files[0])
if not os.path.exists(SAVEFULLDIR):
try:
os.makedirs(SAVEFULLDIR)
except FileExistsError:
#directory already exists--must have been created between the if statement & our attempt at making directory
pass
shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1]))
#a function to read in information (e.g. accuracy, loss) stored at FILENAME
def grabNNData(FILENAME, header='infer', sep=' '):
data = pd.read_csv(FILENAME, sep, header=header)
if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
batch_size=np.array(sortedData['batch_size'])
learning_rate=np.array(sortedData['learning_rate'])
convKers=np.array(sortedData['convKernels'])
return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)
elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
else:
print("Missing a column in NN datafile")
raise Exception('NN datafile is missing one of the expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')
#slice data could be used to test values of E other than E=0.5, which we use by default
def sliceData(xsize, x, y, z=None, w=None):
#we can slice the data to sample less often, but not more often. We verify that we're not being asked for a granularity that is smaller than the frequency of datapoints in the vectors.
if x[0] > xsize:
return x,y,z,w
else:
result=(1.0/x[0])*xsize
#result is how often we should take datapoints if we wish to consider values every xsize
x=x[int(result-1)::int(result)]
y=y[int(result-1)::int(result)]
if z is not None:
z=z[int(result-1)::int(result)]
if w is None:
return x,y,z
else:
return x,y
#if we get to this point in function, it means z and w are both not None.
w=w[int(result-1)::int(result)]
return x,y,z,w
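# --- Editor's addition: a typical call chain for the helpers above; the path is ---
# --- hypothetical. Note that only grabNNData's first (7-column) branch returns ---
# --- values; the 4-column branch falls through without a return in the original. ---
def _example_early_stop_check(filename="results/run1/valLoss.txt"):
    epoch, trainLoss, valLoss, valAcc, batch_size, lr, convKers = grabNNData(filename)
    stop_idx, min_loss = stopsDecreasing(valLoss, epoch, numEpochs=5)
    return stop_idx, min_loss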
| 38.936937 | 204 | 0.657103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,380 | 0.319297 |
8a6fea40902a5d1ec59a6cdd9117e96fcdef70a1 | 572 | py | Python | algo_probs/newcoder/classic/nc52.py | Jackthebighead/recruiment-2022 | a81007908e3c2f65a6be3ff2d62dfb92d0753b0d | [
"MIT"
] | null | null | null | algo_probs/newcoder/classic/nc52.py | Jackthebighead/recruiment-2022 | a81007908e3c2f65a6be3ff2d62dfb92d0753b0d | [
"MIT"
] | null | null | null | algo_probs/newcoder/classic/nc52.py | Jackthebighead/recruiment-2022 | a81007908e3c2f65a6be3ff2d62dfb92d0753b0d | [
"MIT"
] | null | null | null | # 题意:给出一个仅包含字符'(',')','{','}','['和']',的字符串,判断给出的字符串是否是合法的括号序列。括号必须以正确的顺序关闭,"()"和"()[]{}"都是合法的括号序列,但"(]"和"([)]"不合法。
# @param s: string
# @return: bool
# (Translated from the Chinese problem statement above: given a string containing
#  only '(', ')', '{', '}', '[' and ']', determine whether it is a valid bracket
#  sequence. Brackets must close in the correct order: "()" and "()[]{}" are valid,
#  while "(]" and "([)]" are not.)
#
class Solution:
def isValid(self , s ):
# write code here
if not s: return True
stack = []
dic = {'{':'}','[':']','(':')'}
for char in s:
if not stack or char in dic: stack.append(char)
elif stack and dic.get(stack[-1])!=char: return False
else:
stack.pop()
continue
return not stack  # valid only if every opening bracket has been closed
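# Editor's addition: quick checks of the expected behavior —
# Solution().isValid("()[]{}") should be True; Solution().isValid("([)]") and
# Solution().isValid("(") should both be False.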
| 30.105263 | 114 | 0.47028 | 398 | 0.554318 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.465181 |
8a73038a9d54b6fdd609f321f9fbc694a2017b7b | 2,385 | py | Python | piecrust/processing/util.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | piecrust/processing/util.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | piecrust/processing/util.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | import os.path
import time
import logging
import yaml
from piecrust.processing.base import Processor
logger = logging.getLogger(__name__)
class _ConcatInfo(object):
timestamp = 0
files = None
delim = "\n"
class ConcatProcessor(Processor):
PROCESSOR_NAME = 'concat'
def __init__(self):
super(ConcatProcessor, self).__init__()
self._cache = {}
def matches(self, path):
return path.endswith('.concat')
def getDependencies(self, path):
info = self._load(path)
return info.files
def getOutputFilenames(self, filename):
return [filename[:-7]]
def process(self, path, out_dir):
dirname, filename = os.path.split(path)
out_path = os.path.join(out_dir, filename[:-7])
info = self._load(path)
if not info.files:
raise Exception("No files specified in: %s" %
os.path.relpath(path, self.app.root_dir))
logger.debug("Concatenating %d files to: %s" %
(len(info.files), out_path))
encoded_delim = info.delim.encode('utf8')
with open(out_path, 'wb') as ofp:
for p in info.files:
with open(p, 'rb') as ifp:
ofp.write(ifp.read())
if info.delim:
ofp.write(encoded_delim)
return True
def _load(self, path):
cur_time = time.time()
info = self._cache.get(path)
if (info is not None and
(cur_time - info.timestamp <= 1 or
os.path.getmtime(path) < info.timestamp)):
return info
if info is None:
info = _ConcatInfo()
self._cache[path] = info
with open(path, 'r') as fp:
config = yaml.load(fp)
info.files = config.get('files', [])
info.delim = config.get('delim', "\n")
info.timestamp = cur_time
path_mode = config.get('path_mode', 'relative')
if path_mode == 'relative':
dirname, _ = os.path.split(path)
info.files = [os.path.join(dirname, f) for f in info.files]
elif path_mode == 'absolute':
info.files = [os.path.join(self.app.root_dir, f)
for f in info.files]
else:
raise Exception("Unknown path mode: %s" % path_mode)
return info
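# --- Editor's addition: the .concat file format this processor reads. ---
# A "foo.js.concat" source is YAML; the output drops the ".concat" suffix ("foo.js").
# Example contents (hypothetical paths):
#   files:
#     - assets/a.js
#     - assets/b.js
#   delim: "\n"
#   path_mode: relative   # or "absolute", resolved against the site root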
| 28.392857 | 71 | 0.554717 | 2,238 | 0.938365 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.074633 |
8a7310d8abb463c70846c800ef296e8c1423ac2b | 186 | py | Python | src/events/cell_pressed.py | ArcosJuan/Get-out-of-my-fucking-maze | ca2cfeaaeecb6c6f583ad647d020f25176170805 | [
"MIT"
] | 2 | 2021-09-09T14:03:40.000Z | 2021-11-03T03:35:55.000Z | src/events/cell_pressed.py | ArcosJuan/Get-out-of-my-fucking-maze | ca2cfeaaeecb6c6f583ad647d020f25176170805 | [
"MIT"
] | null | null | null | src/events/cell_pressed.py | ArcosJuan/Get-out-of-my-fucking-maze | ca2cfeaaeecb6c6f583ad647d020f25176170805 | [
"MIT"
] | null | null | null | from src.events import Event
class CellPressed(Event):
def __init__(self, position):
self.position = position
def get_position(self):
return self.position | 18.6 | 33 | 0.672043 | 155 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8a73f2115b3d49a7048eebbbf6a7d009bf2bcb02 | 864 | py | Python | TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
## use b-tagging two distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)
| 36 | 83 | 0.706019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.52662 |
8a746baf4af656a91220d07018cb78e6eb2e1b1f | 119 | py | Python | xortool/__init__.py | runapp/xortool | 9dac27387e7883775936a31e67598eaba182e053 | [
"MIT"
] | 14 | 2017-06-14T06:10:07.000Z | 2019-02-22T03:21:15.000Z | Cryptography/tools/xortool-master/xortool/__init__.py | rookie-12/My-Gray-Hacker-Resources | e9b10ac7b0e557a9e624a5a6e761f9af4488d777 | [
"MIT"
] | 1 | 2021-04-30T21:19:32.000Z | 2021-04-30T21:19:32.000Z | Cryptography/tools/xortool-master/xortool/__init__.py | rookie-12/My-Gray-Hacker-Resources | e9b10ac7b0e557a9e624a5a6e761f9af4488d777 | [
"MIT"
] | 7 | 2015-10-01T09:47:05.000Z | 2022-01-21T14:25:37.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
__all__ = ["args", "colors", "libcolors", "routine"]
__version__ = "0.96"
| 19.833333 | 52 | 0.605042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.689076 |
8a748a255fe78209cc5338aaab9ff134d24befab | 1,134 | py | Python | baopig/ressources/ressources.py | ChreSyr/baopig | 6264ab9a851b1ed0a031292abe7f159a53b3fc5e | [
"MIT"
] | null | null | null | baopig/ressources/ressources.py | ChreSyr/baopig | 6264ab9a851b1ed0a031292abe7f159a53b3fc5e | [
"MIT"
] | null | null | null | baopig/ressources/ressources.py | ChreSyr/baopig | 6264ab9a851b1ed0a031292abe7f159a53b3fc5e | [
"MIT"
] | null | null | null |
from baopig.pybao.objectutilities import Object
from baopig.pybao.issomething import *
class RessourcePack:
def config(self, **kwargs):
for name, value in kwargs.items():
self.__setattr__('_'+name, value)
class FontsRessourcePack(RessourcePack):
def __init__(self,
file=None,
height=15,
color=(0, 0, 0),
):
assert is_color(color)
self._file = file
self._height = height
self._color = color
file = property(lambda self: self._file)
color = property(lambda self: self._color)
height = property(lambda self: self._height)
class ScenesRessourcePack(RessourcePack):
def __init__(self,
background_color=(170, 170, 170),
):
assert is_color(background_color)
self._background_color = background_color
background_color = property(lambda self: self._background_color)
# TODO : ButtonRessourcePack.style.create_surface(size)
class _RessourcePack:
def __init__(self):
self.font = FontsRessourcePack()
self.scene = ScenesRessourcePack()
ressources = _RessourcePack()
| 19.894737 | 68 | 0.666667 | 945 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.051146 |
8a75b4a74e6ecd635d9404db9ea5df06d5114069 | 10,282 | py | Python | bufr_extract_unique_stations.py | glamod/glamod-misc | 4c8743dd3aa436377017c49bec990b11fe1c6f7d | [
"BSD-3-Clause"
] | null | null | null | bufr_extract_unique_stations.py | glamod/glamod-misc | 4c8743dd3aa436377017c49bec990b11fe1c6f7d | [
"BSD-3-Clause"
] | 16 | 2018-10-23T08:06:18.000Z | 2018-10-30T10:20:01.000Z | bufr_extract_unique_stations.py | glamod/glamod-misc | 4c8743dd3aa436377017c49bec990b11fe1c6f7d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python2.7
"""
Extract unique set of station locations (and names) along with number of obs
RJHD - Exeter - October 2017
"""
# ECMWF import defaults
import traceback
import sys
from eccodes import *
# RJHD imports
import cartopy
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import gc
VERBOSE = 1 # verbose error reporting.
ATTRS = [
'code',
'units',
'scale',
'reference',
'width'
]
INTMDI = 2147483647
#***************************************************
def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year):
infile = open(infilename)
year = int(infilename.split(".")[0].split("_")[-1])
cmatch = 0
counter = 0
# loop all messages (with stop statement)
while 1:
"""OPEN MESSAGE"""
# get handle for message
bufr = codes_bufr_new_from_file(infile)
if bufr is None:
break
if counter%100000 == 0:
print "message: {:d}".format(counter)
# we need to instruct ecCodes to expand all the descriptors
# i.e. unpack the data values
codes_set(bufr, 'unpack', 1)
"""ITERATOR TO EXTRACT KEYS"""
these_keys = []
# get BUFR key iterator
iterid = codes_bufr_keys_iterator_new(bufr)
# loop over the keys
while codes_bufr_keys_iterator_next(iterid):
# print key name
keyname = codes_bufr_keys_iterator_get_name(iterid)
# print(" %s" % keyname)
these_keys += [keyname]
# delete the key iterator
codes_bufr_keys_iterator_delete(iterid)
# Use these to select obs from land/marine surface
name_keys = ["#1#shipOrMobileLandStationIdentifier", "#1#stationNumber"]
processed = False
for nk in name_keys:
if nk in these_keys:
try:
name = codes_get(bufr, nk)
lat = codes_get(bufr, "#1#latitude")
lon = codes_get(bufr, "#1#longitude")
sloc = tloc = nloc = [-1]
if name in station_names:
sloc, = np.where(station_names == name)
if lat in latitudes:
tloc, = np.where(latitudes == lat)
if lon in longitudes:
nloc, = np.where(longitudes == lon)
if tloc[0] == -1 and nloc[0] == -1:
# if not in list, then add
station_names = np.append(station_names, name)
latitudes = np.append(latitudes, lat)
longitudes = np.append(longitudes, lon)
observations = np.append(observations, 1)
start_year = np.append(start_year, year)
end_year = np.append(end_year, year)
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
fixed_station = np.append(fixed_station, True)
else:
fixed_station = np.append(fixed_station, False)
elif (tloc[0] != -1 or nloc[0] != -1) and tloc[0] != nloc[0]:
# add if one element of position is unique
station_names = np.append(station_names, name)
latitudes = np.append(latitudes, lat)
longitudes = np.append(longitudes, lon)
observations = np.append(observations, 1)
start_year = np.append(start_year, year)
end_year = np.append(end_year, year)
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
fixed_station = np.append(fixed_station, True)
else:
fixed_station = np.append(fixed_station, False)
elif tloc[0] != -1 and tloc[0] == nloc[0]:
# if position matches exactly, up observation counter
observations[tloc[0]] += 1
end_year[tloc[0]] = year
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
if fixed_station[tloc[0]] != True:
# if listed as land and now marine, take marine
fixed_station[tloc[0]] = False
else:
if fixed_station[tloc[0]] != False:
# easier to leave as mobile/marine than to move
# hopefully will stand out later
pass
else:
cmatch += 1
processed = True
except CodesInternalError:
raw_input("key error?")
# check for new keys which give station ID information
if not processed:
other_keys = ["#1#carrierBalloonOrAircraftIdentifier", "#1#aircraftFlightNumber"]
new_key = True
for ok in other_keys:
if ok in these_keys: new_key = False
if new_key:
raw_input(these_keys)
# if counter > 10000: break
counter += 1
codes_release(bufr)
# print "Number of unique locations in this year: {}".format(len(latitudes))
return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file
#***************************************************
def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title = "", figtext = "", doText = False):
'''
Standard scatter map
:param str outname: output filename root
:param array data: data to plot
:param array lons: longitudes
:param array lats: latitudes
:param obj cmap: colourmap to use
:param array bounds: bounds for discrete colormap
:param str cb_label: colorbar label
'''
norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N)
fig = plt.figure(figsize =(10,6.5))
plt.clf()
ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson())
ax.gridlines() #draw_labels=True)
ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = "0.9", edgecolor = "k")
ax.coastlines()
ext = ax.get_extent() # save the original extent
scatter = plt.scatter(lons, lats, c = data, cmap = cmap, norm = norm, s=10, \
transform = cartopy.crs.Geodetic(), edgecolor = "r", linewidth = 0.1)
cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05, fraction = 0.05, \
aspect = 30, ticks = bounds[1:-1], label = cb_label, drawedges=True)
# thicken border of colorbar and the dividers
# http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib
# cb.set_ticklabels(["{:g}".format(b) for b in bounds[1:-1]])
# cb.outline.set_color('k')
# cb.outline.set_linewidth(2)
cb.dividers.set_color('k')
cb.dividers.set_linewidth(2)
ax.set_extent(ext, ax.projection) # fix the extent change from colormesh
plt.title(title)
if doText: plt.text(0.01, 0.98, "#stations: {}".format(data.shape[0]), transform = ax.transAxes, fontsize = 10)
plt.savefig(outname)
plt.close()
return # scatter_map
#***************************************************
def main(ms = "era40_", year = 1980):
LOCS = "/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/"
print year
station_names = np.array([])
fixed_station = np.array([])
latitudes = np.array([])
longitudes = np.array([])
observations = np.array([])
start_year = np.array([])
end_year = np.array([])
if ms == "erai_" and year < 1979:
return
else:
INFILE = "{}mars_{}{}.bufr".format(LOCS, ms, year)
try:
station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year = \
process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year)
except CodesInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
sys.stderr.write(err.msg + '\n')
land = np.where(np.array(fixed_station) == True)
marine = np.where(np.array(fixed_station) == False)
bounds = np.linspace(0,max(observations),10).astype(int)
cmap = plt.cm.YlOrRd_r
if ms == "erai_":
title = "MARS - SYNOP - {}".format(year)
else:
title = "MARS - ERA40 - {}".format(year)
scatter_map("mars_{}{}_land_observations.png".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, "Number of Observations", title, doText = True)
scatter_map("mars_{}{}_marine_observations.png".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, "Number of Observations", title)
station_names = 0
fixed_station = 0
latitudes = 0
longitudes = 0
observations = 0
start_year = 0
end_year = 0
land = 0
marine = 0
gc.collect()
return # main
#***************************************************
if __name__ == "__main__":
import argparse
# set up keyword arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ms', dest='ms', action='store', default = "era40_",
help='Run on ERA40 ["era40_"] (default) or ERA-I ["erai_"] data')
parser.add_argument('--year', dest='year', action='store', default = 1980,
help='Which year to process - default 1980')
args = parser.parse_args()
main(ms = args.ms, year = args.year)
sys.exit()
#***************************************************
# END
#***************************************************
| 33.167742 | 184 | 0.540751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,907 | 0.282727 |
8a7777964dadf66bcb5c8207f5f26c1301e49cee | 3,977 | py | Python | libsaas/services/twilio/applications.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 155 | 2015-01-27T15:17:59.000Z | 2022-02-20T00:14:08.000Z | libsaas/services/twilio/applications.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 14 | 2015-01-12T08:22:37.000Z | 2021-06-16T19:49:31.000Z | libsaas/services/twilio/applications.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 43 | 2015-01-28T22:41:45.000Z | 2021-09-21T04:44:26.000Z | from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource
class ApplicationsBase(resource.TwilioResource):
path = 'Applications'
class Application(ApplicationsBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class Applications(ApplicationsBase):
@base.apimethod
def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Applications belonging to an account.
:var FriendlyName: Only return the Account resources with friendly
names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectAppsBase(resource.TwilioResource):
path = 'ConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectApp(ConnectAppsBase):
pass
class ConnectApps(ConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectAppsBase(resource.TwilioResource):
path = 'AuthorizedConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectApp(AuthorizedConnectAppsBase):
pass
class AuthorizedConnectApps(AuthorizedConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Authorized Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
| 28.007042 | 78 | 0.652753 | 3,836 | 0.964546 | 0 | 0 | 2,586 | 0.650239 | 0 | 0 | 1,903 | 0.478501 |
8a78745915eb3a4aaf90865a024b4d8bafd46ca7 | 5,151 | py | Python | research/gnn/sgcn/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 1 | 2021-11-18T08:17:44.000Z | 2021-11-18T08:17:44.000Z | research/gnn/sgcn/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | null | null | null | research/gnn/sgcn/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 2 | 2019-09-01T06:17:04.000Z | 2019-10-04T08:39:45.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint
def softmax(x):
t_max = np.max(x, axis=1, keepdims=True) # returns max of each row and keeps same dims
e_x = np.exp(x - t_max) # subtracts each row with its max value
t_sum = np.sum(e_x, axis=1, keepdims=True) # returns sum of each row and keeps same dims
f_x = e_x / t_sum
return f_x
def score_model(preds, test_pos, test_neg, weight, bias):
"""
Score the model on the test set edges in each epoch.
Args:
epoch (LongTensor): Training epochs.
Returns:
auc(Float32): AUC result.
f1(Float32): F1-Score result.
"""
score_positive_edges = np.array(test_pos, dtype=np.int32).T
score_negative_edges = np.array(test_neg, dtype=np.int32).T
test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
preds[score_positive_edges[1, :], :]), axis=1)
test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
preds[score_negative_edges[1, :], :]), axis=1)
# operands could not be broadcast together with shapes (4288,128) (128,3)
scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
probability_scores = np.exp(softmax(scores))
predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
# predictions = predictions.asnumpy()
targets = [0]*len(test_pos) + [1]*len(test_neg)
auc, f1 = calculate_auc(targets, predictions)
return auc, f1
def get_acc():
"""get infer Accuracy."""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--dataset_name', type=str, default='bitcoin-otc', choices=['bitcoin-otc', 'bitcoin-alpha'],
help='dataset name')
parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
parser.add_argument("--edge_path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--features-path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--test-size", type=float,
default=0.2, help="Test dataset size. Default is 0.2.")
parser.add_argument("--seed", type=int, default=42,
help="Random seed for sklearn pre-training. Default is 42.")
parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
parser.add_argument("--reduction-iterations", type=int,
default=30, help="Number of SVD iterations. Default is 30.")
parser.add_argument("--reduction-dimensions", type=int,
default=64, help="Number of SVD feature extraction dimensions. Default is 64.")
args_opt = parser.parse_args()
# Runtime
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
# Create network
test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
# Load parameters from checkpoint into network
param_dict = load_checkpoint(args_opt.checkpoint_file)
print(type(param_dict))
print(param_dict)
print(type(param_dict['regression_weights']))
print(param_dict['regression_weights'])
# load_param_into_net(net, param_dict)
pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
if args_opt.dataset_name == 'bitcoin-otc':
pred = pred.reshape(5881, 64)
else:
pred = pred.reshape(3783, 64)
auc, f1 = score_model(pred, test_pos, test_neg, param_dict['regression_weights'].asnumpy(),
param_dict['regression_bias'].asnumpy())
print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))
if __name__ == '__main__':
get_acc()
| 48.140187 | 117 | 0.644729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,205 | 0.428072 |
8a78d7cdf72b62f6c5c9341d633e72ed6d4ea01c | 4,001 | py | Python | pykeops/common/get_options.py | dvolgyes/keops | 58b2c5f7822a7468a6da2ce439939e7dad04d7f3 | [
"MIT"
] | 1 | 2020-09-29T13:21:30.000Z | 2020-09-29T13:21:30.000Z | pykeops/common/get_options.py | dvolgyes/keops | 58b2c5f7822a7468a6da2ce439939e7dad04d7f3 | [
"MIT"
] | null | null | null | pykeops/common/get_options.py | dvolgyes/keops | 58b2c5f7822a7468a6da2ce439939e7dad04d7f3 | [
"MIT"
] | null | null | null | import re
import numpy as np
from collections import OrderedDict
import pykeops
import pykeops.config
############################################################
# define backend
############################################################
class SetBackend():
"""
This class is used to centralized the options used in PyKeops.
"""
dev = OrderedDict([('CPU',0),('GPU',1)])
grid = OrderedDict([('1D',0),('2D',1)])
memtype = OrderedDict([('host',0), ('device',1)])
possible_options_list = ['auto',
'CPU',
'GPU',
'GPU_1D', 'GPU_1D_device', 'GPU_1D_host',
'GPU_2D', 'GPU_2D_device', 'GPU_2D_host'
]
def define_tag_backend(self, backend, variables):
"""
Try to make a good guess for the backend... available methods are: (host means Cpu, device means Gpu)
CPU : computations performed with the host from host arrays
GPU_1D_device : computations performed on the device from device arrays, using the 1D scheme
GPU_2D_device : computations performed on the device from device arrays, using the 2D scheme
GPU_1D_host : computations performed on the device from host arrays, using the 1D scheme
GPU_2D_host : computations performed on the device from host data, using the 2D scheme
:param backend (str), variables (tuple)
:return (tagCPUGPU, tag1D2D, tagHostDevice)
"""
# check that the option is valid
if (backend not in self.possible_options_list):
raise ValueError('Invalid backend. Should be one of ', self.possible_options_list)
# auto : infer everything
if backend == 'auto':
return int(pykeops.config.gpu_available), self._find_grid(), self._find_mem(variables)
split_backend = re.split('_',backend)
if len(split_backend) == 1: # CPU or GPU
return self.dev[split_backend[0]], self._find_grid(), self._find_mem(variables)
elif len(split_backend) == 2: # GPU_1D or GPU_2D
return self.dev[split_backend[0]], self.grid[split_backend[1]], self._find_mem(variables)
elif len(split_backend) == 3: # the option is known
return self.dev[split_backend[0]], self.grid[split_backend[1]], self.memtype[split_backend[2]]
def define_backend(self, backend, variables):
tagCPUGPU, tag1D2D, tagHostDevice = self.define_tag_backend(backend, variables)
return self.dev[tagCPUGPU], self.grid[tag1D2D], self.memtype[tagHostDevice]
@staticmethod
def _find_dev():
return int(pykeops.config.gpu_available)
@staticmethod
def _find_mem(variables):
if all([type(var) is np.ndarray for var in variables ]): # Infer if we're working with numpy arrays or torch tensors:
MemType = 0
elif pykeops.config.torch_found:
import torch
if all([type(var) in [torch.Tensor, torch.nn.parameter.Parameter] for var in variables]):
from pykeops.torch.utils import is_on_device
VarsAreOnGpu = tuple(map(is_on_device, tuple(variables)))
if all(VarsAreOnGpu):
MemType = 1
elif not any(VarsAreOnGpu):
MemType = 0
else:
raise ValueError('At least two input variables have different memory locations (Cpu/Gpu).')
else:
raise TypeError('All variables should either be numpy arrays or torch tensors.')
return MemType
@staticmethod
def _find_grid():
return 0
def get_tag_backend(backend, variables, str = False):
"""
entry point to get the correct backend
"""
res = SetBackend()
if not str:
return res.define_tag_backend(backend, variables)
else:
return res.define_backend(backend, variables)
| 38.84466 | 125 | 0.59935 | 3,477 | 0.869033 | 0 | 0 | 1,058 | 0.264434 | 0 | 0 | 1,447 | 0.36166 |
8a78e9f69beda0a9b40161770e8196cc19774191 | 4,306 | py | Python | prepare_features_vc.py | tkm2261/dnn-voice-changer | 63a4ca0b2d8a33a26fc5aaec168180152df1b429 | [
"MIT"
] | 13 | 2018-03-09T07:56:50.000Z | 2022-03-26T12:23:22.000Z | prepare_features_vc.py | tkm2261/dnn-voice-changer | 63a4ca0b2d8a33a26fc5aaec168180152df1b429 | [
"MIT"
] | null | null | null | prepare_features_vc.py | tkm2261/dnn-voice-changer | 63a4ca0b2d8a33a26fc5aaec168180152df1b429 | [
"MIT"
] | 2 | 2018-06-16T03:44:56.000Z | 2021-04-06T17:32:38.000Z | """Prepare acoustic features for one-to-one voice conversion.
usage:
prepare_features_vc.py [options] <DATA_ROOT> <source_speaker> <target_speaker>
options:
--max_files=<N> Max num files to be collected. [default: 100]
--dst_dir=<d> Destination directory [default: data/cmu_arctic_vc].
--overwrite Overwrite files.
-h, --help show this help message and exit
"""
from __future__ import division, print_function, absolute_import
from docopt import docopt
import numpy as np
from nnmnkwii.datasets import FileSourceDataset
from nnmnkwii import preprocessing as P
from nnmnkwii.preprocessing.alignment import DTWAligner
from nnmnkwii.datasets import cmu_arctic, voice_statistics, vcc2016
import pysptk
import pyworld
from scipy.io import wavfile
from tqdm import tqdm
from os.path import basename, splitext, exists, expanduser, join, dirname
import os
import sys
from hparams import vc as hp
from hparams import hparams_debug_string
# vcc2016.WavFileDataSource and voice_statistics.WavFileDataSource can be
# drop-in replacement. See below for details:
# https://r9y9.github.io/nnmnkwii/latest/references/datasets.html#builtin-data-sources
class MGCSource(cmu_arctic.WavFileDataSource):
def __init__(self, data_root, speakers, max_files=None):
super(MGCSource, self).__init__(data_root, speakers,
max_files=max_files)
self.alpha = None
def collect_features(self, wav_path):
fs, x = wavfile.read(wav_path)
x = x.astype(np.float64)
f0, timeaxis = pyworld.dio(x, fs, frame_period=hp.frame_period)
f0 = pyworld.stonemask(x, f0, timeaxis, fs)
spectrogram = pyworld.cheaptrick(x, f0, timeaxis, fs)
spectrogram = P.trim_zeros_frames(spectrogram)
if self.alpha is None:
self.alpha = pysptk.util.mcepalpha(fs)
mgc = pysptk.sp2mc(spectrogram, order=hp.order, alpha=self.alpha)
# Drop 0-th coefficient
mgc = mgc[:, 1:]
# 50Hz cut-off MS smoothing
hop_length = int(fs * (hp.frame_period * 0.001))
modfs = fs / hop_length
mgc = P.modspec_smoothing(mgc, modfs, cutoff=50)
# Add delta
mgc = P.delta_features(mgc, hp.windows)
return mgc.astype(np.float32)
if __name__ == "__main__":
args = docopt(__doc__)
print("Command line args:\n", args)
DATA_ROOT = args["<DATA_ROOT>"]
source_speaker = args["<source_speaker>"]
target_speaker = args["<target_speaker>"]
max_files = int(args["--max_files"])
dst_dir = args["--dst_dir"]
overwrite = args["--overwrite"]
print(hparams_debug_string(hp))
X_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [source_speaker],
max_files=max_files))
Y_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [target_speaker],
max_files=max_files))
skip_feature_extraction = exists(join(dst_dir, "X")) \
and exists(join(dst_dir, "Y"))
if overwrite:
skip_feature_extraction = False
if skip_feature_extraction:
print("Features seems to be prepared, skipping feature extraction.")
sys.exit(0)
# Create dirs
for speaker, name in [(source_speaker, "X"), (target_speaker, "Y")]:
d = join(dst_dir, name)
print("Destination dir for {}: {}".format(speaker, d))
if not exists(d):
os.makedirs(d)
# Convert to arrays
print("Convert datasets to arrays")
X, Y = X_dataset.asarray(verbose=1), Y_dataset.asarray(verbose=1)
# Alignment
print("Perform alignment")
X, Y = DTWAligner().transform((X, Y))
print("Save features to disk")
for idx, (x, y) in tqdm(enumerate(zip(X, Y))):
# paths
src_name = splitext(basename(X_dataset.collected_files[idx][0]))[0]
tgt_name = splitext(basename(Y_dataset.collected_files[idx][0]))[0]
src_path = join(dst_dir, "X", src_name)
tgt_path = join(dst_dir, "Y", tgt_name)
# Trim and ajast frames
x = P.trim_zeros_frames(x)
y = P.trim_zeros_frames(y)
x, y = P.adjust_frame_lengths(x, y, pad=True, divisible_by=2)
# Save
np.save(src_path, x)
np.save(tgt_path, y)
| 35.883333 | 86 | 0.656061 | 1,112 | 0.258244 | 0 | 0 | 0 | 0 | 0 | 0 | 1,053 | 0.244542 |
8a7905cf7b3fc947d0fefe5c680371a050d82807 | 1,855 | py | Python | lib/tests/streamlit/pydeck_test.py | zgtz/streamlit | be797682394955ef2b94a5f7641b6f9d8fd1dbfc | [
"Apache-2.0"
] | 1 | 2022-01-19T10:48:49.000Z | 2022-01-19T10:48:49.000Z | lib/tests/streamlit/pydeck_test.py | zgtz/streamlit | be797682394955ef2b94a5f7641b6f9d8fd1dbfc | [
"Apache-2.0"
] | 52 | 2021-10-04T21:52:48.000Z | 2021-12-29T02:18:44.000Z | lib/tests/streamlit/pydeck_test.py | zgtz/streamlit | be797682394955ef2b94a5f7641b6f9d8fd1dbfc | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import pydeck as pdk
from tests import testutil
import streamlit as st
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
df1 = pd.DataFrame({"lat": [1, 2, 3, 4], "lon": [10, 20, 30, 40]})
class PyDeckTest(testutil.DeltaGeneratorTestCase):
def test_basic(self):
"""Test that pydeck object orks."""
st.pydeck_chart(
pdk.Deck(
layers=[
pdk.Layer("ScatterplotLayer", data=df1),
]
)
)
el = self.get_delta_from_queue().new_element
actual = json.loads(el.deck_gl_json_chart.json)
self.assertEqual(actual["layers"][0]["@@type"], "ScatterplotLayer")
self.assertEqual(
actual["layers"][0]["data"],
[
{"lat": 1, "lon": 10},
{"lat": 2, "lon": 20},
{"lat": 3, "lon": 30},
{"lat": 4, "lon": 40},
],
)
def test_no_args(self):
"""Test that it can be called with no args."""
st.pydeck_chart()
el = self.get_delta_from_queue().new_element
actual = json.loads(el.deck_gl_json_chart.json)
self.assertEqual(actual, deck_gl_json_chart.EMPTY_MAP)
| 30.409836 | 75 | 0.618329 | 1,029 | 0.554717 | 0 | 0 | 0 | 0 | 0 | 0 | 766 | 0.412938 |
8a790773c525636d7fecb88a7d84df906ba09ba6 | 40,698 | py | Python | sdks/python/apache_beam/io/gcp/bigquery_tools.py | Doctusoft/beam | 91d59e78ffec3771a1d646c4e320fff571393829 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/io/gcp/bigquery_tools.py | Doctusoft/beam | 91d59e78ffec3771a1d646c4e320fff571393829 | [
"Apache-2.0"
] | 1 | 2022-02-10T06:56:11.000Z | 2022-02-10T06:56:11.000Z | sdks/python/apache_beam/io/gcp/bigquery_tools.py | Doctusoft/beam | 91d59e78ffec3771a1d646c4e320fff571393829 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tools used by BigQuery sources and sinks.
Classes, constants and functions in this file are experimental and have no
backwards compatibility guarantees.
These tools include wrappers and clients to interact with BigQuery APIs.
NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES.
"""
from __future__ import absolute_import
import datetime
import decimal
import json
import logging
import re
import sys
import time
import uuid
from builtins import object
from future.utils import iteritems
from apache_beam import coders
from apache_beam.internal.gcp import auth
from apache_beam.internal.gcp.json_value import from_json_value
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.options import value_provider
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms import DoFn
from apache_beam.utils import retry
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
MAX_RETRIES = 3
JSON_COMPLIANCE_ERROR = 'NAN, INF and -INF values are not JSON compliant.'
def default_encoder(obj):
if isinstance(obj, decimal.Decimal):
return str(obj)
raise TypeError(
"Object of type '%s' is not JSON serializable" % type(obj).__name__)
def get_hashable_destination(destination):
"""Parses a table reference into a (project, dataset, table) tuple.
Args:
destination: Either a TableReference object from the bigquery API.
The object has the following attributes: projectId, datasetId, and
tableId. Or a string representing the destination containing
'PROJECT:DATASET.TABLE'.
Returns:
A string representing the destination containing
'PROJECT:DATASET.TABLE'.
"""
if isinstance(destination, bigquery.TableReference):
return '%s:%s.%s' % (
destination.projectId, destination.datasetId, destination.tableId)
else:
return destination
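# A minimal illustrative sketch of the behaviour described above; the
# project/dataset/table names are placeholders, not values from this module.
def _example_get_hashable_destination():  # pragma: no cover
    ref = bigquery.TableReference(
        projectId='my-project', datasetId='my_dataset', tableId='my_table')
    assert get_hashable_destination(ref) == 'my-project:my_dataset.my_table'
    # Plain string destinations are passed through unchanged.
    assert get_hashable_destination('my-project:my_dataset.my_table') == (
        'my-project:my_dataset.my_table')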
def parse_table_schema_from_json(schema_string):
"""Parse the Table Schema provided as string.
Args:
schema_string: String serialized table schema, should be a valid JSON.
Returns:
A TableSchema of the BigQuery export from either the Query or the Table.
"""
json_schema = json.loads(schema_string)
def _parse_schema_field(field):
"""Parse a single schema field from dictionary.
Args:
field: Dictionary object containing serialized schema.
Returns:
A TableFieldSchema for a single column in BigQuery.
"""
schema = bigquery.TableFieldSchema()
schema.name = field['name']
schema.type = field['type']
if 'mode' in field:
schema.mode = field['mode']
else:
schema.mode = 'NULLABLE'
if 'description' in field:
schema.description = field['description']
if 'fields' in field:
schema.fields = [_parse_schema_field(x) for x in field['fields']]
return schema
fields = [_parse_schema_field(f) for f in json_schema['fields']]
return bigquery.TableSchema(fields=fields)
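# A minimal illustrative sketch of parsing a serialized schema; the field names
# and types below are invented for the example.
def _example_parse_table_schema_from_json():  # pragma: no cover
    schema_string = json.dumps({
        'fields': [
            {'name': 'name', 'type': 'STRING', 'mode': 'REQUIRED'},
            {'name': 'scores', 'type': 'RECORD', 'mode': 'REPEATED',
             'fields': [{'name': 'value', 'type': 'FLOAT'}]},
        ]
    })
    schema = parse_table_schema_from_json(schema_string)
    assert schema.fields[0].mode == 'REQUIRED'
    # A field without an explicit 'mode' defaults to NULLABLE, and nested
    # 'fields' are parsed recursively.
    assert schema.fields[1].fields[0].mode == 'NULLABLE'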
def parse_table_reference(table, dataset=None, project=None):
"""Parses a table reference into a (project, dataset, table) tuple.
Args:
table: The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). If dataset argument is None
then the table argument must contain the entire table reference:
'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a
bigquery.TableReference instance in which case dataset and project are
ignored and the reference is returned as a result. Additionally, for date
partitioned tables, appending '$YYYYmmdd' to the table name is supported,
e.g. 'DATASET.TABLE$YYYYmmdd'.
dataset: The ID of the dataset containing this table or null if the table
reference is specified entirely by the table argument.
project: The ID of the project containing this table or null if the table
reference is specified entirely by the table (and possibly dataset)
argument.
Returns:
A TableReference object from the bigquery API. The object has the following
attributes: projectId, datasetId, and tableId.
Raises:
ValueError: if the table reference as a string does not match the expected
format.
"""
if isinstance(table, bigquery.TableReference):
return table
elif callable(table):
return table
elif isinstance(table, value_provider.ValueProvider):
return table
table_reference = bigquery.TableReference()
# If dataset argument is not specified, the expectation is that the
# table argument will contain a full table reference instead of just a
# table name.
if dataset is None:
match = re.match(
r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
if not match:
raise ValueError(
'Expected a table reference (PROJECT:DATASET.TABLE or '
'DATASET.TABLE) instead of %s.' % table)
table_reference.projectId = match.group('project')
table_reference.datasetId = match.group('dataset')
table_reference.tableId = match.group('table')
else:
table_reference.projectId = project
table_reference.datasetId = dataset
table_reference.tableId = table
return table_reference
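# A minimal illustrative sketch of the accepted argument forms; the table names
# are placeholders.
def _example_parse_table_reference():  # pragma: no cover
    ref = parse_table_reference('my-project:my_dataset.my_table')
    assert (ref.projectId, ref.datasetId, ref.tableId) == (
        'my-project', 'my_dataset', 'my_table')
    # Without a project prefix the projectId is left unset.
    ref = parse_table_reference('my_dataset.my_table')
    assert ref.projectId is None and ref.datasetId == 'my_dataset'
    # Alternatively, dataset and project can be given as separate arguments.
    ref = parse_table_reference('my_table', dataset='my_dataset', project='my-project')
    assert ref.projectId == 'my-project' and ref.tableId == 'my_table'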
# -----------------------------------------------------------------------------
# BigQueryWrapper.
class BigQueryWrapper(object):
"""BigQuery client wrapper with utilities for querying.
The wrapper is used to organize all the BigQuery integration points and
offer a common place where retry logic for failures can be controlled.
In addition it offers various functions used both in sources and sinks
(e.g., find and create tables, query a table, etc.).
"""
TEMP_TABLE = 'temp_table_'
TEMP_DATASET = 'temp_dataset_'
def __init__(self, client=None):
self.client = client or bigquery.BigqueryV2(
http=get_new_http(),
credentials=auth.get_service_credentials(),
response_encoding=None if sys.version_info[0] < 3 else 'utf8')
self._unique_row_id = 0
# For testing scenarios where we pass in a client we do not want a
# randomized prefix for row IDs.
self._row_id_prefix = '' if client else uuid.uuid4()
self._temporary_table_suffix = uuid.uuid4().hex
@property
def unique_row_id(self):
"""Returns a unique row ID (str) used to avoid multiple insertions.
If the row ID is provided, BigQuery will make a best effort to not insert
the same row multiple times for fail and retry scenarios in which the insert
request may be issued several times. This comes into play for sinks executed
in a local runner.
Returns:
a unique row ID string
"""
self._unique_row_id += 1
return '%s_%d' % (self._row_id_prefix, self._unique_row_id)
def _get_temp_table(self, project_id):
return parse_table_reference(
table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix,
dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix,
project=project_id)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_query_location(self, project_id, query, use_legacy_sql):
"""
Get the location of tables referenced in a query.
This method returns the location of the first referenced table in the query
and depends on the BigQuery service to provide error handling for
queries that reference tables in multiple locations.
"""
reference = bigquery.JobReference(jobId=uuid.uuid4().hex,
projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
dryRun=True,
query=bigquery.JobConfigurationQuery(
query=query,
useLegacySql=use_legacy_sql,
)),
jobReference=reference))
response = self.client.jobs.Insert(request)
if response.statistics is None:
# This behavior is only expected in tests
logging.warning(
"Unable to get location, missing response.statistics. Query: %s",
query)
return None
referenced_tables = response.statistics.query.referencedTables
if referenced_tables: # Guards against both non-empty and non-None
table = referenced_tables[0]
location = self.get_table_location(
table.projectId,
table.datasetId,
table.tableId)
logging.info("Using location %r from table %r referenced by query %s",
location, table, query)
return location
logging.debug("Query %s does not reference any tables.", query)
return None
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _insert_copy_job(self,
project_id,
job_id,
from_table_reference,
to_table_reference,
create_disposition=None,
write_disposition=None):
reference = bigquery.JobReference()
reference.jobId = job_id
reference.projectId = project_id
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
copy=bigquery.JobConfigurationTableCopy(
destinationTable=to_table_reference,
sourceTable=from_table_reference,
createDisposition=create_disposition,
writeDisposition=write_disposition,
)
),
jobReference=reference,
)
)
logging.info("Inserting job request: %s", request)
response = self.client.jobs.Insert(request)
logging.info("Response was %s", response)
return response.jobReference
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _insert_load_job(self,
project_id,
job_id,
table_reference,
source_uris,
schema=None,
write_disposition=None,
create_disposition=None):
reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
load=bigquery.JobConfigurationLoad(
sourceUris=source_uris,
destinationTable=table_reference,
schema=schema,
writeDisposition=write_disposition,
createDisposition=create_disposition,
sourceFormat='NEWLINE_DELIMITED_JSON',
autodetect=schema is None,
)
),
jobReference=reference,
)
)
response = self.client.jobs.Insert(request)
return response.jobReference
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _start_query_job(self, project_id, query, use_legacy_sql, flatten_results,
job_id, dry_run=False):
reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
dryRun=dry_run,
query=bigquery.JobConfigurationQuery(
query=query,
useLegacySql=use_legacy_sql,
allowLargeResults=True,
destinationTable=self._get_temp_table(project_id),
flattenResults=flatten_results)),
jobReference=reference))
response = self.client.jobs.Insert(request)
return response.jobReference.jobId
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_query_results(self, project_id, job_id,
page_token=None, max_results=10000):
request = bigquery.BigqueryJobsGetQueryResultsRequest(
jobId=job_id, pageToken=page_token, projectId=project_id,
maxResults=max_results)
response = self.client.jobs.GetQueryResults(request)
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter)
def _insert_all_rows(self, project_id, dataset_id, table_id, rows,
skip_invalid_rows=False):
"""Calls the insertAll BigQuery API endpoint.
Docs for this BQ call: https://cloud.google.com/bigquery/docs/reference\
/rest/v2/tabledata/insertAll."""
# The rows argument is a list of
# bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as
# required by the InsertAll() method.
request = bigquery.BigqueryTabledataInsertAllRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(
skipInvalidRows=skip_invalid_rows,
# TODO(silviuc): Should have an option for ignoreUnknownValues?
rows=rows))
response = self.client.tabledata.InsertAll(request)
# response.insertErrors is not [] if errors encountered.
return not response.insertErrors, response.insertErrors
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_table(self, project_id, dataset_id, table_id):
"""Lookup a table's metadata object.
Args:
client: bigquery.BigqueryV2 instance
project_id, dataset_id, table_id: table lookup parameters
Returns:
bigquery.Table instance
Raises:
HttpError if lookup failed.
"""
request = bigquery.BigqueryTablesGetRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
response = self.client.tables.Get(request)
return response
def _create_table(self, project_id, dataset_id, table_id, schema):
table = bigquery.Table(
tableReference=bigquery.TableReference(
projectId=project_id, datasetId=dataset_id, tableId=table_id),
schema=schema)
request = bigquery.BigqueryTablesInsertRequest(
projectId=project_id, datasetId=dataset_id, table=table)
response = self.client.tables.Insert(request)
logging.debug("Created the table with id %s", table_id)
# The response is a bigquery.Table instance.
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_dataset(self, project_id, dataset_id, location=None):
# Check if dataset already exists otherwise create it
try:
dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
return dataset
except HttpError as exn:
if exn.status_code == 404:
dataset_reference = bigquery.DatasetReference(
projectId=project_id, datasetId=dataset_id)
dataset = bigquery.Dataset(datasetReference=dataset_reference)
if location is not None:
dataset.location = location
request = bigquery.BigqueryDatasetsInsertRequest(
projectId=project_id, dataset=dataset)
response = self.client.datasets.Insert(request)
# The response is a bigquery.Dataset instance.
return response
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _is_table_empty(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTabledataListRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
maxResults=1)
response = self.client.tabledata.List(request)
# The response is a bigquery.TableDataList instance.
return response.totalRows == 0
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_table(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTablesDeleteRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
try:
self.client.tables.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Table %s:%s.%s does not exist', project_id,
dataset_id, table_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_dataset(self, project_id, dataset_id, delete_contents=True):
request = bigquery.BigqueryDatasetsDeleteRequest(
projectId=project_id, datasetId=dataset_id,
deleteContents=delete_contents)
try:
self.client.datasets.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
dataset_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_table_location(self, project_id, dataset_id, table_id):
table = self.get_table(project_id, dataset_id, table_id)
return table.location
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def create_temporary_dataset(self, project_id, location):
dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
# Check if dataset exists to make sure that the temporary id is unique
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
if project_id is not None:
# Unittests don't pass projectIds so they can be run without error
raise RuntimeError(
'Dataset %s:%s already exists so cannot be used as temporary.'
% (project_id, dataset_id))
except HttpError as exn:
if exn.status_code == 404:
logging.warning(
'Dataset %s:%s does not exist so we will create it as temporary '
'with location=%s',
project_id, dataset_id, location)
self.get_or_create_dataset(project_id, dataset_id, location=location)
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def clean_up_temporary_dataset(self, project_id):
temp_table = self._get_temp_table(project_id)
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=temp_table.datasetId))
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
temp_table.datasetId)
return
else:
raise
self._delete_dataset(temp_table.projectId, temp_table.datasetId, True)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_job(self, project, job_id, location=None):
request = bigquery.BigqueryJobsGetRequest()
request.jobId = job_id
request.projectId = project
request.location = location
return self.client.jobs.Get(request)
def perform_load_job(self,
destination,
files,
job_id,
schema=None,
write_disposition=None,
create_disposition=None):
"""Starts a job to load data into BigQuery.
Returns:
bigquery.JobReference with the information about the job that was started.
"""
return self._insert_load_job(
destination.projectId, job_id, destination, files,
schema=schema,
create_disposition=create_disposition,
write_disposition=write_disposition)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_table(
self, project_id, dataset_id, table_id, schema,
create_disposition, write_disposition):
"""Gets or creates a table based on create and write dispositions.
The function mimics the behavior of BigQuery import jobs when using the
same create and write dispositions.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
schema: A bigquery.TableSchema instance or None.
create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.
Returns:
A bigquery.Table instance if table was found or created.
Raises:
RuntimeError: For various mismatches between the state of the table and
the create/write dispositions passed in. For example if the table is not
empty and WRITE_EMPTY was specified then an error will be raised since
the table was expected to be empty.
"""
from apache_beam.io.gcp.bigquery import BigQueryDisposition
found_table = None
try:
found_table = self.get_table(project_id, dataset_id, table_id)
except HttpError as exn:
if exn.status_code == 404:
if create_disposition == BigQueryDisposition.CREATE_NEVER:
raise RuntimeError(
'Table %s:%s.%s not found but create disposition is CREATE_NEVER.'
% (project_id, dataset_id, table_id))
else:
raise
# If table exists already then handle the semantics for WRITE_EMPTY and
# WRITE_TRUNCATE write dispositions.
if found_table:
table_empty = self._is_table_empty(project_id, dataset_id, table_id)
if (not table_empty and
write_disposition == BigQueryDisposition.WRITE_EMPTY):
raise RuntimeError(
'Table %s:%s.%s is not empty but write disposition is WRITE_EMPTY.'
% (project_id, dataset_id, table_id))
# Delete the table and recreate it (later) if WRITE_TRUNCATE was
# specified.
if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
self._delete_table(project_id, dataset_id, table_id)
# Create a new table potentially reusing the schema from a previously
# found table in case the schema was not specified.
if schema is None and found_table is None:
raise RuntimeError(
'Table %s:%s.%s requires a schema. None can be inferred because the '
'table does not exist.'
% (project_id, dataset_id, table_id))
if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
return found_table
else:
created_table = self._create_table(project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
schema=schema or found_table.schema)
logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
project_id, dataset_id, table_id,
schema or found_table.schema,
created_table)
# if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete
# the table before this point.
if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
# BigQuery can route data to the old table for 2 mins max so wait
# that much time before creating the table and writing it
logging.warning('Sleeping for 150 seconds before the write as ' +
'BigQuery inserts can be routed to deleted table ' +
'for 2 mins after the delete and create.')
# TODO(BEAM-2673): Remove this sleep by migrating to load api
time.sleep(150)
return created_table
else:
return created_table
def run_query(self, project_id, query, use_legacy_sql, flatten_results,
dry_run=False):
job_id = self._start_query_job(project_id, query, use_legacy_sql,
flatten_results, job_id=uuid.uuid4().hex,
dry_run=dry_run)
if dry_run:
# If this was a dry run then the fact that we get here means the
# query has no errors. The start_query_job would raise an error otherwise.
return
page_token = None
while True:
response = self._get_query_results(project_id, job_id, page_token)
if not response.jobComplete:
# The jobComplete field can be False if the query request times out
# (default is 10 seconds). Note that this is a timeout for the query
# request not for the actual execution of the query in the service. If
# the request times out we keep trying. This situation is quite possible
# if the query will return a large number of rows.
logging.info('Waiting on response from query: %s ...', query)
time.sleep(1.0)
continue
# We got some results. The last page is signalled by a missing pageToken.
yield response.rows, response.schema
if not response.pageToken:
break
page_token = response.pageToken
def insert_rows(self, project_id, dataset_id, table_id, rows,
skip_invalid_rows=False):
"""Inserts rows into the specified table.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
rows: A list of plain Python dictionaries. Each dictionary is a row and
each key in it is the name of a field.
skip_invalid_rows: If there are rows with insertion errors, whether they
should be skipped, and all others should be inserted successfully.
Returns:
A tuple (bool, errors). If first element is False then the second element
will be a bigquery.InsertErrorsValueListEntry instance containing
specific errors.
"""
# Prepare rows for insertion. Of special note is the row ID that we add to
# each row in order to help BigQuery avoid inserting a row multiple times.
# BigQuery will do a best-effort if unique IDs are provided. This situation
# can happen during retries on failures.
# TODO(silviuc): Must add support to writing TableRow's instead of dicts.
final_rows = []
for row in rows:
json_object = bigquery.JsonObject()
for k, v in iteritems(row):
if isinstance(v, decimal.Decimal):
# decimal values are converted into string because JSON does not
# support the precision that decimal supports. BQ is able to handle
# inserts into NUMERIC columns by receiving JSON with string attrs.
v = str(v)
json_object.additionalProperties.append(
bigquery.JsonObject.AdditionalProperty(
key=k, value=to_json_value(v)))
final_rows.append(
bigquery.TableDataInsertAllRequest.RowsValueListEntry(
insertId=str(self.unique_row_id),
json=json_object))
result, errors = self._insert_all_rows(
project_id, dataset_id, table_id, final_rows, skip_invalid_rows)
return result, errors
def _convert_cell_value_to_dict(self, value, field):
if field.type == 'STRING':
# Input: "XYZ" --> Output: "XYZ"
return value
elif field.type == 'BOOLEAN':
# Input: "true" --> Output: True
return value == 'true'
elif field.type == 'INTEGER':
# Input: "123" --> Output: 123
return int(value)
elif field.type == 'FLOAT':
# Input: "1.23" --> Output: 1.23
return float(value)
elif field.type == 'TIMESTAMP':
# The UTC should come from the timezone library but this is a known
# issue in python 2.7 so we'll just hardcode it as we're reading using
# utcfromtimestamp.
# Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
dt = datetime.datetime.utcfromtimestamp(float(value))
return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
elif field.type == 'BYTES':
# Input: "YmJi" --> Output: "YmJi"
return value
elif field.type == 'DATE':
# Input: "2016-11-03" --> Output: "2016-11-03"
return value
elif field.type == 'DATETIME':
# Input: "2016-11-03T00:49:36" --> Output: "2016-11-03T00:49:36"
return value
elif field.type == 'TIME':
# Input: "00:49:36" --> Output: "00:49:36"
return value
elif field.type == 'RECORD':
# Note that a schema field object supports also a RECORD type. However
# when querying, the repeated and/or record fields are flattened
# unless we pass the flatten_results flag as False to the source
return self.convert_row_to_dict(value, field)
elif field.type == 'NUMERIC':
return decimal.Decimal(value)
elif field.type == 'GEOGRAPHY':
return value
else:
raise RuntimeError('Unexpected field type: %s' % field.type)
def convert_row_to_dict(self, row, schema):
"""Converts a TableRow instance using the schema to a Python dict."""
result = {}
for index, field in enumerate(schema.fields):
value = None
if isinstance(schema, bigquery.TableSchema):
cell = row.f[index]
value = from_json_value(cell.v) if cell.v is not None else None
elif isinstance(schema, bigquery.TableFieldSchema):
cell = row['f'][index]
value = cell['v'] if 'v' in cell else None
if field.mode == 'REPEATED':
if value is None:
# Ideally this should never happen as repeated fields default to
# returning an empty list
result[field.name] = []
else:
result[field.name] = [self._convert_cell_value_to_dict(x['v'], field)
for x in value]
elif value is None:
if not field.mode == 'NULLABLE':
raise ValueError('Received \'None\' as the value for the field %s '
'but the field is not NULLABLE.' % field.name)
result[field.name] = None
else:
result[field.name] = self._convert_cell_value_to_dict(value, field)
return result
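# A sketch of typical wrapper usage, assuming placeholder project/dataset/table
# names; note that these calls talk to the live BigQuery service.
def _example_bigquery_wrapper_usage():  # pragma: no cover
    from apache_beam.io.gcp.bigquery import BigQueryDisposition
    wrapper = BigQueryWrapper()
    schema = bigquery.TableSchema(fields=[
        bigquery.TableFieldSchema(name='name', type='STRING', mode='REQUIRED')])
    wrapper.get_or_create_table(
        'my-project', 'my_dataset', 'my_table', schema,
        BigQueryDisposition.CREATE_IF_NEEDED, BigQueryDisposition.WRITE_APPEND)
    # insert_rows attaches a unique insertId to every row so retried requests do
    # not produce duplicate rows.
    passed, errors = wrapper.insert_rows(
        'my-project', 'my_dataset', 'my_table', rows=[{'name': 'Ada'}])
    assert passed, errors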
# -----------------------------------------------------------------------------
# BigQueryReader, BigQueryWriter.
class BigQueryReader(dataflow_io.NativeSourceReader):
"""A reader for a BigQuery source."""
def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True,
flatten_results=True, kms_key=None):
self.source = source
self.test_bigquery_client = test_bigquery_client
if auth.is_running_in_gce:
self.executing_project = auth.executing_project
elif hasattr(source, 'pipeline_options'):
self.executing_project = (
source.pipeline_options.view_as(GoogleCloudOptions).project)
else:
self.executing_project = None
# TODO(silviuc): Try to automatically get it from gcloud config info.
if not self.executing_project and test_bigquery_client is None:
raise RuntimeError(
'Missing executing project information. Please use the --project '
'command line option to specify it.')
self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder)
# Schema for the rows being read by the reader. It is initialized the
# first time something gets read from the table. It is not required
# for reading the field values in each row but could be useful for
# getting additional details.
self.schema = None
self.use_legacy_sql = use_legacy_sql
self.flatten_results = flatten_results
self.kms_key = kms_key
if self.source.table_reference is not None:
# If table schema did not define a project we default to executing
# project.
project_id = self.source.table_reference.projectId
if not project_id:
project_id = self.executing_project
self.query = 'SELECT * FROM [%s:%s.%s];' % (
project_id,
self.source.table_reference.datasetId,
self.source.table_reference.tableId)
elif self.source.query is not None:
self.query = self.source.query
else:
# Enforce the "modes" enforced by BigQuerySource.__init__.
# If this exception has been raised, the BigQuerySource "modes" have
# changed and this method will need to be updated as well.
raise ValueError("BigQuerySource must have either a table or query")
def _get_source_location(self):
"""
Get the source location (e.g. ``"EU"`` or ``"US"``) from either
- :data:`source.table_reference`
or
- The first referenced table in :data:`source.query`
See Also:
- :meth:`BigQueryWrapper.get_query_location`
- :meth:`BigQueryWrapper.get_table_location`
Returns:
Optional[str]: The source location, if any.
"""
if self.source.table_reference is not None:
tr = self.source.table_reference
return self.client.get_table_location(
tr.projectId if tr.projectId is not None else self.executing_project,
tr.datasetId, tr.tableId)
else: # It's a query source
return self.client.get_query_location(
self.executing_project,
self.source.query,
self.source.use_legacy_sql)
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.create_temporary_dataset(
self.executing_project, location=self._get_source_location())
return self
def __exit__(self, exception_type, exception_value, traceback):
self.client.clean_up_temporary_dataset(self.executing_project)
def __iter__(self):
for rows, schema in self.client.run_query(
project_id=self.executing_project, query=self.query,
use_legacy_sql=self.use_legacy_sql,
flatten_results=self.flatten_results):
if self.schema is None:
self.schema = schema
for row in rows:
if self.row_as_dict:
yield self.client.convert_row_to_dict(row, schema)
else:
yield row
class BigQueryWriter(dataflow_io.NativeSinkWriter):
"""The sink writer for a BigQuerySink."""
def __init__(self, sink, test_bigquery_client=None, buffer_size=None):
self.sink = sink
self.test_bigquery_client = test_bigquery_client
self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)
# Buffer used to batch written rows so we reduce communication with the
# BigQuery service.
self.rows_buffer = []
self.rows_buffer_flush_threshold = buffer_size or 1000
# Figure out the project, dataset, and table used for the sink.
self.project_id = self.sink.table_reference.projectId
# If table schema did not define a project we default to executing project.
if self.project_id is None and hasattr(sink, 'pipeline_options'):
self.project_id = (
sink.pipeline_options.view_as(GoogleCloudOptions).project)
assert self.project_id is not None
self.dataset_id = self.sink.table_reference.datasetId
self.table_id = self.sink.table_reference.tableId
def _flush_rows_buffer(self):
if self.rows_buffer:
logging.info('Writing %d rows to %s:%s.%s table.', len(self.rows_buffer),
self.project_id, self.dataset_id, self.table_id)
passed, errors = self.client.insert_rows(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=self.rows_buffer)
self.rows_buffer = []
if not passed:
raise RuntimeError('Could not successfully insert rows to BigQuery'
' table [%s:%s.%s]. Errors: %s' %
(self.project_id, self.dataset_id,
self.table_id, errors))
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.get_or_create_table(
self.project_id, self.dataset_id, self.table_id, self.sink.table_schema,
self.sink.create_disposition, self.sink.write_disposition)
return self
def __exit__(self, exception_type, exception_value, traceback):
self._flush_rows_buffer()
def Write(self, row):
self.rows_buffer.append(row)
if len(self.rows_buffer) > self.rows_buffer_flush_threshold:
self._flush_rows_buffer()
class RowAsDictJsonCoder(coders.Coder):
"""A coder for a table row (represented as a dict) to/from a JSON string.
This is the default coder for sources and sinks if the coder argument is not
specified.
"""
def encode(self, table_row):
# The normal error when dumping NAN/INF values is:
# ValueError: Out of range float values are not JSON compliant
# This code will catch this error to emit an error that explains
# to the programmer that they have used NAN/INF values.
try:
return json.dumps(
table_row, allow_nan=False, default=default_encoder).encode('utf-8')
except ValueError as e:
raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR))
def decode(self, encoded_table_row):
return json.loads(encoded_table_row.decode('utf-8'))
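# A minimal illustrative sketch of the coder: rows round-trip through JSON, and
# non-finite floats raise the error explained above.
def _example_row_as_dict_json_coder():  # pragma: no cover
    coder = RowAsDictJsonCoder()
    encoded = coder.encode({'name': 'Ada', 'score': 1.5})
    assert coder.decode(encoded) == {'name': 'Ada', 'score': 1.5}
    try:
        coder.encode({'score': float('nan')})
    except ValueError as error:
        assert JSON_COMPLIANCE_ERROR in str(error)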
class RetryStrategy(object):
RETRY_ALWAYS = 'RETRY_ALWAYS'
RETRY_NEVER = 'RETRY_NEVER'
RETRY_ON_TRANSIENT_ERROR = 'RETRY_ON_TRANSIENT_ERROR'
_NON_TRANSIENT_ERRORS = {'invalid', 'invalidQuery', 'notImplemented'}
@staticmethod
def should_retry(strategy, error_message):
if strategy == RetryStrategy.RETRY_ALWAYS:
return True
elif strategy == RetryStrategy.RETRY_NEVER:
return False
elif (strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR and
error_message not in RetryStrategy._NON_TRANSIENT_ERRORS):
return True
else:
return False
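# A minimal illustrative sketch: 'invalid' is listed as non-transient above, so
# it is never retried under RETRY_ON_TRANSIENT_ERROR, while an unlisted reason
# such as 'backendError' is retried.
def _example_retry_strategy():  # pragma: no cover
    assert not RetryStrategy.should_retry(
        RetryStrategy.RETRY_ON_TRANSIENT_ERROR, 'invalid')
    assert RetryStrategy.should_retry(
        RetryStrategy.RETRY_ON_TRANSIENT_ERROR, 'backendError')
    assert RetryStrategy.should_retry(RetryStrategy.RETRY_ALWAYS, 'invalid')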
class AppendDestinationsFn(DoFn):
"""Adds the destination to an element, making it a KV pair.
Outputs a PCollection of KV-pairs where the key is a TableReference for the
destination, and the value is the record itself.
Experimental; no backwards compatibility guarantees.
"""
def __init__(self, destination):
self.destination = AppendDestinationsFn._get_table_fn(destination)
@staticmethod
def _value_provider_or_static_val(elm):
if isinstance(elm, value_provider.ValueProvider):
return elm
else:
# The type argument is a NoOp, because we assume the argument already has
# the proper formatting.
return value_provider.StaticValueProvider(lambda x: x, value=elm)
@staticmethod
def _get_table_fn(destination):
if callable(destination):
return destination
else:
return lambda x: AppendDestinationsFn._value_provider_or_static_val(
destination).get()
def process(self, element):
yield (self.destination(element), element)
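# A minimal illustrative sketch of the DoFn, using a placeholder table name: each
# element is paired with its destination, which may be static or computed.
def _example_append_destinations_fn():  # pragma: no cover
    fn = AppendDestinationsFn('my-project:my_dataset.my_table')
    assert list(fn.process({'a': 1})) == [
        ('my-project:my_dataset.my_table', {'a': 1})]
    # A callable destination is evaluated per element.
    fn = AppendDestinationsFn(
        lambda row: 'tables.even' if row['a'] % 2 == 0 else 'tables.odd')
    assert list(fn.process({'a': 2})) == [('tables.even', {'a': 2})]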
8a790aaa3beecccbae1e5fe2d0bb1478dbadd597 | 1,841 | py | Python | VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | ["MIT"]
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import (
get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute,
collect_data_files)
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt5 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()])
extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')),
'Qt', 'bin')])
# In the new consolidated mode any PyQt depends on _qt
hiddenimports = ['sip', 'PyQt5.Qt']
# Collect just the qt.conf file.
datas = [x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if
x[0].endswith('qt.conf')]
# For Qt<5.4 to work on Mac OS X it is necessary to include `qt_menu.nib`.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
# Version of the currently installed Qt 5.x shared library.
qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR')
if is_module_satisfies('Qt < 5.4', qt_version):
datas = [(qt_menu_nib_dir('PyQt5'), '')]
8a7922d582e70ee076c3374be8cdb74d33423c9b | 1,038 | py | Python | tests/ast/nodes/test_from_node.py | upgradvisor/vyper | 642884ea938a25793c1b2fac866e8458e63a7b49 | ["Apache-2.0"]
from vyper import ast as vy_ast
def test_output_class():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert isinstance(new_node, vy_ast.Int)
def test_source():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.src == new_node.src
assert old_node.node_source_code == new_node.node_source_code
def test_kwargs():
old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.value == 42
assert new_node.value == 666
def test_compare_nodes():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert not vy_ast.compare_nodes(old_node, new_node)
def test_new_node_has_no_parent():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert new_node._parent is None
assert new_node._depth == 0
8a79bd5eb2532e1ffdd3b87d6be696b8303afc7f | 2,624 | py | Python | generator/modules/opencv.py | dayta-ai/deepo | fa720e39052e63adfe0f2b9dbd8444a0d69c2540 | ["MIT"]
# -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source, version
from .tools import Tools
from .boost import Boost
from .python import Python
@dependency(Tools, Python, Boost)
@source('git')
@version('4.0.1')
class Opencv(Module):
def build(self):
return r'''
RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \
DEBIAN_FRONTEND=noninteractive \
add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main" && \
apt update && \
$APT_INSTALL \
libatlas-base-dev \
libgflags-dev \
libgoogle-glog-dev \
libhdf5-serial-dev \
libleveldb-dev \
liblmdb-dev \
libprotobuf-dev \
libsnappy-dev \
protobuf-compiler \
libopencv-dev \
yasm \
libjpeg-dev \
libjasper-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libdc1394-22-dev \
libv4l-dev \
libtbb-dev \
libqt4-dev \
libgtk2.0-dev \
libfaac-dev \
libmp3lame-dev \
libopencore-amrnb-dev \
libopencore-amrwb-dev \
libtheora-dev \
libvorbis-dev \
libxvidcore-dev \
x264 \
v4l-utils \
ffmpeg \
&& \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \
mkdir -p opencv/build && cd opencv/build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D WITH_IPP=OFF \
-D WITH_CUDA=OFF \
-D WITH_TBB=ON \
-D WITH_V4L=ON \
-D WITH_QT=ON \
-D WITH_OPENCL=ON \
-D WITH_GTK=ON \
-D WITH_LIBV4L=ON \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D WITH_FFMPEG=ON \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
.. && \
make -j"$(nproc)" install && \
ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2
'''.format(self.version)
8a7a7334b3428135d28ee8a3da56e39eed250254 | 1,564 | py | Python | day16/solve16.py | jmacarthur/aoc2017 | 2a3096aabf464ef52c05f9437498035cfb5ca1a6 | ["MIT"]
#!/usr/bin/python
import sys
import copy
stage_length = 16
stage = map(chr, range(ord('a'),ord('a')+stage_length))
def spin(amount):
"""To save time, this function isn't used except at the end.
Normally, a counter marks the start of the stage and this changes
instead. """
global stage
stage = stage[amount:] + stage[:amount]
def swap(pos1, pos2):
global stage
(stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1])
with open(sys.argv[1], 'rt') as f:
program = ",".join(f.readlines()).split(",")
n = 0
pos = 0
arguments_list = [x[1:].strip().split("/") for x in program]
action_list = [x[0] for x in program]
history = []
# Change this to 1 for the solution to part 1.
iterations = 1000000000
while n<iterations:
for s in range(0,len(program)):
arguments = arguments_list[s]
if action_list[s] == 's':
pos += stage_length-int(arguments[0])
elif action_list[s] == 'x':
swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length)
elif action_list[s] == 'p':
pos1 = stage.index(arguments[0])
pos2 = stage.index(arguments[1])
swap(pos1, pos2)
if stage in history:
print("Duplicate found: %r at index %d matches at stage %d"%(stage, history.index(stage), n))
loop_length = n - history.index(stage)
complete_cycles = (iterations - n) / loop_length
n += complete_cycles * loop_length
history.append(copy.copy(stage))
n += 1
spin(pos % stage_length)
print "".join(stage)
8a7abfc40ef422e33ab3c8284edc61617b59e3dc | 1,165 | py | Python | skimage/segmentation/tests/test_felzenszwalb.py | jaberg/scikits-image | 2ab3e2dfb341189ef2ff9370c6cf3d33ef6ec88d | ["BSD-3-Clause"]
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from nose.tools import assert_greater
from skimage.segmentation import felzenszwalb
def test_grey():
# very weak tests. This algorithm is pretty unstable.
img = np.zeros((20, 21))
img[:10, 10:] = 0.2
img[10:, :10] = 0.4
img[10:, 10:] = 0.6
seg = felzenszwalb(img, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
# that mostly respect the 4 regions:
for i in range(4):
hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
assert_greater(hist[i], 40)
def test_color():
# very weak tests. This algorithm is pretty unstable.
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
seg = felzenszwalb(img, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
8a7ac7f87e160e8f864dafce2acd68a6454b8a68 | 1,419 | py | Python | tests/middleware/test_csrf_middleware.py | w3x10e8/core | d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49 | ["MIT"]
from masonite.request import Request
from masonite.view import View
from masonite.auth.Csrf import Csrf
from masonite.app import App
from masonite.middleware import CsrfMiddleware
from masonite.testsuite.TestSuite import generate_wsgi
import pytest
from masonite.exceptions import InvalidCSRFToken
class TestCSRFMiddleware:
def setup_method(self):
self.app = App()
self.request = Request(generate_wsgi())
self.view = View(self.app)
self.app.bind('Request', self.request)
self.request = self.app.make('Request')
self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view)
def test_middleware_shares_correct_input(self):
self.middleware.before()
assert 'csrf_field' in self.view.dictionary
assert self.view.dictionary['csrf_field'].startswith("<input type='hidden' name='__token' value='")
def test_middleware_throws_exception_on_post(self):
self.request.environ['REQUEST_METHOD'] = 'POST'
self.middleware.exempt = []
with pytest.raises(InvalidCSRFToken):
self.middleware.before()
def test_incoming_token_does_not_throw_exception_with_token(self):
self.request.environ['REQUEST_METHOD'] = 'POST'
self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')})
self.middleware.exempt = []
self.middleware.before()
8a7bd23662f4d2b0b0c83db0df08df0f16f7923c | 690 | py | Python | phoible/views.py | ltxom/phoible | 7ce6f5e62d885f142dba61937d920e68fa7f9fca | ["Apache-2.0"]
from pyramid.view import view_config
import os
@view_config(route_name='faq', renderer='faq.mako')
def faq_view(request):
dir_path = os.path.dirname(__file__)
faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html')
with open(faq_file, 'r') as f:
faq_page = f.read()
return {'content': faq_page}
@view_config(route_name='conventions', renderer='conventions.mako')
def conventions_view(request):
dir_path = os.path.dirname(__file__)
conventions_file = os.path.join(dir_path, 'static/conventions.html')
with open(conventions_file, 'r') as file:
conventions_page = file.read().replace('\n', '')
return {'content': conventions_page}
8a7c5a43d05f7336921551d124cf954c34bc06e5 | 46,013 | py | Python | tests/restapi/test_routes.py | aiace9/aiida-core | 09ac91654648adb684a58d5d2d7b1c11a503dae8 | ["MIT", "BSD-3-Clause"]
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=too-many-lines
"""Unittests for REST API."""
import tempfile
from flask_cors.core import ACL_ORIGIN
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import json
from aiida.common.links import LinkType
from aiida.restapi.run_api import configure_api
class RESTApiTestCase(AiidaTestCase):
"""
Setup of the tests for the AiiDA RESTful-api
"""
_url_prefix = '/api/v4'
_dummy_data = {}
_PERPAGE_DEFAULT = 20
_LIMIT_DEFAULT = 400
@classmethod
def setUpClass(cls, *args, **kwargs): # pylint: disable=too-many-locals, too-many-statements
"""
Add objects to the database for different requests/filters/orderings etc.
"""
super().setUpClass()
api = configure_api(catch_internal_server=True)
cls.app = api.app
cls.app.config['TESTING'] = True
# create test inputs
cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
structure = orm.StructureData(cell=cell)
structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
structure.store()
structure.add_comment('This is test comment.')
structure.add_comment('Add another comment.')
cif = orm.CifData(ase=structure.get_ase())
cif.store()
parameter1 = orm.Dict(dict={'a': 1, 'b': 2})
parameter1.store()
parameter2 = orm.Dict(dict={'c': 3, 'd': 4})
parameter2.store()
kpoint = orm.KpointsData()
kpoint.set_kpoints_mesh([4, 4, 4])
kpoint.store()
resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
calcfunc = orm.CalcFunctionNode(computer=cls.computer)
calcfunc.store()
calc = orm.CalcJobNode(computer=cls.computer)
calc.set_option('resources', resources)
calc.set_attribute('attr1', 'OK')
calc.set_attribute('attr2', 'OK')
calc.set_extra('extra1', False)
calc.set_extra('extra2', 'extra_info')
calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure')
calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter')
aiida_in = 'The input file\nof the CalcJob node'
# Add the calcjob_inputs folder with the aiida.in file to the CalcJobNode repository
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_in)
handle.flush()
handle.seek(0)
calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True)
calc.store()
# create log message for calcjob
import logging
from aiida.common.log import LOG_LEVEL_REPORT
from aiida.common.timezone import now
from aiida.orm import Log
log_record = {
'time': now(),
'loggername': 'loggername',
'levelname': logging.getLevelName(LOG_LEVEL_REPORT),
'dbnode_id': calc.id,
'message': 'This is a template record message',
'metadata': {
'content': 'test'
},
}
Log(**log_record)
aiida_out = 'The output file\nof the CalcJob node'
retrieved_outputs = orm.FolderData()
# Add the calcjob_outputs folder with the aiida.out file to the FolderData node
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_out)
handle.flush()
handle.seek(0)
retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True)
retrieved_outputs.store()
retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved')
kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create')
calc1 = orm.CalcJobNode(computer=cls.computer)
calc1.set_option('resources', resources)
calc1.store()
dummy_computers = [{
'label': 'test1',
'hostname': 'test1.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'pbspro',
}, {
'label': 'test2',
'hostname': 'test2.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'torque',
}, {
'label': 'test3',
'hostname': 'test3.epfl.ch',
'transport_type': 'local',
'scheduler_type': 'slurm',
}, {
'label': 'test4',
'hostname': 'test4.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'slurm',
}]
for dummy_computer in dummy_computers:
computer = orm.Computer(**dummy_computer)
computer.store()
# Prepare typical REST responses
cls.process_dummy_data()
def get_dummy_data(self):
return self._dummy_data
def get_url_prefix(self):
return self._url_prefix
@classmethod
def process_dummy_data(cls):
# pylint: disable=fixme
"""
This function prepares atomic chunks of typical responses from the
REST API and puts them into class attributes
"""
# TODO: Storing the different nodes as lists and accessing them
# by their list index is very fragile and a pain to debug.
# Please change this!
computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type']
computers = orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({
'comp': [{
'id': {
'order': 'asc'
}
}]
}).dict()
# Cast UUID into a string (e.g. in sqlalchemy it comes as a UUID object)
computers = [_['comp'] for _ in computers]
for comp in computers:
if comp['uuid'] is not None:
comp['uuid'] = str(comp['uuid'])
cls._dummy_data['computers'] = computers
calculation_projections = ['id', 'uuid', 'user_id', 'node_type']
calculations = orm.QueryBuilder().append(orm.CalculationNode, tag='calc',
project=calculation_projections).order_by({
'calc': [{
'id': {
'order': 'desc'
}
}]
}).dict()
calculations = [_['calc'] for _ in calculations]
for calc in calculations:
if calc['uuid'] is not None:
calc['uuid'] = str(calc['uuid'])
cls._dummy_data['calculations'] = calculations
data_projections = ['id', 'uuid', 'user_id', 'node_type']
data_types = {
'cifdata': orm.CifData,
'parameterdata': orm.Dict,
'structuredata': orm.StructureData,
'data': orm.Data,
}
for label, dataclass in data_types.items():
data = orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({
'data': [{
'id': {
'order': 'desc'
}
}]
}).dict()
data = [_['data'] for _ in data]
for datum in data:
if datum['uuid'] is not None:
datum['uuid'] = str(datum['uuid'])
cls._dummy_data[label] = data
def split_path(self, url):
# pylint: disable=no-self-use
"""
Split the url with "?" to get url path and it's parameters
:param url: Web url
:return: url path and url parameters
"""
parts = url.split('?')
path = ''
query_string = ''
if parts:
path = parts[0]
if len(parts) > 1:
query_string = parts[1]
return path, query_string
def compare_extra_response_data(self, node_type, url, response, uuid=None):
"""
In url response, we pass some extra information/data along with the node
results. e.g. url method, node_type, path, pk, query_string, url,
url_root,
etc.
:param node_type: url requested for the type of the node
:param url: web url
:param response: url response
:param uuid: url requested for the node pk
"""
path, query_string = self.split_path(url)
self.assertEqual(response['method'], 'GET')
self.assertEqual(response['resource_type'], node_type)
self.assertEqual(response['path'], path)
self.assertEqual(response['id'], uuid)
self.assertEqual(response['query_string'], query_string)
self.assertEqual(response['url'], f'http://localhost{url}')
self.assertEqual(response['url_root'], 'http://localhost/')
# node details and list with limit, offset, page, perpage
def process_test(
self,
entity_type,
url,
full_list=False,
empty_list=False,
expected_list_ids=None,
expected_range=None,
expected_errormsg=None,
uuid=None,
result_node_type=None,
result_name=None
):
# pylint: disable=too-many-arguments
"""
Check whether response matches expected values.
:param entity_type: url requested for the type of the node
:param url: web url
:param full_list: if url is requested to get full list
:param empty_list: if the response list is empty
:param expected_list_ids: list of expected ids from data
:param expected_range: [start, stop] range of expected ids from data
:param expected_errormsg: expected error message in response
:param uuid: url requested for the node pk
:param result_node_type: node type in response data
:param result_name: result name in response e.g. incoming, outgoing
"""
if expected_list_ids is None:
expected_list_ids = []
if expected_range is None:
expected_range = []
if result_node_type is None and result_name is None:
result_node_type = entity_type
result_name = entity_type
url = self._url_prefix + url
with self.app.test_client() as client:
rv_response = client.get(url)
response = json.loads(rv_response.data)
if expected_errormsg:
self.assertEqual(response['message'], expected_errormsg)
else:
if full_list:
expected_data = self._dummy_data[result_node_type]
elif empty_list:
expected_data = []
elif expected_list_ids:
expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids]
elif expected_range != []:
expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]]
else:
from aiida.common.exceptions import InputValidationError
raise InputValidationError('Pass the expected range of the dummydata')
expected_node_uuids = [node['uuid'] for node in expected_data]
result_node_uuids = [node['uuid'] for node in response['data'][result_name]]
self.assertEqual(expected_node_uuids, result_node_uuids)
self.compare_extra_response_data(entity_type, url, response, uuid)
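    # Hedged usage sketch (illustration only; the test cases below make calls
    # of exactly this shape): a detail request compares one dummy-data entry,
    # a list request compares a slice of the ordered dummy data.
    #
    #   self.process_test('computers', f'/computers/{uuid}', expected_list_ids=[1], uuid=uuid)
    #   self.process_test('computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])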
class RESTApiTestSuite(RESTApiTestCase):
# pylint: disable=too-many-public-methods
"""
Define unittests for rest api
"""
############### generic endpoints ########################
def test_server(self):
"""
Test that /server endpoint returns AiiDA version
"""
url = f'{self.get_url_prefix()}/server'
from aiida import __version__
with self.app.test_client() as client:
response = client.get(url)
data = json.loads(response.data)['data']
self.assertEqual(__version__, data['AiiDA_version'])
self.assertEqual(self.get_url_prefix(), data['API_prefix'])
def test_base_url(self):
"""
Test that / returns list of endpoints
"""
with self.app.test_client() as client:
data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']
self.assertTrue(len(data_base['available_endpoints']) > 0)
self.assertDictEqual(data_base, data_server)
def test_cors_headers(self):
"""
Test that REST API sets cross-origin resource sharing headers
"""
url = f'{self.get_url_prefix()}/server'
with self.app.test_client() as client:
response = client.get(url)
headers = response.headers
self.assertEqual(headers.get(ACL_ORIGIN), '*')
############### computers endpoint ########################
def test_computers_details(self):
"""
Requests the details of single computer
"""
node_uuid = self.get_dummy_data()['computers'][1]['uuid']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
)
def test_computers_list(self):
"""
Get the full list of computers from database
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_list_limit_offset(self):
"""
Get the list of computers from database using limit
and offset parameter.
It should return the no of rows specified in limit from
database starting from the no. specified in offset
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
)
def test_computers_list_limit_only(self):
"""
Get the list of computers from database using limit
parameter.
It should return the no of rows specified in limit from
database.
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])
def test_computers_list_offset_only(self):
"""
Get the list of computers from database using offset
parameter
It should return all the rows from database starting from
the no. specified in offset
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])
def test_computers_list_limit_offset_perpage(self):
"""
If we pass the limit, offset and perpage at same time, it
would return the error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
)
def test_computers_list_page_limit_offset(self):
"""
If we use the page, limit and offset at same time, it
would return the error message.
"""
expected_error = 'requesting a specific page is incompatible with ' \
'limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
)
def test_complist_pagelimitoffset_perpage(self):
"""
If we use the page, limit, offset and perpage at same time, it
would return the error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self,
'computers',
'/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
expected_errormsg=expected_error
)
def test_computers_list_page_default(self):
"""
it returns the no. of rows defined as default perpage option
from database.
no.of pages = total no. of computers in database / perpage
"/page" acts as "/page/1?perpage=default_value"
"""
RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)
def test_computers_list_page_perpage(self):
"""
no.of pages = total no. of computers in database / perpage
Using this formula it returns the no. of rows for requested page
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
)
def test_computers_list_page_perpage_exceed(self):
"""
no.of pages = total no. of computers in database / perpage
If we request the page which exceeds the total no. of pages then
it would return the error message.
"""
expected_error = 'Non existent page requested. The page range is [1 : ' \
'3]'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
)
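    # The pagination arithmetic referred to in the docstrings above, as a
    # minimal sketch (variable names are illustrative only):
    #
    #   import math
    #   total_pages = math.ceil(total_rows / perpage)   # e.g. 5 computers, perpage=2 -> 3 pages
    #   # asking for page 4 is outside the range [1 : 3], hence the error message tested above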
############### list filters ########################
def test_computers_filter_id1(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_id2(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id > 2)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]
)
def test_computers_filter_pk(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_name(self):
"""
Add filter for the name of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?name="test1"', expected_list_ids=[1])
def test_computers_filter_hostname(self):
"""
Add filter for the hostname of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?hostname="test1.epfl.ch"', expected_list_ids=[1])
def test_computers_filter_transport_type(self):
"""
Add filter for the transport_type of computer and get the filtered
computer
list
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?transport_type="local"&name="test3"&orderby=+id', expected_list_ids=[3]
)
############### list orderby ########################
def test_computers_orderby_id_asc(self):
"""
Returns the computers list ordered by "id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)
def test_computers_orderby_id_asc_sign(self):
"""
Returns the computers list ordered by "+id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_orderby_id_desc(self):
"""
Returns the computers list ordered by "id" in descending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0])
def test_computers_orderby_name_asc(self):
"""
Returns the computers list ordered by "name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_asc_sign(self):
"""
Returns the computers list ordered by "+name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_desc(self):
"""
Returns the computers list ordered by "name" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1]
)
def test_computers_orderby_scheduler_type_asc(self):
"""
Returns the computers list ordered by "scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_comp_orderby_scheduler_ascsign(self):
"""
Returns the computers list ordered by "+scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=+scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_computers_orderby_schedulertype_desc(self):
"""
Returns the computers list ordered by "scheduler_type" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?pk>{str(node_pk)}&transport_type=\"ssh\"&orderby=-scheduler_type",
expected_list_ids=[2, 4, 1]
)
############### list orderby combinations #######################
def test_computers_orderby_mixed1(self):
"""
Returns the computers list first order by "transport_type" in
ascending order and if it is having same transport_type, order it
by "id"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=transport_type,id',
expected_list_ids=[3, 1, 2, 4]
)
def test_computers_orderby_mixed2(self):
"""
Returns the computers list first order by "scheduler_type" in
descending order and if it is having same scheduler_type, order it
by "name"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name',
expected_list_ids=[2, 3, 4, 1]
)
def test_computers_orderby_mixed3(self):
"""
Returns the computers list first order by "scheduler_type" in
ascending order and if it is having same scheduler_type, order it
by "hostname" descending order
Response::
test4 slurm
test3 slurm
test2 torque
test1 pbspro
localhost pbspro
==========
Expected::
test1 pbspro
localhost pbspro
test4 slurm
test3 slurm
test2 torque
test1 test4
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+scheduler_type,
-hostname",
expected_list_ids=[1,0,4,3,2])
"""
############### list filter combinations #######################
def test_computers_filter_mixed1(self):
"""
Add filter for the hostname and id of computer and get the
filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f"/computers?id>{str(node_pk)}&hostname=\"test1.epfl.ch\"", expected_list_ids=[1]
)
def test_computers_filter_mixed2(self):
"""
Add filter for the id, hostname and transport_type of the computer
and get the filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>{str(node_pk)}&hostname=\"test3.epfl.ch\"&transport_type=\"ssh\"",
empty_list=True
)
############### list all parameter combinations #######################
def test_computers_mixed1(self):
"""
url parameters: id, limit and offset
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]
)
def test_computers_mixed2(self):
"""
url parameters: id, page, perpage
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4]
)
def test_computers_mixed3(self):
"""
url parameters: id, transport_type, orderby
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>={str(node_pk)}&transport_type=\"ssh\"&orderby=-id&limit=2",
expected_list_ids=[4, 2]
)
########## pass unknown url parameter ###########
def test_computers_unknown_param(self):
"""
url parameters: id, limit and offset
from aiida.common.exceptions import InputValidationError
RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
"""
############### calculation retrieved_inputs and retrieved_outputs #############
def test_calculation_retrieved_inputs(self):
"""
Get the list of given calculation retrieved_inputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])
def test_calculation_retrieved_outputs(self):
"""
Get the list of given calculation retrieved_outputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])
############### calculation incoming #############
def test_calculation_inputs(self):
"""
        Get the list of incoming links for the given calculation
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',
expected_list_ids=[5, 3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_input_filters(self):
"""
Get filtered incoming list for given calculations
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f"/nodes/{str(node_uuid)}/links/incoming?node_type=\"data.dict.Dict.\"",
expected_list_ids=[3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_iotree(self):
"""
        Get the link tree (incoming and outgoing) for the given calculation
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(len(response['data']['nodes']), 1)
self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)
self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1)
self.assertEqual(len(response['data']['metadata']), 1)
expected_attr = [
'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing'
]
received_attr = response['data']['nodes'][0].keys()
for attr in expected_attr:
self.assertIn(attr, received_attr)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation attributes #############
def test_calculation_attributes(self):
"""
Get list of calculation attributes
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], attributes)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_contents_attributes_filter(self):
"""
Get list of calculation attributes with filter attributes_filter
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\"attr1\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation node attributes filter #############
def test_calculation_attributes_filter(self):
"""
Get the list of given calculation attributes filtered
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)
############### calculation node extras_filter #############
def test_calculation_extras_filter(self):
"""
Get the list of given calculation extras filtered
"""
extras = {'extra1': False, 'extra2': 'extra_info'}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])
self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])
############### structure node attributes filter #############
def test_structure_attributes_filter(self):
"""
Get the list of given calculation attributes filtered
"""
cell = [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell)
############### node attributes_filter with pagination #############
def test_node_attributes_filter_pagination(self):
"""
Check that node attributes specified in attributes_filter are
returned as a dictionary when pagination is set
"""
expected_attributes = ['resources', 'cell']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('attributes', node)
self.assertNotIn('attributes.resources', node)
self.assertNotIn('attributes.cell', node)
self.assertEqual(len(node['attributes']), len(expected_attributes))
for attr in expected_attributes:
self.assertIn(attr, node['attributes'])
############### node get one attributes_filter with pagination #############
def test_node_single_attributes_filter(self):
"""
Check that when only one node attribute is specified in attributes_filter
only this attribute is returned as a dictionary when pagination is set
"""
expected_attribute = ['resources']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['attributes'].keys()), expected_attribute)
############### node extras_filter with pagination #############
def test_node_extras_filter_pagination(self):
"""
Check that node extras specified in extras_filter are
returned as a dictionary when pagination is set
"""
expected_extras = ['extra1', 'extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('extras', node)
self.assertNotIn('extras.extra1', node)
self.assertNotIn('extras.extra2', node)
self.assertEqual(len(node['extras']), len(expected_extras))
for extra in expected_extras:
self.assertIn(extra, node['extras'])
############### node get one extras_filter with pagination #############
def test_node_single_extras_filter(self):
"""
Check that when only one node extra is specified in extras_filter
only this extra is returned as a dictionary when pagination is set
"""
expected_extra = ['extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['extras'].keys()), expected_extra)
############### node full_type filter #############
def test_nodes_full_type_filter(self):
"""
Get the list of nodes filtered by full_type
"""
expected_node_uuids = []
for calc in self.get_dummy_data()['calculations']:
if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.':
expected_node_uuids.append(calc['uuid'])
url = f"{self.get_url_prefix()}/nodes/?full_type=\"process.calculation.calcjob.CalcJobNode.|\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
for node in response['data']['nodes']:
self.assertIn(node['uuid'], expected_node_uuids)
############### Structure visualization and download #############
def test_structure_derived_properties(self):
"""
        Get the derived properties (dimensionality, formula) of the given structure node
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(
response['data']['derived_properties']['dimensionality'], {
'dim': 3,
'value': 8.0,
'label': 'volume'
}
)
self.assertEqual(response['data']['derived_properties']['formula'], 'Ba')
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_structure_download(self):
"""
Test download of structure file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf'
with self.app.test_client() as client:
rv_obj = client.get(url)
structure_data = load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, structure_data)
def test_cif(self):
"""
Test download of cif file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['cifdata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif'
with self.app.test_client() as client:
rv_obj = client.get(url)
cif = load_node(node_uuid)._prepare_cif()[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, cif)
############### projectable_properties #############
def test_projectable_properties(self):
"""
test projectable_properties endpoint
"""
for nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']:
url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type']
# check fields
for _, pinfo in response['data']['fields'].items():
available_keys = pinfo.keys()
for prop in expected_keys:
self.assertIn(prop, available_keys)
# check order
available_properties = response['data']['fields'].keys()
for prop in response['data']['ordering']:
self.assertIn(prop, available_properties)
def test_node_namespace(self):
"""
Test the rest api call to get list of available node namespace
"""
url = f'{self.get_url_prefix()}/nodes/full_types'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type']
response_keys = response['data'].keys()
for dkay in expected_data_keys:
self.assertIn(dkay, response_keys)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response)
def test_comments(self):
"""
Get the node comments
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)['data']['comments']
all_comments = []
for comment in response:
all_comments.append(comment['message'])
self.assertEqual(sorted(all_comments), sorted(['This is test comment.', 'Add another comment.']))
def test_repo(self):
"""
Test to get repo list or repo file contents for given node
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\"calcjob_inputs\""
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}])
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\"calcjob_inputs/aiida.in\""
with self.app.test_client() as client:
response_obj = client.get(url)
input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb')
self.assertEqual(response_obj.data, input_file)
def test_process_report(self):
"""
Test process report
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
expected_keys = response['data'].keys()
for key in ['logs']:
self.assertIn(key, expected_keys)
expected_log_keys = response['data']['logs'][0].keys()
for key in ['time', 'loggername', 'levelname', 'dbnode_id', 'message']:
self.assertIn(key, expected_log_keys)
def test_download_formats(self):
"""
test for download format endpoint
"""
url = f'{self.get_url_prefix()}/nodes/download_formats'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
for key in ['data.structure.StructureData.|', 'data.cif.CifData.|']:
self.assertIn(key, response['data'].keys())
for key in ['cif', 'xsf', 'xyz']:
self.assertIn(key, response['data']['data.structure.StructureData.|'])
self.assertIn('cif', response['data']['data.cif.CifData.|'])
| 40.186026 | 117 | 0.583878 | 45,065 | 0.979397 | 0 | 0 | 7,022 | 0.152609 | 0 | 0 | 19,392 | 0.421446 |
8a7d500dd98fa04ac32ae6b712ad22a261bd4d52 | 3,644 | py | Python | processmonitor.py | yletallec/processmonitor | 95db3416ec35fcb1325a1ac6c5a26807e4c3a474 | [
"MIT"
] | null | null | null | processmonitor.py | yletallec/processmonitor | 95db3416ec35fcb1325a1ac6c5a26807e4c3a474 | [
"MIT"
] | null | null | null | processmonitor.py | yletallec/processmonitor | 95db3416ec35fcb1325a1ac6c5a26807e4c3a474 | [
"MIT"
] | null | null | null | """Process Monitor
Usage:
processmonitor.py <process_name> <overall_duration> [<sampling_interval>]
processmonitor.py -h|--help
processmonitor.py -v|--version
Options:
<process_name> Process name argument.
<overall_duration> Overall duration of the monitoring in seconds.
<sampling_interval> Sampling interval in seconds (optional, default 5).
-h --help Show this screen.
-v --version Show version.
"""
from docopt import docopt
from utils import string_to_integer
from process import Process
from threading import Event, Thread
from datetime import datetime
import os
import sys
import csv
import time
from enum import IntEnum
class ExitStatus(IntEnum):
OK = 0
BAD_DURATION = 1
BAD_INTERVAL = 2
INTERVAL_GT_DURATION = 3
def call_repeatedly(interval, func, *args):
stopped = Event()
def loop():
iteration = 1
while not stopped.wait(interval - time.time() % interval):
func(*args, iteration)
iteration = iteration + 1
Thread(target=loop).start()
return stopped.set
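# Minimal usage sketch of call_repeatedly (illustration only; main() below
# does the same with Process.monitor as the callback):
#
#   cancel = call_repeatedly(2, my_func, arg1)   # calls my_func(arg1, iteration) every ~2 s
#   time.sleep(6)
#   cancel()                                     # stops the background loop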
def print_average():
cpu_avg, mem_avg, files_avg = Process.metrics_average()
if cpu_avg != None and mem_avg != None and files_avg != None:
print(f"Metrics Avg.: %CPU: {cpu_avg}, MEMORY(B): {mem_avg}, OPEN FILES: {files_avg}")
return True
return False
def generate_report(name, duration, interval):
if len(Process.metrics) == 0:
return False
ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
filename = f"{ts}_process-metrics-report_{name}_{duration}_{interval}.csv"
with open(f"{filename}", mode='w') as report:
writer = csv.writer(report, delimiter=',')
writer.writerow(['ITERATION', '%CPU', 'MEMORY(B)', 'OPEN FILES'])
iteration = 1
for metric in Process.metrics:
writer.writerow([
iteration,
metric.cpu,
metric.mem,
metric.files])
iteration = iteration + 1
reportpath = f"./{filename}"
print(f"Metrics report: {reportpath}")
return True
def raise_memory_leak_warning(name):
if (Process.has_memory_leaks(name)):
print(f"WARNING: possible memory leaks detected for process \'{name}\'")
return True
return False
def main():
args = docopt(__doc__, version='Process Monitor 1.0')
if not args['<sampling_interval>']:
args['<sampling_interval>'] = 5
name = args['<process_name>']
try:
duration = string_to_integer(args['<overall_duration>'])
except:
print("duration parameter is not an integer")
return ExitStatus.BAD_DURATION
try:
interval = string_to_integer(args['<sampling_interval>'])
except:
print("interval parameter is not an integer")
return ExitStatus.BAD_INTERVAL
if interval > duration:
print("interval parameter is greater than duration parameter")
return ExitStatus.INTERVAL_GT_DURATION
print("---------------------------------------------")
print(" Process Monitor")
print("---------------------------------------------")
print(f"Monitoring process \'{name}\' every {interval} sec for {duration} sec")
cancel_future_calls = call_repeatedly(interval, Process.monitor, name)
time.sleep(duration)
cancel_future_calls()
print_average()
generate_report(name, duration, interval)
raise_memory_leak_warning(name)
return ExitStatus.OK
def init():
if __name__ == '__main__':
if len(sys.argv) == 1:
sys.argv.append('-h')
sys.exit(main())
init()
| 31.145299 | 94 | 0.630626 | 108 | 0.029638 | 0 | 0 | 0 | 0 | 0 | 0 | 1,230 | 0.337541 |
8a7d668b99ceea74e75c844a87347ac04ef02b71 | 6,740 | py | Python | Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | 2 | 2020-12-05T07:42:55.000Z | 2021-01-06T23:23:18.000Z | Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py | Tim232/Python-Things | 05f0f373a4cf298e70d9668c88a6e3a9d1cd8146 | [
"MIT"
] | null | null | null | import os
import re
import numpy as np
from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import *
class DataLoader:
    # todo train/test/validation => (500/50/50 per class)
def __init__(self):
self.image_width = flags.FLAGS.image_width
self.image_height = flags.FLAGS.image_height
self.batch_size = flags.FLAGS.batch_size
self.data_path = flags.FLAGS.data_path
self.img_reg = re.compile('.*\\.jpeg', re.IGNORECASE)
self.init_class()
self.init_annotation()
def init_class(self):
self.cls = {}
for idx, dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))):
self.cls[dir] = idx
def init_annotation(self):
self.anno = {}
for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')):
filename, label, *_ = line.split('\t')
self.anno[filename] = label
def init_train(self):
train_x, train_y = [], []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'train')):
for file in files:
if self.img_reg.match(file):
train_x.append(os.path.join(path, file))
train_y.append(self.cls[re.match('(.+)\\_\d+\\.jpeg', file, re.IGNORECASE).group(1)])
self.train_len = len(train_y)
#todo train data random sort
random_sort = np.random.permutation(self.train_len)
train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort]
        # todo convert (Numpy / List) => Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x')
self.train_y = tf.convert_to_tensor(value=train_y, dtype=tf.int64, name='train_y')
def init_validation(self):
valid_x, valid_y = [], []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')):
for file in files:
if self.img_reg.match(file):
valid_x.append(os.path.join(path, file))
valid_y.append(self.cls[self.anno[file]])
self.valid_len = len(valid_y)
        # todo validation data random sort
random_sort = np.random.permutation(self.valid_len)
valid_x, valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort]
        # todo convert (Numpy / List) -> Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x')
self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y')
def init_test(self):
test_x = []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'test')):
for file in files:
test_x.append(os.path.join(path, file))
self.test_len = len(test_x)
        # todo convert (Numpy / List) -> Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x')
def train_normal(self, x, y):
with tf.variable_scope(name_or_scope='train_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def train_random_crop(self, x, y):
with tf.variable_scope(name_or_scope='train_random_crop'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.pad(x, [[4, 4], [4, 4], [0, 0]], name='padding')  # pad H and W of a single [H, W, C] image
# x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3))
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def valid_normal(self, x, y):
with tf.variable_scope(name_or_scope='valid_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def test_normal(self, x):
with tf.variable_scope(name_or_scope='test_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x
def dataset_batch_loader(self, dataset, ref_func, name):
with tf.variable_scope(name_or_scope=name):
dataset_map = dataset.map(ref_func).batch(self.batch_size)
iterator = dataset_map.make_one_shot_iterator()
batch_input = iterator.get_next()
return batch_input
def train_loader(self):
with tf.variable_scope('train_loader'):
'''
            repeat(): restarts the dataset from the beginning once the end is reached
            shuffle(): randomly shuffles the dataset (a buffer size larger than the total
                       number of samples gives a full random shuffle)
'''
dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch')
random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch')
return normal_batch, random_crop_batch
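    # Hedged sketch of the tf.data pattern used above (buffer and batch sizes
    # are illustrative values, not taken from the constants module):
    #
    #   dataset = (tf.data.Dataset.from_tensor_slices((paths, labels))
    #              .shuffle(buffer_size=200000)   # larger than the dataset => full shuffle
    #              .repeat()                      # restart from the beginning when exhausted
    #              .batch(64))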
def valid_loader(self):
with tf.variable_scope('valid_loader'):
dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch')
return normal_batch
def test_loader(self):
with tf.variable_scope('test_loader'):
dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.test_normal, name='normal_batch')
return normal_batch | 41.863354 | 145 | 0.616914 | 6,774 | 0.982024 | 0 | 0 | 0 | 0 | 0 | 0 | 1,081 | 0.156712 |
8a7d81f9fd3f30534398ff05abd7412a6f78b709 | 4,035 | py | Python | MarkReport/MarkReport.py | dedukun/MarkReport | 2d92c87a69db5868d14b7a59e815b9ee72d439f9 | [
"MIT"
] | null | null | null | MarkReport/MarkReport.py | dedukun/MarkReport | 2d92c87a69db5868d14b7a59e815b9ee72d439f9 | [
"MIT"
] | null | null | null | MarkReport/MarkReport.py | dedukun/MarkReport | 2d92c87a69db5868d14b7a59e815b9ee72d439f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Command line flags
import os
import glob
import re
import pyinotify
import subprocess
from sys import stdout, stderr
from time import time, sleep
from tempfile import gettempdir
from distutils.dir_util import copy_tree
from shutil import copyfile
from weasyprint import HTML
import argparse
parser = argparse.ArgumentParser(
description='Converts Markdown to elegant PDF reports')
parser.add_argument('--basic', dest='basic', action='store_true',
help='Do not enrich HTML with LaTeX and syntax highlighting (faster builds)')
parser.add_argument('--watch', dest='watch', action='store_true',
help='Watch the current folder for changes and rebuild automatically')
parser.add_argument('--quiet', dest='quiet', action='store_true',
help='Do not output any information')
parser.add_argument("--timeout", type=int, default=2,
help='Page generation timeout')
parser.add_argument("--base-html", type=str, default="",
help='The path to the base HTML file')
parser.set_defaults(watch=False)
args = parser.parse_args()
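# Example invocations (illustrative only):
#   python MarkReport.py            # single build of output.pdf from the *.md files here
#   python MarkReport.py --watch    # rebuild automatically whenever a file changes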
# Check directory
ok = False
for file in os.listdir("."):
if file.endswith(".md"):
ok = True
break
if not ok:
stderr.write("No markdown file found in the current folder")
exit(1)
if args.base_html != "":
if not os.path.isfile(args.base_html):
stderr.write("The given base HTML file doesn't exist")
exit(1)
script_path = os.path.dirname(os.path.realpath(__file__))
# Temp dir
timestamp = str(int(time()))
tmp_dir = gettempdir() + "/" + timestamp + "_md-report/"
os.makedirs(tmp_dir, exist_ok=True)
# Headless browser
if not args.basic:
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
options = Options()
options.headless = True
options.log.level = "trace"
d = DesiredCapabilities.FIREFOX
d['loggingPrefs'] = {'browser': 'ALL'}
driver = webdriver.Firefox(options=options, capabilities=d)
driver.set_page_load_timeout(args.timeout)
prev_compile_time = 0
def recompile(notifier):
if notifier is not None and (notifier.maskname != "IN_MODIFY" or notifier.pathname.endswith(".pdf")):
return
global prev_compile_time
if time() - prev_compile_time < 1:
return
prev_compile_time = time()
if not args.quiet:
stdout.write("\rBuilding the PDF file...")
stdout.flush()
files = glob.glob(tmp_dir + '/*.md')
for f in files:
os.remove(f)
if args.base_html == "":
copyfile(script_path + "/base.html", tmp_dir + "/base.html")
else:
copyfile(args.base_html, tmp_dir + "/base.html")
if not os.path.islink(tmp_dir + "/src"):
os.symlink(script_path + "/src", tmp_dir + "/src")
copy_tree(".", tmp_dir)
# Markdown parsing
subprocess.check_output(script_path + "/md-parsing " +
tmp_dir, shell=True).decode('utf-8')
html_file_name = tmp_dir + "output.html"
# Interpret JS code
if not args.basic:
driver.get("file:///" + html_file_name)
sleep(2)
elem = driver.find_element_by_xpath("//*")
interpreted_html = elem.get_attribute("outerHTML")
with open(html_file_name, "w") as html_out_file:
html_out_file.write(interpreted_html)
# Create final PDF file
pdf = HTML(html_file_name).write_pdf()
f = open("output.pdf", 'wb')
f.write(pdf)
if not args.quiet:
stdout.write("\rDone. ")
stdout.flush()
recompile(None)
if not args.watch:
if not args.basic:
driver.quit()
exit(0)
watch_manager = pyinotify.WatchManager()
event_notifier = pyinotify.Notifier(watch_manager, recompile)
watch_manager.add_watch(os.path.abspath("."), pyinotify.ALL_EVENTS, rec=True)
event_notifier.loop()
if not args.basic:
driver.quit()
| 27.827586 | 105 | 0.662949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 890 | 0.22057 |
8a7e18d0d0b30bb03c5125997bb7d29ab2737184 | 902 | py | Python | DFS/13023.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | DFS/13023.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | DFS/13023.py | kjh9267/BOJ_Python | b4d2ae09c252cc9280df93ccecbd07880947827e | [
"Apache-2.0"
] | null | null | null | # https://www.acmicpc.net/problem/13023
import sys
sys.setrecursionlimit(999999999)
def dfs_all():
is_possible = [False]
for node in range(N):
visited = [False for _ in range(N)]
dfs(node, 0, visited, is_possible)
if is_possible[0]:
return 1
return 0
def dfs(cur, depth, visited, is_possible):
if visited[cur]:
return
if depth == target_depth:
is_possible[0] = True
return
visited[cur] = True
for nxt in graph[cur]:
dfs(nxt, depth + 1, visited, is_possible)
visited[cur] = False
if __name__ == '__main__':
input = __import__('sys').stdin.readline
target_depth = 4
N, M = map(int, input().split())
graph = [list() for _ in range(N)]
for _ in range(M):
a, b = map(int, input().split())
graph[a].append(b)
graph[b].append(a)
print(dfs_all())
| 19.191489 | 49 | 0.578714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.059867 |
8a7ecd71a92cf19cd5b6422ac30a671d4195653c | 1,358 | py | Python | experiments/bst/setup.py | bigchaindb/privacy-protocols | d220f642c7c056e5ec179b47a8d0863dbc373d9d | [
"CC-BY-4.0"
] | 68 | 2017-08-02T14:22:59.000Z | 2022-02-19T05:27:42.000Z | experiments/bst/setup.py | bigchaindb/privacy-protocols | d220f642c7c056e5ec179b47a8d0863dbc373d9d | [
"CC-BY-4.0"
] | 6 | 2017-08-05T18:30:14.000Z | 2017-08-22T19:54:53.000Z | experiments/bst/setup.py | bigchaindb/privacy-protocols | d220f642c7c056e5ec179b47a8d0863dbc373d9d | [
"CC-BY-4.0"
] | 15 | 2017-08-22T16:04:26.000Z | 2022-03-13T10:36:02.000Z | """bst: BigchainDB Sharing Tools"""
from setuptools import setup, find_packages
install_requires = [
'base58~=0.2.2',
'PyNaCl~=1.1.0',
'bigchaindb-driver',
'click==6.7',
'colorama',
]
setup(
name='bst',
version='0.1.0',
description='bst: BigchainDB Sharing Tools',
long_description=(
        'A collection of scripts with different patterns to share '
'private data on BigchainDB.'),
url='https://github.com/vrde/bst/',
author='Alberto Granzotto',
author_email='[email protected]',
license='AGPLv3',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development',
'Natural Language :: English',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'bst=bst.cli:main'
],
},
install_requires=install_requires
)
| 26.115385 | 74 | 0.594993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.59352 |
8a7f754432204bffd274f53972f0d99bc17086e5 | 118 | py | Python | polyaxon/db/admin/job_resources.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/db/admin/job_resources.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/db/admin/job_resources.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | from django.contrib import admin
from db.models.job_resources import JobResources
admin.site.register(JobResources)
| 19.666667 | 48 | 0.847458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8a7f7c81cefa2649d2218e763e7fb484932406a9 | 8,498 | py | Python | voting_ml/main.py | tommy-waltmann/voting-ml | 327de4515d8f2f7b8e072833df20eca651621ea6 | [
"BSD-3-Clause"
] | null | null | null | voting_ml/main.py | tommy-waltmann/voting-ml | 327de4515d8f2f7b8e072833df20eca651621ea6 | [
"BSD-3-Clause"
] | 2 | 2021-04-20T19:04:36.000Z | 2021-04-24T22:33:47.000Z | voting_ml/main.py | tommy-waltmann/voting-ml | 327de4515d8f2f7b8e072833df20eca651621ea6 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import sklearn
import subprocess
from sklearn import model_selection, tree
import data
import feature_selection
import model_sel
import os
import matplotlib.pyplot as plt
import seaborn as sns
def main():
#parameter space
list_test_size = [0.1,0.15,0.2] # decide this
list_ftsel_method = ['chi2','mutlinfo','pca','dt']
list_num_features = [10,15,20] # decide this
list_Kfold = [3,5]
list_corr_threshold = [1,0.5,0.6,0.7] # decide this
param_space = {
'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 7],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [2, 5, 10],
'max_leaf_nodes': [2, 4, 6, 8, 10, 12, 15],
}
repeat = 1
    # output dictionary list
list_output_dict = []
# output directory path
outdir = "../results/run1/"
if(not os.path.isdir(outdir)):
os.mkdir(outdir)
o_models_file = open(outdir+"models.csv","w")
o_models_file.write("test size,run num,ftsel method,Kfold,number of features,correlation threshold,best features,criterion,max_depth,max_leaf_nodes,min_samples_leaf,min_samples_split,training accuracy,test accuracy\n")
#splitting data and weights into train, test (refer to optimal_params.py)
poll_data = data.PollDataProxy(remove_nan=False, convert_to_float=False)
acc = []
    '''refer to optimal_params.py. Functions from that python script are moved here (get_bad_questions() and separate_weights()).'''
for ts in list_test_size:
for run_num in range(repeat):
all_data, all_data_questions = poll_data.all_data_except(get_bad_questions())
X = all_data[:, :-1]
y = all_data[:, -1]
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y,
test_size=ts,
shuffle=True)
X_train, weights_train, questions = separate_weights(X_train, all_data_questions[:-1])
X_test, weights_test, _ = separate_weights(X_test, all_data_questions[:-1])
print("Number of Training Samples:", len(X_train))
print("Number of Testing Samples:", len(X_test))
data_dict = {
'X_train': X_train,
'X_test': X_test,
'y_train': y_train,
'y_test': y_test
}
weights_dict = {
'weights_train': weights_train,
'weights_test': weights_test}
for meth in list_ftsel_method:
'''Create class objects of the current selection method'''
for thres in list_corr_threshold:
data_ranked_dict, ranked_questions = {}, []
                    ftsel_obj = None
if(meth=='chi2'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_chi2"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres)
elif(meth=='mutlinfo'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_mutlinfo"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_mutlinfo(data_dict, thres)
elif(meth=='pca'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_pca"
)
data_ranked_dict,_ = ftsel_obj.ftsel_pca(data_dict)
                        fts = data_ranked_dict['X_train'].shape[1]  # use the pca-transformed features; data_sel_dict is not defined yet here
questions_int = list(map(str, list(range(1,fts+1,1))))
ranked_questions = ["ft_"+x for x in questions_int]
elif(meth=='dt'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_dt"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres)
for num in list_num_features:
data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions)
ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions)
for K in list_Kfold:
                            '''Here create a class object of "model_sel" and output all the best parameters and values into "list_output_dict". Then a .csv file can be created to list all the models and accuracies.'''
model_obj = model_sel.model_sel(ts, run_num, meth, param_space, K, num, thres, data_sel_dict ,weights_dict, sel_questions, outdir).select_model()
# intermediate = model_obj.select_model()
acc.append(model_obj['test_acc'])
o_models_file.write(str(ts)+",")
o_models_file.write(str(run_num)+",")
o_models_file.write(meth+",")
o_models_file.write(str(K)+",")
o_models_file.write(str(num)+",")
o_models_file.write(str(thres)+",")
for ii in range(len(model_obj['best_features'])):
o_models_file.write(model_obj['best_features'][ii]+" ")
o_models_file.write(",")
o_models_file.write(model_obj['best_params']['criterion']+",")
o_models_file.write(str(model_obj['best_params']['max_depth'])+",")
o_models_file.write(str(model_obj['best_params']['max_leaf_nodes'])+",")
o_models_file.write(str(model_obj['best_params']['min_samples_leaf'])+",")
o_models_file.write(str(model_obj['best_params']['min_samples_split'])+",")
o_models_file.write(str(model_obj['train_acc'])+",")
o_models_file.write(str(model_obj['test_acc'])+",")
o_models_file.write("\n")
list_output_dict.append(model_obj)
'''Once all the models are run, select the model with best test accuracy and return the output dict for that model.'''
o_models_file.close()
best_index = np.argmax(acc)
best_model_dict = list_output_dict[best_index]
print("The best model parameters:")
print(best_model_dict)
def get_bad_questions():
f = open("../extern/manage_data/list_unnecessary_columns.txt", 'r')
bad_questions = f.readline().split(',')
bad_questions[-1] = bad_questions[-1][:-1] # chop the \n off the end
bad_questions.remove('weight') # need weight for training
return bad_questions
def separate_weights(X_train, column_names):
"""
Removes the column containing weights from X_train, and returns it as
a separate array.
"""
weight_column_idx = column_names.index('weight')
weights = X_train[:, weight_column_idx]
new_X_train = np.delete(X_train, weight_column_idx, axis=1)
new_questions = column_names
new_questions.remove('weight')
return new_X_train, weights, new_questions
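# Minimal illustration of the column removal performed above (shapes are
# hypothetical, not taken from the actual poll data):
#
#   X = np.arange(12).reshape(3, 4)          # 3 samples, 4 columns
#   np.delete(X, 2, axis=1).shape            # -> (3, 3): column 2 dropped, e.g. the weights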
if __name__ == "__main__":
main()
| 47.741573 | 222 | 0.564603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,194 | 0.258178 |
8a7f9273d28271b0f56005e762e91504d2293322 | 12,334 | py | Python | src/the_tale/the_tale/game/heroes/tests/test_logic.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/heroes/tests/test_logic.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/heroes/tests/test_logic.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class HeroDescriptionTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def test_no_description(self):
self.assertEqual(logic.get_hero_description(self.hero.id), '')
def test_has_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla')
def test_update_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
logic.set_hero_description(self.hero.id, 'new description')
self.assertEqual(logic.get_hero_description(self.hero.id), 'new description')
class CreateHero(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx',
email='[email protected]',
is_fast=False)
self.attributes = {'is_fast': False,
'is_bot': False,
'might': 0,
'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3),
'premium_state_end_at': datetime.datetime.fromtimestamp(0),
'ban_state_end_at': datetime.datetime.fromtimestamp(0)}
def test_default(self):
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.id, self.account.id)
self.assertEqual(hero.account_id, self.account.id)
self.assertIn(hero.gender, (game_relations.GENDER.MALE,
game_relations.GENDER.FEMALE))
self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration)
self.assertEqual(hero.habit_honor.raw_value, 0)
self.assertEqual(hero.habit_peacefulness.raw_value, 0)
self.assertTrue(hero.preferences.archetype.is_NEUTRAL)
self.assertTrue(hero.upbringing.is_PHILISTINE)
self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS)
self.assertTrue(hero.death_age.is_MATURE)
def test_account_attributes_required(self):
for attribute in self.attributes.keys():
with self.assertRaises(exceptions.HeroAttributeRequiredError):
logic.create_hero(account_id=self.account.id,
attributes={key: value for key, value in self.attributes.items() if key != attribute })
def test_account_attributes(self):
attributes = {'is_fast': random.choice((True, False)),
'is_bot': random.choice((True, False)),
'might': random.randint(1, 1000),
'active_state_end_at': datetime.datetime.fromtimestamp(1),
'premium_state_end_at': datetime.datetime.fromtimestamp(2),
'ban_state_end_at': datetime.datetime.fromtimestamp(3)}
logic.create_hero(account_id=self.account.id, attributes=attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.is_fast, attributes['is_fast'])
self.assertEqual(hero.is_bot, attributes['is_bot'])
self.assertEqual(hero.might, attributes['might'])
self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at'])
self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at'])
self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at'])
def test_attributes(self):
self.attributes.update({'race': game_relations.RACE.random(),
'gender': game_relations.GENDER.random(),
'name': game_names.generator().get_name(game_relations.RACE.random(),
game_relations.GENDER.random()),
'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'archetype': game_relations.ARCHETYPE.random(),
'upbringing': tt_beings_relations.UPBRINGING.random(),
'first_death': tt_beings_relations.FIRST_DEATH.random(),
'death_age': tt_beings_relations.AGE.random()})
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.race, self.attributes['race'])
self.assertEqual(hero.gender, self.attributes['gender'])
self.assertEqual(hero.utg_name, self.attributes['name'])
self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness'])
self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor'])
self.assertEqual(hero.preferences.archetype, self.attributes['archetype'])
self.assertEqual(hero.upbringing, self.attributes['upbringing'])
self.assertEqual(hero.first_death, self.attributes['first_death'])
self.assertEqual(hero.death_age, self.attributes['death_age'])
class RegisterSpendingTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
self.hero.premium_state_end_at
game_tt_services.debug_clear_service()
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_not_in_place(self):
self.hero.position.set_position(0, 0)
self.assertEqual(self.hero.position.place_id, None)
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False)
def test_can_not_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 100)
self.assertTrue(impacts[0].target_type.is_PLACE)
self.assertEqual(impacts[0].target_id, self.places[0].id)
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power__below_zero(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
logic.register_spending(self.hero, -50)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 150)
class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin,
utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def place_0_cost(self):
return logic.get_places_path_modifiers(self.hero)[self.places[0].id]
def test_every_place_has_modifier(self):
modifiers = logic.get_places_path_modifiers(self.hero)
self.assertEqual(set(modifiers.keys()), {place.id for place in self.places})
def test_race_bonus(self):
self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,))
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].race = self.hero.race
def test_modifier_bonus(self):
self.assertFalse(self.places[0].is_modifier_active())
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT)
self.create_effect(self.places[0].id,
value=100500,
attribute=places_relations.ATTRIBUTE.MODIFIER_FORT,
delta=0)
self.places[0].refresh_attributes()
self.assertTrue(self.places[0].is_modifier_active())
def test_home_place(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0])
def test_friend(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0])
def test_enemy(self):
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0])
def test_tax(self):
self.places[0].attrs.size = 10
self.places[0].refresh_attributes()
self.assertEqual(self.places[0].attrs.tax, 0)
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.create_effect(self.places[0].id,
value=100,
attribute=places_relations.ATTRIBUTE.TAX,
delta=0)
self.places[0].refresh_attributes()
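    # Each HABITS_DELTAS tuple below is (place habit direction, hero habit direction,
    # expected path-cost delta): matching extreme habits make the place slightly
    # cheaper to reach, opposite extremes slightly more expensive, and any neutral
    # value leaves the cost unchanged.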
HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA),
(-1, 0, 0),
(-1, +1, +c.PATH_MODIFIER_MINOR_DELTA),
( 0, -1, 0),
( 0, 0, 0),
( 0, +1, 0),
(+1, -1, +c.PATH_MODIFIER_MINOR_DELTA),
(+1, 0, 0),
(+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)]
def test_habits__honor(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_honor.set_habit(0)
self.hero.habit_honor.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER)
def test_habits__peacefulness(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_peacefulness.set_habit(0)
self.hero.habit_peacefulness.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_peacefulness.set_habit(hero_direction * c.HABITS_BORDER)
| 44.688406 | 137 | 0.652749 | 12,279 | 0.995541 | 0 | 0 | 2,019 | 0.163694 | 0 | 0 | 752 | 0.06097 |
8a7fb03f3abaa9ff95210abc3bc840c8008d9076 | 41 | py | Python | tinylinks/tests/test_app/models.py | brad/django-tinylinks | b3ae58ebe0d0292b7f618e9b0f1a08d2fb61b173 | ["MIT"] | 11 | 2016-11-27T15:46:42.000Z | 2021-07-31T14:03:54.000Z | tinylinks/tests/test_app/models.py | brad/django-tinylinks | b3ae58ebe0d0292b7f618e9b0f1a08d2fb61b173 | ["MIT"] | 2 | 2016-12-27T19:53:59.000Z | 2017-05-26T07:12:02.000Z | tinylinks/tests/test_app/models.py | brad/django-tinylinks | b3ae58ebe0d0292b7f618e9b0f1a08d2fb61b173 | ["MIT"] | 5 | 2015-02-01T01:10:31.000Z | 2015-10-29T18:48:59.000Z | """Dummy model needed for tests."""
pass
| 13.666667 | 35 | 0.682927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.853659 |
8a7fb88f2b8f8ab7d00332f23a58d29ccc1392ee | 1,346 | py | Python | postcipes/hydraulic_jump.py | timofeymukha/postcipes | f37b349038e26bb0295a2511295a46ef63fcd851 | ["MIT"] | null | null | null | postcipes/hydraulic_jump.py | timofeymukha/postcipes | f37b349038e26bb0295a2511295a46ef63fcd851 | ["MIT"] | null | null | null | postcipes/hydraulic_jump.py | timofeymukha/postcipes | f37b349038e26bb0295a2511295a46ef63fcd851 | ["MIT"] | 1 | 2019-03-20T22:39:55.000Z | 2019-03-20T22:39:55.000Z | # This file is part of postcipes
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
from scipy.interpolate import interp1d
import numpy as np
import h5py
__all__ = ["HydraulicJump"]
class HydraulicJump(Postcipe):
def __init__(self, path):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.case['alphag'] = 1 - self.case['alpha.waterMean']
self.U = self.case.boundary_data("inlet", sort="y")[1]['UMean'][0, 0]
y_inlet = self.case.boundary_data("inlet", sort="y")[0][:, 1]
inlet_edge_length = tbl.edge_lengths(self.case, "inlet")
self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1]
self.Fr1 = self.U/np.sqrt(9.81*self.d)
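        # Sequent (downstream) depth from the Belanger relation for a hydraulic jump:
        # d2/d1 = (sqrt(1 + 8*Fr1^2) - 1)/2, with Fr1 the upstream Froude number.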
self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2
self.Fr2 = self.U/np.sqrt(9.81*self.d2)
iso05 = tbl.isoline(self.case, "alpha.waterMean", 0.5)
idx = iso05[:, 0].argsort()
self.xfs = iso05[idx, 0]
self.yfs = iso05[idx, 1]
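        # The free surface is taken as the 0.5 iso-contour of the mean water
        # fraction, sorted by x; the toe of the jump is then located where the
        # surface height first comes closest to 1.1 times the inlet depth,
        # searching only the upstream half of the profile.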
idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)]))
self.xtoe = self.xfs[idx_toe]
| 33.65 | 81 | 0.653046 | 904 | 0.67162 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.193908 |
8a80483513e593a3c49ee46795ac3b8d601f6b9a | 416 | py | Python | main/SimulationSettings/ScreenshotsSteppable/Simulation/screenshots_steppables.py | JulianoGianlupi/nh-cc3d-4x-base-tool | c0f4aceebd4c5bf3ec39e831ef851e419b161259 | ["CC0-1.0"] | null | null | null | main/SimulationSettings/ScreenshotsSteppable/Simulation/screenshots_steppables.py | JulianoGianlupi/nh-cc3d-4x-base-tool | c0f4aceebd4c5bf3ec39e831ef851e419b161259 | ["CC0-1.0"] | null | null | null | main/SimulationSettings/ScreenshotsSteppable/Simulation/screenshots_steppables.py | JulianoGianlupi/nh-cc3d-4x-base-tool | c0f4aceebd4c5bf3ec39e831ef851e419b161259 | ["CC0-1.0"] | 1 | 2021-02-26T21:50:29.000Z | 2021-02-26T21:50:29.000Z | from cc3d.core.PySteppables import *
from cc3d import CompuCellSetup
from random import random
class ScreenshotSteppable(SteppableBasePy):
def __init__(self, frequency=10):
SteppableBasePy.__init__(self, frequency)
def step(self, mcs):
if mcs in [3, 5, 19,20, 23, 29, 31]:
self.request_screenshot(mcs=mcs, screenshot_label='Cell_Field_CellField_2D_XY_0')
| 27.733333 | 93 | 0.6875 | 297 | 0.713942 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.072115 |
8a80b1c774bd44450fbb371648857468404e7e42 | 3,350 | py | Python | aesara/gpuarray/optdb.py | anirudhacharya/aesara | cbf91122296b68ee2ad592b2312d56f6ff65ba53 | ["BSD-3-Clause"] | 1 | 2021-11-09T10:19:46.000Z | 2021-11-09T10:19:46.000Z | aesara/gpuarray/optdb.py | anirudhacharya/aesara | cbf91122296b68ee2ad592b2312d56f6ff65ba53 | ["BSD-3-Clause"] | null | null | null | aesara/gpuarray/optdb.py | anirudhacharya/aesara | cbf91122296b68ee2ad592b2312d56f6ff65ba53 | ["BSD-3-Clause"] | null | null | null | from aesara.compile import optdb
from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer
from aesara.graph.optdb import (
EquilibriumDB,
LocalGroupDB,
OptimizationDatabase,
SequenceDB,
)
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
# Not used for an EquilibriumOptimizer. It has the "tracks" that we need for GraphToGPUDB.
gpu_optimizer2 = EquilibriumDB()
gpu_seqopt = SequenceDB()
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register(
"gpuarray_opt",
gpu_seqopt,
optdb.__position__.get("add_destroy_handler", 49.5) - 1,
"gpuarray",
)
pool_db = LocalGroupDB()
pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
pool_db2.__name__ = "pool_db2"
matrix_ops_db = LocalGroupDB()
matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
matrix_ops_db2.__name__ = "matrix_ops_db2"
abstract_batch_norm_db = LocalGroupDB()
abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
abstract_batch_norm_db2.__name__ = "abstract_batch_norm_db2"
abstract_batch_norm_groupopt = LocalGroupDB()
abstract_batch_norm_groupopt.__name__ = "gpuarray_batchnorm_opts"
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
gpu_optimizer.register(name, local_opt, "fast_run", "gpuarray", *tags)
return local_opt
return f
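# Example usage (illustrative only; the optimizer name and Op below are
# hypothetical, not part of this module):
#
#     @register_opt('fast_compile')
#     @local_optimizer([SomeOp])
#     def local_some_op_to_gpu(fgraph, node):
#         ...
#
# This registers the decorated local optimizer in gpu_optimizer under the
# "fast_run" and "gpuarray" tags plus any extra tags passed to register_opt.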
def register_opt2(tracks, *tags, **kwargs):
"""
Decorator for the new GraphToGPU optimizer.
    Takes an extra parameter (Op) compared to the register_opt decorator.
Parameters
----------
tracks : List of Op class Or Op instance or None
The Node's Op to which optimization is being applied.
tags : String
The optimization tag to which the optimizer will be registered.
"""
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
if isinstance(local_opt, OptimizationDatabase):
opt = local_opt
else:
opt = local_optimizer(tracks)(local_opt)
gpu_optimizer2.register(name, opt, "fast_run", "gpuarray", *tags)
return local_opt
return f
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
optdb.register(
name,
TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace),
60,
"fast_run",
"inplace",
"gpuarray",
*tags,
)
return local_opt
return f
# Register GPU convolution implementation
# They are tried in a specific order so we can control
# which ones take precedence over others.
abstractconv_groupopt = LocalGroupDB()
abstractconv_groupopt.__name__ = "gpuarray_abstractconv_opts"
register_opt("fast_compile")(abstractconv_groupopt)
class GraphToGPUDB(OptimizationDatabase):
"""
Retrieves the list local optimizers based on the optimizer flag's value
from EquilibriumOptimizer by calling the method query.
"""
def query(self, *tags, **kwtags):
from aesara.gpuarray.opt import GraphToGPU
opt = gpu_optimizer2.query(*tags, **kwtags)
return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)
| 28.632479 | 90 | 0.711343 | 414 | 0.123582 | 0 | 0 | 0 | 0 | 0 | 0 | 1,067 | 0.318507 |
8a810acd6b334888a1432a3e590727946894d380 | 4,579 | py | Python | jenkinsapi/node.py | imsardine/jenkinsapi | d4bfac62a4d01394ff41540c4d8d897ab566f4eb | ["MIT"] | null | null | null | jenkinsapi/node.py | imsardine/jenkinsapi | d4bfac62a4d01394ff41540c4d8d897ab566f4eb | ["MIT"] | null | null | null | jenkinsapi/node.py | imsardine/jenkinsapi | d4bfac62a4d01394ff41540c4d8d897ab566f4eb | ["MIT"] | null | null | null | """
Module for jenkinsapi Node class
"""
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.custom_exceptions import PostRequired
import logging
try:
from urllib import quote as urlquote
except ImportError:
# Python3
from urllib.parse import quote as urlquote
log = logging.getLogger(__name__)
class Node(JenkinsBase):
"""
Class to hold information on nodes that are attached as slaves
to the master jenkins instance
"""
def __init__(self, baseurl, nodename, jenkins_obj):
"""
Init a node object by providing all relevant pointers to it
:param baseurl: basic url for querying information on a node
:param nodename: hostname of the node
:param jenkins_obj: ref to the jenkins obj
:return: Node obj
"""
self.name = nodename
self.jenkins = jenkins_obj
JenkinsBase.__init__(self, baseurl)
def get_jenkins_obj(self):
return self.jenkins
def __str__(self):
return self.name
def is_online(self):
return not self.poll(tree='offline')['offline']
def is_temporarily_offline(self):
return self.poll(tree='temporarilyOffline')['temporarilyOffline']
def is_jnlpagent(self):
return self._data['jnlpAgent']
def is_idle(self):
return self._data['idle']
def set_online(self):
"""
Set node online.
        Before changing state, verify the client state: if the node is set
        'offline' but 'temporarilyOffline' is not set, the client has
        connection problems and an AssertionError is raised.
        If the node state has not changed after the call, an AssertionError
        is also raised.
"""
self.poll()
# Before change state check if client is connected
if self._data['offline'] and not self._data['temporarilyOffline']:
raise AssertionError("Node is offline and not marked as "
"temporarilyOffline, check client "
"connection: offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
elif self._data['offline'] and self._data['temporarilyOffline']:
self.toggle_temporarily_offline()
if self._data['offline']:
raise AssertionError("The node state is still offline, "
"check client connection:"
" offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
def set_offline(self, message="requested from jenkinsapi"):
"""
Set node offline.
        If the node state has not changed after the call, an AssertionError
        is raised.
        :param message: optional string explaining why you are taking this
        node offline
"""
if not self._data['offline']:
self.toggle_temporarily_offline(message)
data = self.poll(tree='offline,temporarilyOffline')
if not data['offline']:
raise AssertionError("The node state is still online:" +
"offline = %s , temporarilyOffline = %s" %
(data['offline'],
data['temporarilyOffline']))
def toggle_temporarily_offline(self, message="requested from jenkinsapi"):
"""
        Switches the state of the connected node (online/offline) and
        sets the 'temporarilyOffline' property (True/False).
        Calling the same method again will bring the node status back.
        :param message: optional string that can be used to explain why
        you are taking this node offline
"""
initial_state = self.is_temporarily_offline()
url = self.baseurl + \
"/toggleOffline?offlineMessage=" + urlquote(message)
try:
html_result = self.jenkins.requester.get_and_confirm_status(url)
except PostRequired:
html_result = self.jenkins.requester.post_and_confirm_status(
url,
data={})
self.poll()
log.debug(html_result)
state = self.is_temporarily_offline()
if initial_state == state:
raise AssertionError(
"The node state has not changed: temporarilyOffline = %s" %
state)
| 37.227642 | 79 | 0.580913 | 4,255 | 0.929242 | 0 | 0 | 0 | 0 | 0 | 0 | 2,028 | 0.442891 |
8a82d93e4ba8abbe55f44853090dbccbc8c6e819 | 48,277 | py | Python | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 05/07/2015 4027 randerso Migrated A1 OB9.16 code to A2
# 06/17/2015 4027 dgilling Perform case-insensitive
# comparisons in foundCTAs.
# 07/13/2015 4648 randerso Fix bullets in follow up products
# 02/24/2016 5411 randerso Make bullet headers upper case
# 07/15/2016 5749 randerso Replaced ellipses with commas in hazardBodyText
#
##
# This is a base file that is not intended to be overridden.
##
#-------------------------------------------------------------------------
# Description: This product is a template for creating Hazard Products.
#-------------------------------------------------------------------------
# Copying:
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#-------------------------------------------------------------------------
# Standard and Local file names and Locations:
# GenericHazards
#-------------------------------------------------------------------------
# Customization Points:
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
# displayName If not None, defines how product appears in GFE GUI
#
# You must set the following:
#
# productName defines name of product e.g. "Zone Forecast Product"
# fullStationID Full station identifier, 4 letter, such as "KSLC".
# wmoID WMO ID code for product header, such as "FOUS45"
# pil Product pil, such as "SFTBOS"
# areaName (opt.) Area name for product header, such as "Western New York"
# wfoCityState City,state that the WFO is located in, such as "Buffalo NY"
#
# Optional Configuration Items
#
# mapNameForCombinations Name of the map background that is used for
# creating/editing the combinations file. This must
# be defined or the GFE zone combiner cannot be used.
# database Source database for product. Can be "Official",
# "Fcst" or "ISC"
# outputFile Defines the output location of the finished product.
# Product is saved if autoWrite is 1.
# debug If on, debug_print statements will appear.
# textdbPil Defines the awips product identifier
# (e.g., DENCCFDEN) that is used to store the product
# in the AWIPS text database. The product is not
# automatically stored unless autoStore is 1. This
# value is also used for the default GUI entry for
# storage.
# awipsWANPil Defines the awips product identifier
# (e.g., KBOUCCFDEN) that is used to transmit the
# product to the AWIPS WAN. The product is not
# automatically transmitted unless autoSend is 1.
# This value is also used for the default GUI
# entry for storage.
# autoSend If set to 1, then the product will be automatically
# sent on the AWIPS WAN to the "autoSendAddress" with
# the "awipsWANPil after product creation.
# autoStore If set to 1, then the product will be automatically
# stored into the text database using the "textdbPil"
# after product creation.
# autoWrite If set to 1, then the product will be automatically
# written to the "output" named disk file after
# product creation.
#
# lineLength max length of each line
#
# defaultEditAreas defines edit areas, default is Combinations
#
# purgeTime Maximum number of hours past issuance time for the
# expire time.
# includeCities If 1, cities will be included in the area header
# accurateCities If 1, cities are determined from grids
# citiesPhrase "Including the cities of" phrase used when including
# cities
# includeZoneNames If 1, zone names will be included in the area header
# easPhrase Optional EAS phrase to be include in product header
#
# hazardSamplingThreshold Defines the percentage coverage or number of
# grid points in a zone that must contain the hazard
# in order for it to be considered. Tuple (percent, points)
# includeOverviewHeadline If 1, the overview header is templated
# includeOverview If 1, the overview section is templated
# bulletProd If 1, the product will use a bullet format
#-------------------------------------------------------------------------
# Weather Elements Needed:
# Hazards
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
# Combinations file
#-------------------------------------------------------------------------
# Component Products:
# Hazards
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------
import LogStream
import TextRules
import SampleAnalysis
import time, string, types, copy, re
import CallToActions
import AbsTime
class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis,
CallToActions.CallToActions):
Definition = {
"type": "smart",
"displayName": None,
# Source database for product. Can be "Official", "Fcst" or "ISC"
"database": "Official",
# Defines output location of finished product.
"outputFile": "{prddir}/TEXT/genHaz.txt",
"debug": 0,
# Name of map background for creating Combinations
# Can be:
# Zones_BOU
# FireWxZones_BOU
# Counties
# Marine_Zones_BOU
"mapNameForCombinations": "Zones_<site>",
## Edit Areas: Create Combinations file with edit area combinations.
## Can be:
## EditAreas_PublicZones_BOU
## EditAreas_FireWx_BOU
## EditAreas_FIPS_BOU
## EditAreas_MarineZones_BOU
"defaultEditAreas" : "EditAreas_PublicZones_<site>_<MultiPil>",
# product identifiers
"productName": "Generic Hazard Product", # product name
"fullStationID": "<fullStationID>", # full station identifier (4letter)
"wmoID": "<wmoID>", # WMO ID
"pil": "<pil>", # Product pil
"areaName": "", # Name of state, such as "Georgia" -- optional
"wfoCityState": "<wfoCityState>", # Location of WFO - city,state
"textdbPil": "<textdbPil>", # Product ID for storing to AWIPS text database.
"awipsWANPil": "<awipsWANPil>", # Product ID for transmitting to AWIPS WAN.
"periodCombining" : 0, # If 1, combine periods, if possible
# automatic functions
"autoSend": 0, #set to 1 to automatically transmit product
"autoSendAddress": "000", #transmission address
"autoStore": 0, #set to 1 to automatically store product in textDB
"autoWrite": 0, #set to 1 to automatically write product to file
# Area Dictionary -- Descriptive information about zones
"areaDictionary": "AreaDictionary",
# Language
"language": "english",
"lineLength": 66, #Maximum line length
"purgeTime": 8, # Maximum hours for expireTime
"includeCities": 1 , # Cities included in area header
"accurateCities": 0, # Include all cities in area header
"cityLocation": "CityLocation", # City lat/lon dictionary to use
"cityDescriptor":"Including the cities of",
"includeZoneNames":1, # Zone names will be included in the area header
"easPhrase" :"", # Optional EAS phrase to be include in product header
"includeOverviewHeadline": 1, #include overview header
"includeOverview": 1, #include overview section
"bulletProd": 0, # do not default to bullets
"hazardSamplingThreshold": (10, None), #(%cov, #points)
"callToAction": 1,
}
def __init__(self):
TextRules.TextRules.__init__(self)
SampleAnalysis.SampleAnalysis.__init__(self)
self.__overviewText = ""
self.__procCTA = None
def generateForecast(self, argDict):
# Generate Text Phrases for a list of edit areas
# Get variables
error = self._getVariables(argDict)
if error is not None:
return error
# Get the segments
hazardsC = argDict['hazards']
segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
if len(segmentList) == 0:
return "No hazards to report"
# Determine time ranges
error = self._determineTimeRanges(argDict)
if error is not None:
return error
# Initialize the output string
fcst = ""
fcst = self._preProcessProduct(fcst, argDict)
# Generate the product for each segment in the segmentList
fraction = 0
fractionOne = 1.0/float(len(segmentList))
percent = 50.0
self.setProgressPercentage(50)
for segmentAreas in segmentList:
self.progressMessage(fraction, percent, "Making Product for Segment")
fcst = self._preProcessArea(fcst, segmentAreas, self._expireTime, argDict)
fcst = self._makeProduct(fcst, segmentAreas, argDict)
fcst = self._postProcessArea(fcst, segmentAreas, argDict)
fraction = fractionOne
fcst = self._postProcessProduct(fcst, argDict)
return fcst
def _getVariables(self, argDict):
# Make argDict accessible
self.__argDict = argDict
# Get Definition variables
self._definition = argDict["forecastDef"]
for key in self._definition.keys():
exec "self._" + key + "= self._definition[key]"
# Get VariableList
varDict = argDict["varDict"]
for key in varDict.keys():
if type(key) is types.TupleType:
label, variable = key
exec "self._" + variable + "= varDict[key]"
self._language = argDict["language"]
# Set up information for Hazards product
self._hazards = argDict['hazards']
self._combinations = argDict["combinations"]
return None
def _determineTimeRanges(self, argDict):
# Set up the time range for 0-240 hours
self._timeRange = self.createTimeRange(0, 240)
self._ddhhmmTime = self.getCurrentTime(
argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
self._currentTime = argDict['creationTime']
self._expireTime = self._issueTime + self._purgeTime*3600
self._timeLabel = self.getCurrentTime(
argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
return None
def _preProcessProduct(self, fcst, argDict):
# Product header
if self._areaName != "":
self._areaName = " for " + self._areaName
issuedByString = self.getIssuedByString()
productName = self.checkTestMode(argDict,
self._productName + self._areaName)
if len(self._easPhrase) != 0:
eas = self._easPhrase + '\n'
else:
eas = ''
s = self._wmoID + " " + self._fullStationID + " " + \
self._ddhhmmTime + "\n" + self._pil + "\n\n"
fcst = fcst + s.upper()
s = eas + productName + "\n" +\
"National Weather Service " + self._wfoCityState + \
"\n" + issuedByString + self._timeLabel + "\n\n"
fcst = fcst + s
fcst = fcst + "Default overview section\n"
return fcst
def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
# This is the header for an edit area combination
areaHeader = self.makeAreaHeader(
argDict, "", self._issueTime, expireTime,
self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
areaList=segmentAreas, includeCities=self._includeCities,
includeZoneNames = self._includeZoneNames,
accurateCities = self._accurateCities)
fcst = fcst + areaHeader
return fcst
def _makeProduct(self, fcst, segmentAreas, argDict):
argDict["language"] = self._language
# Generate Narrative Forecast for Edit Area
# get the hazards text
# We only need to get headlines for the first edit area
# in the segment since all areas in the segment have
# the same headlines
editArea = segmentAreas[0]
areaLabel = editArea
headlines = self.generateProduct("Hazards", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
fcst = fcst + headlines
return fcst
def _postProcessArea(self, fcst, segmentAreas, argDict):
return fcst + "\n\n$$\n\n"
def _postProcessProduct(self, fcst, argDict):
#
# If an overview exists for this product, insert it
#
overview = self.finalOverviewText()
overviewSearch = re.compile(r'Default overview section', re.DOTALL)
fcst = overviewSearch.sub(overview, fcst)
#
# Added to place line feeds in the CAP tags to keep separate from CTAs
fcst = string.replace(fcst, \
r"PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", \
r"\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n")
fcst = string.replace(fcst, "\n ","\n")
fcst = string.replace(fcst, "&&", "\n&&\n")
# Prevent empty Call to Action Tags
fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
"", fcst)
fcst = self._indentBulletText(fcst)
#
# Clean up multiple line feeds
#
fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
fcst = fixMultiLF.sub(r'\1', fcst)
# finish progress meter
self.setProgressPercentage(100)
self.progressMessage(0, 100, self._displayName + " Complete")
return fcst
def allowedHazards(self):
return []
# Added for DR 21194
def _bulletDict(self):
return []
# Added for DR 21309
def _bulletOrder(self):
return []
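    # The two placeholders above are intended to be overridden by site-level
    # products: _bulletDict should map a hazard phenomenon code to a
    # comma-separated string of bullet names, and _bulletOrder should return
    # the master ordering of those bullet names (see their use in
    # hazardBodyText below).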
## Replaced by 21309 code
## def _getBullets(self, newBulletList, argDict):
##
## ### get the bullet dictionary and split the bullets
## bDict = self._bulletDict()
## bLine = bDict.get(eachHazard['phen'])
## print 20* "*" + (eachHazard['phen'])
## bList = newBulletList.split(",")
##
## ### initialize the bullet output
## bullets = ""
##
## ### loop through the bullets and format the output
## for b in bList:
## bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
## # bullets = bullets + "\n"
## return bullets
def _indentBulletText(self, prevText):
print prevText
### if previous text is empty, return nothing
if prevText is None:
return prevText
###
### split the text
###
bullets = []
bullets = string.split(prevText, '\n\n')
if len(bullets) <= 1:
return prevText
###
### process the text
###
outText = ""
for b in bullets:
### if first character is a * we found a bullet
if re.match("\*", b):
### remove line feeds
removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
bullet = removeLF.sub(r'\1 \2',b)
### indent code
bullet = self.indentText(bullet, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "..."])
###
### the "-" in the breakStrings line above is causing issues with
### offices that use "-20 degrees" in the text.
###
outText = outText + bullet + "\n\n"
else: ### not a bullet, CTA text
outText = outText + b + "\n\n"
### that's it
print outText
return outText
# The _hazardTimePhrases method is passed a hazard key, and returns
# time phrase wording consistent with that generated by the headline
# algorithms in DiscretePhrases.
#
def hazardTimePhrases(self, hazard, argDict, prefixSpace=True):
timeWords = self.getTimingPhrase(hazard, argDict['creationTime'])
if prefixSpace and len(timeWords):
timeWords = " " + timeWords #add a leading space
return timeWords
#
# The method hazardBodyText creates an attribution phrase
#
def hazardBodyText(self, hazardList, argDict):
bulletProd = self._bulletProd
hazardBodyPhrase = ''
#
# First, sort the hazards for this segment by importance
#
sortedHazardList = []
for each in ['W', 'Y', 'A', 'O', 'S']:
for eachHazard in hazardList:
if eachHazard['sig'] == each:
if eachHazard not in sortedHazardList:
sortedHazardList.append(eachHazard)
#
# Next, break them into individual lists based on action
#
newList = []
canList = []
expList = []
extList = []
conList = []
upgList = []
statementList = []
for eachHazard in sortedHazardList:
if eachHazard['sig'] in ['S']and eachHazard['phen'] in ['CF', 'LS']:
statementList.append(eachHazard)
elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
newList.append(eachHazard)
elif eachHazard['act'] in ['CAN']:
canList.append(eachHazard)
elif eachHazard['act'] in ['EXP']:
expList.append(eachHazard)
elif eachHazard['act'] in ['EXT']:
extList.append(eachHazard)
elif eachHazard['act'] in ['UPG']:
upgList.append(eachHazard)
else:
conList.append(eachHazard)
#
# Now, go through each list and build the phrases
#
nwsIntroUsed = 0
#
# This is for the new hazards
#
phraseCount = 0
lastHdln = None
for eachHazard in newList:
hdln = eachHazard['hdln']
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazNameACap = self.sentence(hazNameA, addPeriod=False)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if hazName in ["Winter Weather Advisory", "Winter Storm Warning", "Beach Hazards Statement"]:
forPhrase = " for |* Enter hazard type *|"
else:
forPhrase =""
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " + self._wfoCity
nwsIntroUsed = 1
if phraseCount == 0:
phraseCount = 1
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + " has issued " + \
hazNameA + ". "
else:
hazardBodyPhrase += " has issued " + hazNameA + forPhrase + \
", which is in effect" + endTimePhrase + ". "
elif phraseCount == 1:
phraseCount = 2
if hdln != lastHdln:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued. This " + hazName + forPhrase + \
" is in effect" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + forPhrase + \
" has also been issued" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase += "In addition, " + \
hazNameA + " has been issued."
else:
hazardBodyPhrase += "In addition, " + \
hazNameA + forPhrase + " has been issued. This " + hazName + \
" is in effect" + endTimePhrase + ". "
lastHdln = hdln
#
# This is for the can hazards
#
for eachHazard in canList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " +\
self._wfoCity
nwsIntroUsed = 1
hazardBodyPhrase = hazardBodyPhrase + \
" has cancelled the " + hazName + ". "
else:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" has been cancelled. "
#
# This is for the exp hazards
#
phraseCount = 0
for eachHazard in expList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if eachHazard['endTime'] <= argDict['creationTime']:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
else:
expTimeCurrent = argDict['creationTime']
timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" will expire " + timeWords + ". "
#
# This is for ext hazards
#
for eachHazard in extList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is now in effect" + endTimePhrase + ". "
#
# This is for upgrade hazards
#
for eachHazard in upgList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
#
# This is for con hazards
#
for eachHazard in conList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
" remains in effect" + endTimePhrase + ". "
#
# This is for statement hazards
#
for eachHazard in statementList:
hazardBodyPhrase = "...|* Add statement headline *|...\n\n"
#
# This adds segment text
#
segmentText = ''
#
# Check that this segment codes to determine capture or not,
# and frame captured text or not
#
incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
self.useCaptureText(sortedHazardList)
#
#
# Check that the previous text exists
#
foundCTAs = []
for eachHazard in sortedHazardList:
if eachHazard.has_key('prevText'):
prevText = eachHazard['prevText']
if eachHazard['pil'] == 'MWS':
startPara = 0
else:
startPara = 1
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
tester = segmentText[0]
if tester == '*':
startPara = 1
else:
startPara = 2
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
#
# Check that the segment text isn't very short or blank
#
if len(segmentText) < 6:
incTextFlag = 0
# DR 21309 code addition from Middendorf (BYZ)
#
# Now if there is a new hazard and previous segment Text, then
# we may have to add bullets.
#
if incTextFlag and bulletProd:
for eachHazard in sortedHazardList:
if not eachHazard.has_key('prevText'):
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
print "newBullets = ", newBullets
print "segment text is: ", segmentText
for bullet in newBullets:
if re.search("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is None:
print bullet + " not in segmentText"
start = self._bulletOrder().index(bullet) + 1
end = len(self._bulletOrder())
bulletFlag = 1
for i in range(start,end):
if (re.search("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None) and bulletFlag:
print "* " + self._bulletOrder()[i] + "... found!"
segmentTextSplit = re.split("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\n* " + self._bulletOrder()[i] + "...")
bulletFlag = 0
if bulletFlag:
print "appending to bottom list of bullets!"
segmentTextSplit = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = "\n" + string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\nPRECAUTIONARY/PREPAREDNESS ACTIONS...")
bulletFlag = 0
#
# Now if there is a can/exp hazard and previous segment Text, then
# we may have to remove bullets.
#
if incTextFlag and bulletProd:
# First make list of bullets that we need to keep.
keepBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] not in ["CAN","EXP"]:
saveBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for saveBullet in saveBullets:
if saveBullet not in keepBulletList:
keepBulletList.append(saveBullet)
# Now determine which bullets we have to remove.
removeBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] in ["CAN","EXP"]:
canBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for canBullet in canBullets:
if canBullet not in keepBulletList and canBullet not in removeBulletList:
removeBulletList.append(canBullet)
print "hazardBodyText info: keepBulletList: ",keepBulletList
print "hazardBodyText info: removeBulletList: ",removeBulletList
# Finally remove the bullets no longer needed.
for bullet in removeBulletList:
if re.search("\* "+ bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None:
segmentTextSplit = re.split("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE)
print "segmentTextSplit is ", segmentTextSplit
segmentTextSplit2 = string.split(segmentTextSplit[1],"*",1)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "*" + segmentTextSplit2[1]
else:
segmentTextSplit2 = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentTextSplit[1], 1, flags=re.IGNORECASE)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "PRECAUTIONARY/PREPAREDNESS ACTIONS..." + segmentTextSplit2[1]
segmentText = string.join(segmentTextSplit,"")
if removeBulletList != []:
segmentText = "|*\n" + segmentText + "*|"
else:
segmentText = segmentText
#
# If segment passes the above checks, add the text
#
print "hazardBodyText info: incTextFlag: ",incTextFlag
if incTextFlag:
print "hazardBodyText info: segmentText: ",segmentText
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + \
segmentText + '\n\n'
elif bulletProd:
bulletFlag = 0
if eachHazard['act'] == 'CAN':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
elif eachHazard['act'] == 'EXP':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
else:
bulletFlag = 1
## print "bulletFlag is: ",bulletFlag
if bulletFlag:
newBulletList = []
bullets = ""
for eachHazard in sortedHazardList:
### get the default bullets for all hazards from the bullet dictionary
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for newBullet in newBullets:
if newBullet not in newBulletList:
newBulletList.append(newBullet)
print "my bullets are: ", newBulletList
### Determine the correct order for all bullets
bulletOrder = self._bulletOrder()
staticBulletOrder = self._bulletOrder()
for bullet in staticBulletOrder:
print "correct bullet order should be: ", bulletOrder
if bullet not in newBulletList:
bulletOrder.remove(bullet)
print "reordered bullets are: ", bulletOrder
for b in bulletOrder:
bullets = bullets + "* " + b.upper() + "...|* Enter bullet text *|\n\n"
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + bullets
# If segment doesn't pass the checks, put in framing codes
else:
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Statement text goes here *|.\n\n"
# End code for DR 21310
#
# This adds the call to action statements. This is only performed
# if the segment is 'NEW' or if the previous text has been discarded
# due to a CAN/EXP/UPG segment
#
# remove items from forceCTAList if they exist in foundCTAs. Note
# that the formats of these lists are different, thus this code
# is more complicated
for ent in foundCTAs:
#only process CTAs that are vtec phen/sig based
if ent.find('.') == 2:
phensig = (ent[0:2], ent[3]) #phen.sig
if phensig in forceCTAList:
del forceCTAList[forceCTAList.index(phensig)]
hazardBodyPhrase = hazardBodyPhrase + '\n\n'
ctas = []
for (phen,sig) in forceCTAList:
hazardPhenSig = phen + "." + sig
cta = self.defaultCTA(hazardPhenSig)
if cta not in ctas:
ctas.append(cta)
if len(ctas) > 0:
hazardBodyPhrase = hazardBodyPhrase + \
'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
for c in ctas:
hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'
# Make sure there is only one CAP tag pairs
hazardBodyPhrase = re.sub(r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', \
"", hazardBodyPhrase)
return hazardBodyPhrase
def finalOverviewText(self):
#if didn't calculate any, use the default
if len(self.__overviewText) == 0:
if self._includeOverviewHeadline:
overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
else:
overviewHeadline = ""
if self._includeOverview:
overviewBody = ".|*Overview (must edit)*|.\n\n"
else:
overviewBody = ""
#assemble the lines
overview = overviewHeadline + overviewBody
return overview
else:
return self.__overviewText
def overviewText(self, hazardList, pil):
#
# This method finds an overview in the previous product
#
overview = ""
for each in hazardList:
if (each.has_key('prevOverviewText') and
each.has_key('pil') and
each.has_key('endTime') and
each.has_key('act')):
if (each['pil'] == pil and
each['endTime'] > self._currentTime and
each['act'] not in ['CAN', 'EXP']):
overview = each['prevOverviewText']
self.__overviewText, dummy = self.cleanCapturedText(
overview, 0)
break
def useCaptureText(self, hazardList):
#Based on the hazardlist, returns a tuple indicating:
# (inc capture text, inc framing codes, skip CTAs, forceCTAList)
#
# For the values to be considered, the 'hdln' value must be
# present in the list, or it needs to be a Statement (sig="S")
cans = ['CAN','UPG','EXP']
acts = ['NEW','EXT','EXA','EXB','CON']
foundACTS = 0
foundCANS = 0
foundSig = []
for eh in hazardList:
if eh['act'] in acts and (len(eh['hdln']) or eh['sig'] == 'S'):
foundACTS = 1
if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):
foundCANS = 1
if eh['sig'] not in foundSig:
foundSig.append(eh['sig'])
includeFrameCodes = 0
includeText = 1
skipCTAs = 0
forceCTAList = []
# all actions are in CAN, UPG, EXP only (don't include text)
if foundCANS and not foundACTS:
if 'S' in foundSig and len(foundSig) == 1: #only S
includeFrameCodes = 1 #capture text, but frame it
else:
includeText = 0 #end of non statement
# something in CANS and something in acts (frame it, include text)
elif foundCANS and foundACTS:
includeFrameCodes = 1
skipCTAs = 1
for eh in hazardList:
if eh['act'] in acts and \
(eh['phen'], eh['sig']) not in forceCTAList and \
len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
#everything in active entries, captured text is used, but still
# need to handle the "NEW" entries.
else:
for eh in hazardList:
if eh['act'] in ['NEW'] and len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
return (includeText, includeFrameCodes, skipCTAs, forceCTAList)
def cleanCapturedText(self, text, paragraphs, addFramingCodes = False,
skipCTAs = False):
#
# This method takes a block of text, wraps it preserving blank lines,
# then returns the part after 'paragraphs'. So, if paragraphs is 0, it
# returns the whole thing, if it's 2, it returns paragraphs 2 -> end, etc.
# Headlines are always removed.
# Framing codes are added if specified.
#
paras = self.convertSingleParas(text) #single paragraphs
# keep track of any call to actions found
foundCTAs = []
# Process the paragraphs, keep only the interested ones
paraCount = 0
processedText = ''
for eachPara in paras:
if paraCount >= paragraphs:
found = self.ctasFound(eachPara) #get list of ctas found
if skipCTAs and len(found):
pass
else:
processedText = processedText + eachPara + '\n\n'
#keep track of remaining CTAs in processed text
for f in found:
if f not in foundCTAs:
foundCTAs.append(f)
if eachPara.find('...') == 0:
pass #ignore headlines
paraCount = paraCount + 1
# Add framing codes
if addFramingCodes:
processedText = processedText.rstrip()
processedText = "|*\n" + processedText + "*|\n"
# Wrap
processedText = self.endline(processedText,
linelength=self._lineLength, breakStr=[" ", "-", "..."])
return processedText, foundCTAs
def decodeBulletedText(self, prevText):
# returns the bullet paragraph text or None, returns the
# regular text after the bullets. The afterText is text up to
# the next bullet or up to "The National Weather Service". Note
# that this only correctly handles the 1st set of entries in
# a segment, thus double events will only decode the first set
# of bullets and text. The multipleRecords is set to 1 in the
# event that there are multiple sets of bullets. In this case
# only the 1st set was captured/decoded.
# (hazard, time, basis, impact, afterText, multipleRecords)
if prevText is None:
return (None, None, None, None, None, None)
# find the bullets
bullets = []
buf = prevText.split('\n\n* ')
if len(buf) <= 1:
return (None, None, None, None, None, None)
multRecords = 0 #indicator of multiple sets of bullets
for x in xrange(len(buf)):
if x == 0:
continue #headlines and text before the bullets
bullets.append(buf[x])
# find only the bulleted text, defined by the double line feed term.
# of the text
regText = "" #regular text after bullets
for x in xrange(1, len(bullets)):
index = bullets[x].find('\n\n')
if index != -1:
regText = bullets[x][index+2:]
bullets[x] = bullets[x][0:index] #eliminate after bullet text
if len(bullets) > x+2: #more bullets are present
multRecords = 1
bullets = bullets[0:x+1] #only interested in these bullets
break
# regular text is the remainder of the text. However we only
# want text from the last in the series of bullets to the
# beginning of any next NWS phrase.
lines = regText.split('\n')
for x in xrange(len(lines)):
if lines[x].find('The National Weather Service') == 0:
lines = lines[0:x] #eliminate following lines
break
regText = ("\n").join(lines)
# now clean up the text
for x in xrange(len(bullets)):
bullets[x] = string.replace(bullets[x],'\n',' ')
removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
regText = removeLF.sub(r'\1 \2',regText)
# extract out each section for returning the values
if len(bullets) >= 1:
hazard = bullets[0]
else:
hazard = None
if len(bullets) >= 2:
time = bullets[1]
else:
time = None
if len(bullets) >= 3:
basis = bullets[2]
else:
basis = None
if len(bullets) >= 4:
impact = bullets[3]
else:
impact = None
if len(regText) == 0:
regText = None #no regular text after bullets
return (hazard, time, basis, impact, regText, multRecords)
def substituteBulletedText(self, capText, defaultText, frameit="Never"):
#returns a properly formatted bulleted text based on
#the capText variable. If capText is None or 0 length, then
#the default text is used. frameit can be "Never", in which
#nothing is wrapped in framing codes, "Always" in which the
#text (default or cap) is wrapped in framing codes, or
#DefaultOnly" in which just the default text is wrapped.
if capText is not None and len(capText):
textToUse = capText[0].upper()+capText[1:]
if frameit == "Always":
textToUse = "|* " + textToUse + " *|"
else:
textToUse = defaultText
if frameit == "Always" or frameit == "DefaultOnly":
textToUse = "|* " + textToUse + " *|"
# add bullet codes
textToUse = "* " + textToUse
# format it
return self.indentText(textToUse, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "-", "..."])
def convertSingleParas(self, text):
#returns a list of paragraphs based on the input text.
lf = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
ptext = lf.sub(r'\1 \2', text)
ptext = ptext.replace('\n\n', '\n')
paragraphs = ptext.split('\n')
return paragraphs
def ctasFound(self, text):
#returns types of ctas found. The identifier is the pil (e.g., ZFP),
#phen/sig (e.g., DU.Y), or GENERIC. Uses the CallToAction definitions.
#convert text to single paragraphs
paragraphs = self.convertSingleParas(text)
for x in xrange(len(paragraphs)):
paragraphs[x] = string.replace(paragraphs[x],' ','')
#make list of call to actions (type, cta text)
if self.__procCTA is None:
self.__procCTA = []
ctao = CallToActions.CallToActions()
d = ctao.ctaDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
d = ctao.ctaPilDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
ctas = ctao.genericCTAs()
for it in ctas:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append(("GENERIC",
string.replace(cta,' ','')))
#compare
found = []
for para in paragraphs:
for (ctaType, cta) in self.__procCTA:
## Added following line to account for framing code issues in CTA
cta = re.sub("\|\*.*\*\|","",cta)
# We want this comparison to be case-insensitive just in case
# the site is not transmitting in mixed case yet.
if para.upper() == cta.upper() and ctaType not in found:
found.append(ctaType)
return found
| 41.29769 | 151 | 0.53106 | 41,309 | 0.855666 | 0 | 0 | 0 | 0 | 0 | 0 | 19,396 | 0.401765 |
8a8396f2f3ab51a489f606b57146366f183507ea | 14,346 | py | Python | virtualscreening/vina/spark/buried_areas.py | rodrigofaccioli/drugdesign | de15880af361a010729b1f4fbc8a75a2b36688a6 | ["Apache-2.0"] | 3 | 2015-01-19T20:12:59.000Z | 2019-02-21T18:43:04.000Z | virtualscreening/vina/spark/buried_areas.py | rodrigofaccioli/drugdesign | de15880af361a010729b1f4fbc8a75a2b36688a6 | ["Apache-2.0"] | 22 | 2015-01-05T16:48:54.000Z | 2017-01-21T16:36:10.000Z | virtualscreening/vina/spark/buried_areas.py | rodrigofaccioli/drugdesign | de15880af361a010729b1f4fbc8a75a2b36688a6 | ["Apache-2.0"] | 11 | 2015-03-03T13:32:24.000Z | 2020-04-03T11:22:24.000Z | from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
import ConfigParser as configparser
from subprocess import Popen, PIPE
from datetime import datetime
from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter
import os, sys
from os_utils import preparing_path
from gromacs_utils import get_value_from_xvg_sasa
from pdb_io import replace_chain_atom_line
from database_io import load_database
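# Builds a Spark DataFrame of (pose, buried_total) rows from the computed
# buried areas and returns it sorted by total buried area, largest first.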
def sorting_buried_area(sc, buried_areaRDD):
sqlCtx = SQLContext(sc)
buried_areaRDD = sc.parallelize(buried_areaRDD)
#buried_areaRDD = buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]), buried_lig_rec_perc=float(p[4]), buried_lig_lig_perc=float(p[5]) ) )
buried_areaRDD = buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_total=float(p[1]) ) )
buried_area_table = sqlCtx.createDataFrame(buried_areaRDD)
buried_area_table.registerTempTable("buried_area")
buried_area_sorted_by_buried_total = sqlCtx.sql("SELECT * FROM buried_area ORDER BY buried_total DESC") #buried_lig_lig_perc
return buried_area_sorted_by_buried_total
def save_receptor_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
f_buried_area = open(path_file_buried_area,"w")
for area in buried_area_sorted_by_lig_rec_perc:
#splited_line = area[0].split("_-_")
#aux_recep = splited_line[0]
#aux_lig = str(splited_line[1])
#preparing receptor
#receptor = str(str(aux_recep).replace("compl_", " ")).strip()
#preparing ligand
#splited_aux_lig = str(aux_lig).split(get_separator_filename_mode())
#ligand = splited_aux_lig[0]
#model = splited_aux_lig[1]
pose = area[0]
buried_total = "{:.4f}".format(area[1])
#line = receptor+"\t"+ligand+"\t"+model+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig_perc)+"\n"
line = pose+"\t"+str(buried_total)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
f_buried_area = open(path_file_buried_area,"w")
line = "# buried_area_total[nm2]\tpose"+"\n"
f_buried_area.write(line)
for area in buried_area_sorted_by_lig_rec_perc:
#receptor = area[0]
#ligand = area[1]
#model = area[2]
pose = str(str(area[0]).replace("compl_", " ")).strip()
buried_total = "{:.4f}".format(area[1])
#buried_lig_rec_perc = "{:.4f}".format(area[4])
#buried_lig_lig_perc = "{:.4f}".format(area[5])
#line = receptor+"\t"+ligand+"\t"+str(model)+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig_perc)+"\n"
line = str(buried_total)+"\t"+str(pose)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def save_normalized_buried_area(path_file_buried_area, full_dataRDD):
f_buried_area = open(path_file_buried_area,"w")
line = "# normalized_buried_area_total[nm2]\tpose"+"\n"
f_buried_area.write(line)
for area in full_dataRDD.collect():
pose = str(str(area[0]).replace("compl_", " ")).strip()
normalized_buried_total = "{:.4f}".format(area[1])
line = str(normalized_buried_total)+"\t"+str(pose)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def loading_lines_from_area_files(line):
line_splited = str(line).split()
#line_ret = ( str(line_splited[0]), str(line_splited[1]), int(line_splited[2]), float(line_splited[3]), float(line_splited[4]), float(line_splited[5]) )
line_ret = ( str(line_splited[0]), float(line_splited[1]) )
return line_ret
def get_files_area(mypath):
only_mol2_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".area"):
f_path = os.path.join(root,file)
only_mol2_file.append(f_path)
return only_mol2_file
def save_log(finish_time, start_time):
log_file_name = 'vs_buried_areas.log'
current_path = os.getcwd()
path_file = os.path.join(current_path, log_file_name)
log_file = open(path_file, 'w')
diff_time = finish_time - start_time
msg = 'Starting ' + str(start_time) +'\n'
log_file.write(msg)
msg = 'Finishing ' + str(finish_time) +'\n'
log_file.write(msg)
msg = 'Time Execution (seconds): ' + str(diff_time.total_seconds()) +'\n'
log_file.write(msg)
def main():
config = configparser.ConfigParser()
config.read('config.ini')
#Path for Gromacs project
gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))
#Path where PDB ligand are - They are NOT participated in docking
pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path')
#Path that contains all files for analysis
path_analysis = config.get('DEFAULT', 'path_analysis')
#Ligand Database file
ligand_database = config.get('DEFAULT', 'ligand_database_path_file')
#Path where all pdb receptor are
path_receptor_pdb = config.get('DEFAULT', 'pdb_path')
#Path for saving pdb files of models generated by VS
path_analysis_pdb = get_directory_pdb_analysis(path_analysis)
# Create SPARK config
maxResultSize = str(config.get('SPARK', 'maxResultSize'))
conf = (SparkConf().set("spark.driver.maxResultSize", maxResultSize))
# Create context
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
#Adding Python Source file
#Path for drugdesign project
path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
sc.addPyFile(os.path.join(path_spark_drugdesign,"vina_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"os_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"gromacs_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"pdb_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"database_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"json_utils.py"))
#Adding bash scripts
sc.addFile(os.path.join(path_spark_drugdesign,"make_ndx_buried_area_total.sh"))
sc.addFile(os.path.join(path_spark_drugdesign,"make_sasa_rec_buried_area_total.sh"))
#Parameters form command line
#Indicates probe. Example: 0.14
probe = float(sys.argv[1])
#Indicates ndots. Example: 24
ndots = int(sys.argv[2])
#Broadcast
path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb)
gromacs_path = sc.broadcast(gromacs_path)
pdb_ligand_path = sc.broadcast(pdb_ligand_path)
probe = sc.broadcast(probe)
ndots = sc.broadcast(ndots)
start_time = datetime.now()
os.environ["GMX_MAXBACKUP"]="-1"
#Loading all PDB receptor files into memory
list_all_pdb_receptor_files_path = []
all_receptor_for_complex = get_files_pdb(path_receptor_pdb)
for receptor in all_receptor_for_complex:
list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor))
#Computing Buried areas
for pdb_receptor_files in list_all_pdb_receptor_files_path:
#Getting receptor name by fully path
base_file_name_receptor = get_name_receptor_pdb(str(pdb_receptor_files[0]))
#PDB file loaded into memory is sent by broadcast
pdb_file_receptor = pdb_receptor_files[1]
pdb_file_receptor = sc.broadcast(pdb_file_receptor)
#Loading PDB model files based on receptor into memory
base_file_name_receptor_for_filter = base_file_name_receptor+"_-_"
all_model_for_complex = get_files_pdb_filter(path_analysis_pdb,base_file_name_receptor_for_filter)
all_model_for_complexRDD = sc.parallelize(all_model_for_complex)
all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect()
# ********** Starting function **********************************************************
def compute_buried_area(pdb_complex):
chZ = "chZ"
sasa_complex = -1.0
sasa_rec = -1.0
sasa_lig = -1.0
buried_total = -1.0
returned_list = []
try:
base_name = get_name_model_pdb(pdb_complex)
ligand_name = get_ligand_from_receptor_ligand_model(base_name)
f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+".pdb")
f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+".ndx")
f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_complex.xvg")
f_temp_sasa_rec = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_rec.xvg")
f_temp_sasa_lig = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_lig.xvg")
# Makes the index file with the ligand (chain z) and the rest (non chain z)
script_make_ndx = SparkFiles.get("make_ndx_buried_area_total.sh") #Getting bash script that was copied by addFile command
command = script_make_ndx + " " + gromacs_path.value + " "+ pdb_complex + " "+ f_ndx
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
command = gromacs_path.value +"gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface System " + " -output System "+ " -xvg none " + " -o " + f_temp_sasa_complex
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
# Makes f_temp_sasa_rec file
script_make_sasa_rec = SparkFiles.get("make_sasa_rec_buried_area_total.sh") #Getting bash script that was copied by addFile command
command = script_make_sasa_rec + " " + gromacs_path.value + " "+ pdb_complex + " "+ f_ndx + " " + f_temp_sasa_rec
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
command = gromacs_path.value +"gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface chZ " + " -output chZ "+ " -xvg none " + " -o " + f_temp_sasa_lig
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex)
sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec)
sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig)
buried_total = sasa_rec + sasa_lig - sasa_complex
#Generating result - See column sorting because resultaed file will be created based on this sorting
returned_list = (base_name, buried_total)
            except Exception:
returned_list = (base_name, float(0))
#Deleting files
if os.path.exists(f_ndx):
os.remove(f_ndx)
if os.path.exists(f_temp_sasa_complex):
os.remove(f_temp_sasa_complex)
if os.path.exists(f_temp_sasa_rec):
os.remove(f_temp_sasa_rec)
if os.path.exists(f_temp_sasa_lig):
os.remove(f_temp_sasa_lig)
return returned_list
# ********** Finish function **********************************************************
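        # Note (illustrative, not part of the original script): buried_total above is the
        # usual buried-surface-area estimate SASA(receptor) + SASA(ligand) - SASA(complex).
        # With made-up values sasa_rec = 150.0, sasa_lig = 4.0 and sasa_complex = 148.0 (nm^2),
        # buried_total would be 150.0 + 4.0 - 148.0 = 6.0 nm^2 buried on binding.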
# ********** Starting function **********************************************************
def save_model_receptor(list_receptor_model_file):
receptor_file = pdb_file_receptor.value #Obtained from broadcast
model_file = list_receptor_model_file[0]
full_path_for_save_complex = list_receptor_model_file[1]
#Open file for writting the complex
f_compl = open(full_path_for_save_complex, "w")
#Insert lines of receptor
for item in receptor_file:
f_compl.write(item)
#Insert lines of model and insert Z chain
for item in model_file:
item = replace_chain_atom_line(item,"d","z")
f_compl.write(item)
f_compl.close()
# ********** Finish function **********************************************************
# ********** Starting function **********************************************************
def build_list_model_for_complex(model):
full_path_model = model[0]
model_file = model[1]
path_pdb_complex = path_analysis_pdb_complex_b.value #Obtained from broadcast
#Building complex file based on model file name
base_name_model = get_name_model_pdb(full_path_model)
complex_name = "compl_"+base_name_model+".pdb"
full_path_for_save_complex = os.path.join(path_pdb_complex,complex_name)
list_receptor_model_file = (model_file, full_path_for_save_complex)
save_model_receptor(list_receptor_model_file)
list_ret = compute_buried_area(full_path_for_save_complex)
os.remove(full_path_for_save_complex)
return list_ret
# ********** Finish function **********************************************************
all_model_filesRDD = sc.parallelize(all_model_filesRDD)
all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect()
#Saving buried area of receptor
full_area_file = os.path.join(path_analysis,base_file_name_receptor+".area")
save_receptor_buried_area(full_area_file, all_model_filesRDD)
#Loading all area file
all_area_file = os.path.join(path_analysis,"*.area")
buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_area_files).collect()
#Sorting by buried_total column
buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD)
buried_area_sorted_by_buried_total.cache()
buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total) ).collect()
#Saving buried area file
path_file_buried_area = os.path.join(path_analysis, "summary_buried_areas_total.dat")
save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST)
#Calculating normalized buried area
#Loading database
rdd_database = load_database(sc, ligand_database)
#Creating Dataframe
database_table = sqlCtx.createDataFrame(rdd_database)
database_table.registerTempTable("database")
number_pose_ligandRDD = buried_area_sorted_by_buried_total.map(lambda p: Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect()
number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD)
number_pose_ligand_table.registerTempTable("buried_area_total_sort")
sql = """
SELECT pose, (b.buried_total / a.heavyAtom) as normalized_buried_area
FROM database a
JOIN buried_area_total_sort b ON b.ligand = a.ligand
ORDER BY normalized_buried_area DESC
"""
#Getting all data
full_dataRDD = sqlCtx.sql(sql)
#Saving normalized buried area file
path_file_buried_area = os.path.join(path_analysis, "summary_normalized_buried_areas.dat")
save_normalized_buried_area(path_file_buried_area, full_dataRDD)
#Removing all area files
all_area_files = get_files_area(path_analysis)
for area_file in all_area_files:
os.remove(area_file)
finish_time = datetime.now()
save_log(finish_time, start_time)
main()
| 43.87156 | 253 | 0.748292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,464 | 0.311167 |
8a84ca10fd051b6b0bb8be0088246cc71958f9d5 | 12,062 | py | Python | oase-root/web_app/views/system/mail/action_mail.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | [
"Apache-2.0"
] | 9 | 2020-03-25T07:51:47.000Z | 2022-02-07T00:07:28.000Z | oase-root/web_app/views/system/mail/action_mail.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | [
"Apache-2.0"
] | 1,164 | 2021-01-28T23:16:11.000Z | 2022-03-28T07:23:10.000Z | oase-root/web_app/views/system/mail/action_mail.py | Masa-Yasuno/oase | 90f3cee73c0d9b3153808a4a72bd19984a4873f9 | [
"Apache-2.0"
] | 25 | 2020-03-17T06:48:30.000Z | 2022-02-15T15:13:44.000Z | # Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
Display helper class for the MAIL action screens
"""
import pytz
import datetime
import json
import socket
import traceback
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.db import transaction
from django.conf import settings
from libs.commonlibs import define as defs
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.aes_cipher import AESCipher
from web_app.models.models import ActionType
from web_app.models.mail_models import MailDriver
from web_app.templatetags.common import get_message
from web_app.serializers.unicode_check import UnicodeCheck
logger = OaseLogger.get_instance()  # Initialize the logger
class mailDriverInfo():
def __init__(self, drv_id, act_id, name, ver, icon_name):
self.drv_id = drv_id
self.act_id = act_id
self.name = name
self.ver = ver
self.icon_name = icon_name
def __str__(self):
return '%s(ver%s)' % (self.name, self.ver)
def get_driver_name(self):
return '%s Driver ver%s' % (self.name, self.ver)
def get_driver_id(self):
return self.drv_id
def get_icon_name(self):
return self.icon_name
@classmethod
def get_template_file(cls):
return 'system/mail/action_mail.html'
@classmethod
def get_info_list(cls, user_groups):
try:
mail_driver_obj_list = MailDriver.objects.all()
except Exception as e:
            # Exceptions raised here are handled by the outer caller
raise
protocol_dict = cls.get_define()['dict']
mail_driver_dto_list = []
cipher = AESCipher(settings.AES_KEY)
for mail_obj in mail_driver_obj_list:
mail_info = mail_obj.__dict__
if mail_obj.password:
mail_info['password'] = cipher.decrypt(mail_obj.password)
mail_info['protocol_str'] = protocol_dict[mail_obj.protocol]
mail_driver_dto_list.append(mail_info)
return mail_driver_dto_list
@classmethod
def get_group_list(cls, user_groups):
"""
        [Overview]
        Get the list of groups (excluding the system administration group)
"""
return []
@classmethod
def get_define(cls):
protocol_dict = {key_value['v']: key_value['k'] for key_value in defs.SMTP_PROTOCOL.LIST_ALL}
defines = {
'list_all': defs.SMTP_PROTOCOL.LIST_ALL,
'dict': protocol_dict,
}
return defines
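    # Illustrative note: defs.SMTP_PROTOCOL.LIST_ALL is a list of {'k': <label>, 'v': <value>}
    # entries (the labels below are assumptions), e.g. [{'k': 'SMTP', 'v': 1}, {'k': 'SMTP_AUTH', 'v': 2}],
    # in which case get_define() returns
    # {'list_all': [...], 'dict': {1: 'SMTP', 2: 'SMTP_AUTH'}}.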
def record_lock(self, json_str, request):
logger.logic_log('LOSI00001', 'None', request=request)
driver_id = self.get_driver_id()
        # Lock the record before updating
if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE):
drvinfo_modify = int(json_str['json_str']['mail_driver_id'])
MailDriver.objects.select_for_update().filter(pk=drvinfo_modify)
logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id, request=request)
def modify(self, json_str, request):
"""
        [Method overview]
        DB update processing for the mail driver settings
"""
logger.logic_log('LOSI00001', 'None', request=request)
error_flag = False
error_msg = {
'mail_disp_name' : '',
'protocol' : '',
'smtp_server' : '',
'port' : '',
'user' : '',
'password' : '',
}
now = datetime.datetime.now(pytz.timezone('UTC'))
emo_chk = UnicodeCheck()
        # Data returned on success
response = {"status": "success",}
try:
rq = json_str['json_str']
ope = int(rq['ope'])
            # Input validation for operations other than delete
if ope != defs.DABASE_OPECODE.OPE_DELETE:
error_flag = self._validate(rq, error_msg, request)
if error_flag:
raise UserWarning('validation error.')
            # Encrypt the password; use an empty string if it is empty
cipher = AESCipher(settings.AES_KEY)
if ope == defs.DABASE_OPECODE.OPE_UPDATE:
encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id'])
driver_info_mod.mail_disp_name = rq['mail_disp_name']
driver_info_mod.protocol = rq['protocol']
driver_info_mod.smtp_server = rq['smtp_server']
driver_info_mod.port = rq['port']
driver_info_mod.user = rq['user']
driver_info_mod.password = encrypted_password
driver_info_mod.last_update_user = request.user.user_name
driver_info_mod.last_update_timestamp = now
driver_info_mod.save(force_update=True)
elif ope == defs.DABASE_OPECODE.OPE_DELETE:
MailDriver.objects.filter(pk=rq['mail_driver_id']).delete()
elif ope == defs.DABASE_OPECODE.OPE_INSERT:
encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
driver_info_reg = MailDriver(
mail_disp_name = rq['mail_disp_name'],
protocol = rq['protocol'],
smtp_server = rq['smtp_server'],
port = rq['port'],
user = rq['user'],
password = encrypted_password,
last_update_user = request.user.user_name,
last_update_timestamp = now
).save(force_insert=True)
except MailDriver.DoesNotExist:
            logger.logic_log('LOSM07006', "mail_driver_id", rq['mail_driver_id'], request=request)
except Exception as e:
logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
response = {
'status': 'failure',
                'error_msg': error_msg,  # Error details (shown with the error icon)
}
logger.logic_log('LOSI00002', 'response=%s' % response, request=request)
return response
def _validate(self, rq, error_msg, request):
"""
        [Overview]
        Input validation
        [Arguments]
        rq: dict          Requested input data
        error_msg: dict
        [Return value]
"""
logger.logic_log('LOSI00001', 'data: %s, error_msg:%s'%(rq, error_msg))
error_flag = False
emo_chk = UnicodeCheck()
emo_flag = False
emo_flag_ita_disp_name = False
emo_flag_hostname = False
if len(rq['mail_disp_name']) == 0:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27201', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'mail_disp_name', request=request)
if len(rq['mail_disp_name']) > 64:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27202', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'mail_disp_name', 64, rq['mail_disp_name'], request=request)
        # Emoji check
value_list = emo_chk.is_emotion(rq['mail_disp_name'])
if len(value_list) > 0:
error_flag = True
emo_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27216', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['protocol']) == 0:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27212', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'protocol', request=request)
if len(rq['protocol']) > 64:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27213', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request)
if len(rq['smtp_server']) == 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'smtp_server', request=request)
if len(rq['smtp_server']) > 128:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27204', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'smtp_server', 64, rq['smtp_server'], request=request)
        # Emoji check
value_list = emo_chk.is_emotion(rq['smtp_server'])
if len(value_list) > 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['port']) == 0:
error_flag = True
error_msg['port'] += get_message('MOSJA27205', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'port', request=request)
try:
tmp_port = int(rq['port'])
if 0 > tmp_port or tmp_port > 65535:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
except ValueError:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
if len(rq['user']) > 64:
error_flag = True
error_msg['user'] += get_message('MOSJA27207', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'user', 64, rq['user'], request=request)
        # Emoji check
value_list = emo_chk.is_emotion(rq['user'])
if len(value_list) > 0:
error_flag = True
error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['password']) > 64:
error_flag = True
error_msg['password'] += get_message('MOSJA27208', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request)
        # Emoji check
value_list = emo_chk.is_emotion(rq['password'])
if len(value_list) > 0:
error_flag = True
error_msg['password'] += get_message('MOSJA27219', request.user.get_lang_mode(), showMsgId=False) + '\n'
if not emo_flag:
duplication = MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name'])
if len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27209', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request)
if error_flag == False:
            # Connectivity check
resp_code = -1
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                    resp_code = sock.connect_ex((rq['smtp_server'], int(rq['port'])))  # hostname must be resolvable, e.g. via /etc/hosts
sock.close()
except Exception as e:
pass
if resp_code != 0:
error_flag = True
                # TODO: for now this error is attached to the name field
error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07005', rq['smtp_server'], rq['port'], request=request)
return error_flag
| 35.372434 | 122 | 0.596419 | 11,210 | 0.896513 | 0 | 0 | 1,286 | 0.102847 | 0 | 0 | 3,169 | 0.253439 |
8a85a524c6381c0f4e277dd284d072a8b41daaac | 3,427 | py | Python | queue/animal_queue.py | cozek/code-practice | bf3098dbeb502cab2e22ce7ea73c2aa05a3caf80 | [
"MIT"
] | null | null | null | queue/animal_queue.py | cozek/code-practice | bf3098dbeb502cab2e22ce7ea73c2aa05a3caf80 | [
"MIT"
] | null | null | null | queue/animal_queue.py | cozek/code-practice | bf3098dbeb502cab2e22ce7ea73c2aa05a3caf80 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from typing import Any, Union
class Animal:
def __init__(self, name: str) -> None:
self.name = name
def set_order(self, order: int) -> None:
self.order = order
def peek_order(self) -> int:
return self.order
def __str__(self) -> str:
return f"{self.name}"
class Node:
def __init__(self, data: Any):
self.data = data
self.next_node = None
class LinkedList:
def __init__(self) -> None:
self.head = None
self.tail = None
def __str__(self) -> str:
current = self.head
string = f""
while current.next_node is not None:
string += f"{current.data} -> "
current = current.next_node
return string + "END"
def is_empty(self) -> bool:
if self.head is None:
return True
else:
return False
def insert(self, item: Any) -> None:
if self.is_empty():
self.head = Node(item)
self.tail = self.head
else:
new_node = Node(item)
self.tail.next_node = new_node
self.tail = self.tail.next_node
def remove(self) -> Any:
if self.head is None:
raise ("Empty LinkedList!")
else:
data = self.head.data
self.head = self.head.next_node
return data
def peak(self):
return self.head.data
class Dog(Animal):
def __init__(self, name: str):
super().__init__(name)
class Cat(Animal):
def __init__(self, name: str):
super().__init__(name)
class AnimalQueue:
def __init__(self) -> None:
self.dogs = LinkedList()
self.cats = LinkedList()
self.order = 0
def enqueue(self, animal: Union[Dog, Cat]) -> None:
if not isinstance(animal, (Dog, Cat)):
raise Exception("Expected Dog or Cat!")
else:
animal.set_order(self.order)
self.order += 1
if isinstance(animal, Dog):
self.dogs.insert(animal)
elif isinstance(animal, Cat):
self.cats.insert(animal)
def dequeAny(self) -> Union[Dog, Cat]:
if self.dogs.is_empty():
return self.dequeCat()
elif self.cats.is_empty():
return self.dequeDog()
if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order():
return self.dequeCat()
else:
return self.dequeDog()
def print_cats(self) -> str:
string = ""
cat = self.cats.head
while cat is not None:
string += f"{cat.data.name} {cat.data.peek_order()} | "
cat = cat.next_node
return string
def dequeDog(self) -> Dog:
return self.dogs.remove()
def dequeCat(self) -> Cat:
return self.cats.remove()
def main():
q = AnimalQueue()
dogs = [Dog("d1"), Dog("d2"), Dog("d3")]
cats = [Cat("c1"), Cat("c2"), Cat("c3")]
both = []
while cats != []:
both.append(cats.pop())
both.append(dogs.pop())
[q.enqueue(animal) for animal in both]
string = ""
for anim in both:
string += f"{anim.name} {anim.order} | "
print(string)
# print(q.print_cats())
get = q.dequeDog()
print(get.order,get.name)
get = q.dequeAny()
print(get.order,get.name)
if __name__ == "__main__":
main()
| 24.133803 | 79 | 0.541873 | 2,806 | 0.818792 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.070616 |
8a85f7a1837485544e723eea52a8cc5f16480c6c | 6,816 | py | Python | ophyd/areadetector/detectors.py | NSLS-II/ophyd | d5fc722eef4d3d83845b1d523004302ec3aadb78 | [
"BSD-3-Clause"
] | 16 | 2015-05-20T20:48:25.000Z | 2019-04-24T21:12:59.000Z | ophyd/areadetector/detectors.py | NSLS-II/ophyd | d5fc722eef4d3d83845b1d523004302ec3aadb78 | [
"BSD-3-Clause"
] | 594 | 2015-01-05T21:55:21.000Z | 2019-05-10T02:05:24.000Z | ophyd/areadetector/detectors.py | NSLS-II/ophyd | d5fc722eef4d3d83845b1d523004302ec3aadb78 | [
"BSD-3-Clause"
] | 34 | 2015-01-23T19:50:58.000Z | 2019-05-07T05:38:57.000Z | # vi: ts=4 sw=4
'''AreaDetector Devices
`areaDetector`_ detector abstractions
.. _areaDetector: https://areadetector.github.io/master/index.html
'''
import warnings
from .base import (ADBase, ADComponent as C)
from . import cam
__all__ = ['DetectorBase',
'AreaDetector',
'AdscDetector',
'Andor3Detector',
'AndorDetector',
'BrukerDetector',
'DexelaDetector',
'EmergentVisionDetector',
'EigerDetector',
'FirewireLinDetector',
'FirewireWinDetector',
'GreatEyesDetector',
'LightFieldDetector',
'Mar345Detector',
'MarCCDDetector',
'PSLDetector',
'PerkinElmerDetector',
'PICamDetector',
'PilatusDetector',
'PixiradDetector',
'PointGreyDetector',
'ProsilicaDetector',
'PvcamDetector',
'RoperDetector',
'SimDetector',
'URLDetector',
'UVCDetector',
'Xspress3Detector'
]
class DetectorBase(ADBase):
"""
The base class for the hardware-specific classes that follow.
Note that Plugin also inherits from ADBase.
This adds some AD-specific methods that are not shared by the plugins.
"""
_default_configuration_attrs = (ADBase._default_configuration_attrs +
('cam', ))
def generate_datum(self, key, timestamp, datum_kwargs=None):
"""
Notify plugins of acquisition being complete.
When a new acquisition is started, this method is called with a
key which is a label like 'light', 'dark', or 'gain8'.
It in turn calls ``generate_datum`` on all of the plugins that have
that method.
File plugins are identified by searching for a
:meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum`
method that must have the signature ::
def generate_datum(key: str, timestamp: float, datum_kwargs: dict):
...
Parameters
----------
key : str
The label for the datum that should be generated
timestamp : float
The time of the trigger
datum_kwargs : Dict[str, Any], optional
Any datum kwargs that should go to all children.
"""
if datum_kwargs is None:
datum_kwargs = {}
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'generate_datum')]
for p in file_plugins:
if p.enable.get():
p.generate_datum(key, timestamp, datum_kwargs)
def dispatch(self, key, timestamp):
warnings.warn(
".dispatch is deprecated, use .generate_datum instead",
stacklevel=2
)
return self.generate_datum(key, timestamp, {})
dispatch.__doc__ = generate_datum.__doc__
def make_data_key(self):
source = 'PV:{}'.format(self.prefix)
# This shape is expected to match arr.shape for the array.
shape = (self.cam.num_images.get(),
self.cam.array_size.array_size_y.get(),
self.cam.array_size.array_size_x.get())
return dict(shape=shape, source=source, dtype='array',
external='FILESTORE:')
def collect_asset_docs(self):
file_plugins = [s for s in self._signals.values() if
hasattr(s, 'collect_asset_docs')]
for p in file_plugins:
yield from p.collect_asset_docs()
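# Sketch (not part of ophyd itself) of the duck-typed hook that DetectorBase.generate_datum
# looks for on its children: any component exposing a method with this signature is treated
# as a file plugin and notified on every trigger, e.g.
#
#     class MyFileStorePlugin(PluginBase):
#         def generate_datum(self, key: str, timestamp: float, datum_kwargs: dict):
#             ...  # record datum_kwargs for the frame acquired at `timestamp`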
class AreaDetector(DetectorBase):
cam = C(cam.AreaDetectorCam, 'cam1:')
class SimDetector(DetectorBase):
_html_docs = ['simDetectorDoc.html']
cam = C(cam.SimDetectorCam, 'cam1:')
class AdscDetector(DetectorBase):
_html_docs = ['adscDoc.html']
cam = C(cam.AdscDetectorCam, 'cam1:')
class AndorDetector(DetectorBase):
_html_docs = ['andorDoc.html']
cam = C(cam.AndorDetectorCam, 'cam1:')
class Andor3Detector(DetectorBase):
_html_docs = ['andor3Doc.html']
cam = C(cam.Andor3DetectorCam, 'cam1:')
class BrukerDetector(DetectorBase):
_html_docs = ['BrukerDoc.html']
cam = C(cam.BrukerDetectorCam, 'cam1:')
class DexelaDetector(DetectorBase):
_html_docs = ['DexelaDoc.html']
cam = C(cam.DexelaDetectorCam, 'cam1:')
class EmergentVisionDetector(DetectorBase):
_html_docs = ['EVTDoc.html']
cam = C(cam.EmergentVisionDetectorCam, 'cam1:')
class EigerDetector(DetectorBase):
_html_docs = ['EigerDoc.html']
cam = C(cam.EigerDetectorCam, 'cam1:')
class FirewireLinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireLinDetectorCam, 'cam1:')
class FirewireWinDetector(DetectorBase):
_html_docs = ['FirewireWinDoc.html']
cam = C(cam.FirewireWinDetectorCam, 'cam1:')
class GreatEyesDetector(DetectorBase):
_html_docs = [] # the documentation is not public
cam = C(cam.GreatEyesDetectorCam, 'cam1:')
class LightFieldDetector(DetectorBase):
_html_docs = ['LightFieldDoc.html']
cam = C(cam.LightFieldDetectorCam, 'cam1:')
class Mar345Detector(DetectorBase):
_html_docs = ['Mar345Doc.html']
cam = C(cam.Mar345DetectorCam, 'cam1:')
class MarCCDDetector(DetectorBase):
_html_docs = ['MarCCDDoc.html']
cam = C(cam.MarCCDDetectorCam, 'cam1:')
class PerkinElmerDetector(DetectorBase):
_html_docs = ['PerkinElmerDoc.html']
cam = C(cam.PerkinElmerDetectorCam, 'cam1:')
class PSLDetector(DetectorBase):
_html_docs = ['PSLDoc.html']
cam = C(cam.PSLDetectorCam, 'cam1:')
class PICamDetector(DetectorBase):
_html_docs = ['PICamDoc.html']
cam = C(cam.PICamDetectorCam, 'cam1:')
class PilatusDetector(DetectorBase):
_html_docs = ['pilatusDoc.html']
cam = C(cam.PilatusDetectorCam, 'cam1:')
class PixiradDetector(DetectorBase):
_html_docs = ['PixiradDoc.html']
cam = C(cam.PixiradDetectorCam, 'cam1:')
class PointGreyDetector(DetectorBase):
_html_docs = ['PointGreyDoc.html']
cam = C(cam.PointGreyDetectorCam, 'cam1:')
class ProsilicaDetector(DetectorBase):
_html_docs = ['prosilicaDoc.html']
cam = C(cam.ProsilicaDetectorCam, 'cam1:')
class PvcamDetector(DetectorBase):
_html_docs = ['pvcamDoc.html']
cam = C(cam.PvcamDetectorCam, 'cam1:')
class RoperDetector(DetectorBase):
_html_docs = ['RoperDoc.html']
cam = C(cam.RoperDetectorCam, 'cam1:')
class URLDetector(DetectorBase):
_html_docs = ['URLDoc.html']
cam = C(cam.URLDetectorCam, 'cam1:')
class UVCDetector(DetectorBase):
_html_docs = ['UVCDoc.html']
cam = C(cam.UVCDetectorCam, 'cam1:')
class Xspress3Detector(DetectorBase):
_html_docs = ['Xspress3Doc.html']
cam = C(cam.Xspress3DetectorCam, 'det1:')
| 27.264 | 81 | 0.639965 | 5,658 | 0.830106 | 225 | 0.033011 | 0 | 0 | 0 | 0 | 2,508 | 0.367958 |
8a86ee599744eb8c45294e523b8309fa29706768 | 261 | py | Python | python/EXERCICIO 96 - FUNCAO QUE CALCULA A AREA.py | debor4h/exerciciosPython | a18d88c6e98bc49005bfcb8badeb712007c16d69 | [
"MIT"
] | 1 | 2022-03-15T02:25:17.000Z | 2022-03-15T02:25:17.000Z | python/EXERCICIO 96 - FUNCAO QUE CALCULA A AREA.py | debor4h/exerciciosPython | a18d88c6e98bc49005bfcb8badeb712007c16d69 | [
"MIT"
] | null | null | null | python/EXERCICIO 96 - FUNCAO QUE CALCULA A AREA.py | debor4h/exerciciosPython | a18d88c6e98bc49005bfcb8badeb712007c16d69 | [
"MIT"
] | null | null | null | def area(msg):#declaracao da funcao com o parametro msg
print(msg)#aqui msg e a area
print('Controle de Terrenos')
print('-' * 20)
l = float(input('Largura (m): '))
c = float(input('Comprimento (m): '))
area(f'A área do seu terreno {l}X{c} é de {l*c}m².')
| 26.1 | 55 | 0.64751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.632576 |
8a8756b0429224a6d5fdf07d18eb3a9eed2f7a05 | 2,373 | py | Python | auth_iam/dashboard/auth/routes.py | santiher/dash-auth-example | 9854bfe953f86a0c7ed97660da30b7b7d1d3069f | [
"MIT"
] | 11 | 2020-03-05T18:50:07.000Z | 2022-02-16T19:45:35.000Z | auth_iam/dashboard/auth/routes.py | santiher/dash-auth-example | 9854bfe953f86a0c7ed97660da30b7b7d1d3069f | [
"MIT"
] | null | null | null | auth_iam/dashboard/auth/routes.py | santiher/dash-auth-example | 9854bfe953f86a0c7ed97660da30b7b7d1d3069f | [
"MIT"
] | null | null | null | import os
from functools import wraps
from os.path import join as join_path
from dash import Dash
from flask import make_response, render_template_string, redirect
excluded_resources_endpoints = (
'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout',
'/_user', '/auth')
def add_routes(app, authorizer):
"""Adds authentication endpoints to a flask app.
Decorates other endpoints to grant access.
The endpoints are:
* /login
* Method: GET
* /logout
* Method: GET
* Erases cookies
* /auth
* Method: GET
* Validates cookies if present or header authentication
* Header:
'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)'
* Sets cookies on login
* Rejects unauthorized users
Parameters
----------
app: flask.Flask or dash.Dash
The flask or dash application
    authorizer:
        Object exposing ``validate()`` and ``clean_cookie()``, used to grant or
        deny access and to set/clear the auth cookies.

    Access is not checked for the endpoints listed in the module-level
    ``excluded_resources_endpoints`` tuple.
"""
def login():
ok, _ = authorizer.validate()
if ok:
return make_response(redirect('/'), 307)
return render_template_string(login_template)
def logout():
_, response = authorizer.clean_cookie()
return response
def auth():
_, response = authorizer.validate()
return response
def authorize_endpoint(function):
@wraps(function)
def authorized_function(*args, **kwargs):
ok, response = authorizer.validate()
if ok:
return function(*args, **kwargs)
return response
return authorized_function
if isinstance(app, Dash):
app = app.server
login_template = load_template('login.html')
app.add_url_rule('/auth', '/auth', auth)
app.add_url_rule('/login', '/login', login)
app.add_url_rule('/logout', '/logout', logout)
for endpoint, function in app.view_functions.items():
if endpoint not in excluded_resources_endpoints:
app.view_functions[endpoint] = authorize_endpoint(function)
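# Illustrative usage (not part of this module): once the routes are registered, a client
# can obtain the auth cookies through the header format documented above, e.g.
#
#     curl -H "Authorization: DASHBOARD-AUTH username=alice/password=secret" \
#          http://localhost:8050/auth
#
# The host, port and credentials here are assumptions for the example.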
def load_template(filename):
"""Loads the login html template."""
pyfile_path = os.path.dirname(os.path.abspath(__file__))
path = join_path(pyfile_path, 'templates', filename)
with open(path, 'r') as f:
return f.read().strip()
| 29.296296 | 77 | 0.634218 | 0 | 0 | 0 | 0 | 211 | 0.088917 | 0 | 0 | 887 | 0.373788 |
8a8789db154d951e04619fad043530fa0eb6fd39 | 935 | py | Python | amazon/model_api/migrations/0005_remove_order_datetimecreated_alter_order__id_and_more.py | gabrielkarras/SOEN341 | da7241abd894bda4d5f7465b3de70e51afacf3f5 | [
"MIT"
] | 3 | 2022-01-16T19:12:37.000Z | 2022-01-25T18:50:15.000Z | amazon/model_api/migrations/0005_remove_order_datetimecreated_alter_order__id_and_more.py | gabrielkarras/SOEN341 | da7241abd894bda4d5f7465b3de70e51afacf3f5 | [
"MIT"
] | 83 | 2022-01-16T18:57:50.000Z | 2022-03-30T00:44:35.000Z | amazon/model_api/migrations/0005_remove_order_datetimecreated_alter_order__id_and_more.py | gabrielkarras/SOEN341 | da7241abd894bda4d5f7465b3de70e51afacf3f5 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-04-07 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('model_api', '0004_remove_order_created_remove_order_id_and_more'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='dateTimeCreated',
),
migrations.AlterField(
model_name='order',
name='_id',
field=models.AutoField(editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='orderedproduct',
name='_id',
field=models.AutoField(editable=False, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='orderedproduct',
name='price',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| 28.333333 | 86 | 0.594652 | 842 | 0.900535 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.203209 |
8a88b11de563042688caafdaffa71f1207edee67 | 8,082 | py | Python | items/migrations/0001_initial.py | tony-joseph/livre | 3a6a851ed58029d5d14edde647b15ed22d65f24b | [
"BSD-3-Clause"
] | 1 | 2020-05-06T16:59:47.000Z | 2020-05-06T16:59:47.000Z | items/migrations/0001_initial.py | tony-joseph/livre | 3a6a851ed58029d5d14edde647b15ed22d65f24b | [
"BSD-3-Clause"
] | null | null | null | items/migrations/0001_initial.py | tony-joseph/livre | 3a6a851ed58029d5d14edde647b15ed22d65f24b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-21 12:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BookCopy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])),
('remarks', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='BookDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('author', models.CharField(default='Unknown', max_length=1024)),
('description', models.TextField(blank=True, default='')),
('publisher', models.CharField(blank=True, default='', max_length=512)),
('published_on', models.DateField(blank=True, null=True)),
('pages', models.PositiveIntegerField(blank=True, default=0, null=True)),
('ddc', models.CharField(blank=True, default='', max_length=1024)),
('llcc', models.CharField(blank=True, default='', max_length=1024)),
('isbn', models.CharField(blank=True, default='', max_length=1024)),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=512)),
('slug', models.SlugField(max_length=128, unique=True)),
('description', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=512)),
('short_code', models.CharField(db_index=True, max_length=8, unique=True)),
('description', models.TextField(blank=True, default='')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Periodical',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('description', models.TextField(blank=True, default='')),
('publisher', models.CharField(blank=True, default='', max_length=512)),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PeriodicalIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])),
('published_on', models.DateField(blank=True, null=True)),
('volume', models.PositiveIntegerField(blank=True, null=True)),
('issue', models.PositiveIntegerField(blank=True, null=True)),
('remarks', models.TextField(blank=True, default='')),
('tags', models.CharField(blank=True, max_length=1024, null=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='bookdetail',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'),
),
migrations.AddField(
model_name='bookdetail',
name='created_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookdetail',
name='language',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'),
),
migrations.AddField(
model_name='bookdetail',
name='updated_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookcopy',
name='book_detail',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'),
),
migrations.AddField(
model_name='bookcopy',
name='created_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='bookcopy',
name='updated_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_copy_updated_by', to=settings.AUTH_USER_MODEL),
),
]
| 56.915493 | 191 | 0.614081 | 7,861 | 0.972655 | 0 | 0 | 0 | 0 | 0 | 0 | 1,316 | 0.162831 |
8a894222f80aae1db1ccdaaadeb6288f55d6b62f | 267 | py | Python | compliance_suite/exceptions/user_config_exception.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 1 | 2019-09-18T14:38:55.000Z | 2019-09-18T14:38:55.000Z | compliance_suite/exceptions/user_config_exception.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 14 | 2019-05-24T18:55:23.000Z | 2022-02-25T16:56:28.000Z | compliance_suite/exceptions/user_config_exception.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 8 | 2019-04-08T14:48:35.000Z | 2022-02-04T16:59:59.000Z | # -*- coding: utf-8 -*-
"""Module compliance_suite.exceptions.user_config_exception.py
This module contains the class definition for user config file exceptions.
"""
class UserConfigException(Exception):
"""Exception for user config file-related errors"""
pass | 26.7 | 70 | 0.752809 | 103 | 0.385768 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.794007 |
8a8988f59a7e29aadd9cfcc08e9db137ae34f210 | 3,677 | py | Python | 2021/day15/aoc-2021-d15.py | bbornstein/aoc | 624dacfe591a46aa34e3071b894076cf60091e7d | [
"MIT"
] | null | null | null | 2021/day15/aoc-2021-d15.py | bbornstein/aoc | 624dacfe591a46aa34e3071b894076cf60091e7d | [
"MIT"
] | null | null | null | 2021/day15/aoc-2021-d15.py | bbornstein/aoc | 624dacfe591a46aa34e3071b894076cf60091e7d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Advent of Code 2021, Day 15 (https://adventofcode.com/2021/day/15)
# Author: Ben Bornstein
import collections
import heapq
Point = collections.namedtuple('Point', ['x', 'y'])
Point.__add__ = lambda self, q: Point(self[0] + q[0], self[1] + q[1])
class RiskMap:
def __init__ (self):
"""Creates a new (empty) risk-level map.
Individual risk-levels as specific positions are accessible via
`RiskMap[Point]`.
See also `RiskMap.load()`
"""
self._factor = 1
self._levels = [ ]
self._nrows = 0
self._ncols = 0
def __getitem__ (self, pos):
"""Returns the risk-level at position `pos`, i.e. `RiskMap[pos]`."""
if self._factor > 1:
risk = self._levels[pos.y % self._nrows][pos.x % self._ncols]
risk += pos.y // self._nrows
risk += pos.x // self._ncols
if risk > 9:
risk = risk % 9
else:
risk = self._levels[pos.y][pos.x]
return risk
@staticmethod
def load (filename):
"""Creates a new risk-level map from `filename`."""
rmap = RiskMap()
with open(filename) as stream:
for line in stream.readlines():
rmap.append([ int(c) for c in line.strip() ])
return rmap
@property
def ncols (self):
"""The number of columns in this `RiskMap`."""
return self._factor * self._ncols
@property
def nrows (self):
"""The number of rows in this `RiskMap`."""
return self._factor * self._nrows
def append (self, row):
"""Appends `row` to this `RiskMap`."""
if len(self._levels) == 0:
self._ncols = len(row)
self._levels.append(row)
self._nrows += 1
def neighbors (self, pos):
"""Iterable 4-neighbors (up, down, left, right) for `pos`ition."""
deltas = (0, -1), (0, 1), (-1, 0), (1, 0)
adjacent = ( pos + Point(*delta) for delta in deltas )
yield from ( p for p in adjacent if self.valid(p) )
def resize (self, factor):
"""Resizes this `RiskMap` by setting its expansion factor to `factor`
copies both horizontally and vertically.
"""
self._factor = factor
def valid (self, pos):
"""Indicates whether or not `pos` is valid (inside this `RiskMap`)."""
return pos.y in range(0, self.nrows) and pos.x in range(0, self.ncols)
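# Worked example of the Part 2 wrap-around above (illustrative numbers): with a
# 100x100 input tile and a base risk of 8 stored at (x=20, y=30), reading the
# resized map at (x=220, y=130) adds 220 // 100 + 130 // 100 = 3 for a total of 11,
# which is > 9 and wraps to 11 % 9 = 2.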
def search (rmap, start, end):
"""Searches `RiskMap` `rmap` (breadth-first) to find the least risky
path from `start` to `end`. Returns the total risk of that path.
"""
risk = 0
queue = [ (rmap[p], p) for p in rmap.neighbors(start) ]
visited = { start }
heapq.heapify(queue)
while len(queue) > 0:
risk, current = heapq.heappop(queue)
if current == end:
break
for pos in rmap.neighbors(current):
if pos not in visited:
heapq.heappush( queue, ((rmap[pos] + risk), pos) )
visited.add(pos)
return risk
filename = 'aoc-2021-d15.txt'
rmap = RiskMap.load(filename)
start = Point(0, 0)
end = Point(rmap.ncols - 1, rmap.nrows - 1)
# Part 1
#
# Q: Lowest total risk of any path from the top left to the bottom right?
# A: Total Risk = 755
print(f'Part 1: Total Risk = {search(rmap, start, end):4}')
# Part 2
#
# Q: Lowest total risk of any path from the top left to the bottom right?
# A: Total Risk = 3016
rmap.resize(factor=5)
end = Point(rmap.ncols - 1, rmap.nrows - 1)
print(f'Part 2: Total Risk = {search(rmap, start, end)}')
| 25.184932 | 78 | 0.56731 | 2,218 | 0.603209 | 279 | 0.075877 | 543 | 0.147675 | 0 | 0 | 1,297 | 0.352733 |
8a89fcb6aa9605bd61ebc69c816df71f6eb1ab81 | 673 | py | Python | indico/modules/events/abstracts/compat.py | aiforrural/Digital-Events-Example | 628aaa8727b259b9367ac0ae1c5ba8e9e95eca82 | [
"MIT"
] | 1 | 2021-02-08T09:34:27.000Z | 2021-02-08T09:34:27.000Z | indico/modules/events/abstracts/compat.py | pamirk/indico | c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768 | [
"MIT"
] | null | null | null | indico/modules/events/abstracts/compat.py | pamirk/indico | c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import redirect
from indico.modules.events.abstracts.models.abstracts import Abstract
from indico.web.flask.util import url_for
from indico.web.rh import RHSimple
@RHSimple.wrap_function
def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False):
abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404()
return redirect(url_for('abstracts.' + endpoint, abstract, management=management))
| 35.421053 | 86 | 0.786033 | 0 | 0 | 0 | 0 | 281 | 0.417533 | 0 | 0 | 219 | 0.325409 |
8a8a2f0c0a2dfbb11e77c498d88fd4e6f73817b2 | 2,168 | py | Python | src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/database_account_list_keys_result_py3.py | limingu/azure-cli-extensions | 1bc29f089f4da42ab8905e440f2f46d6b5b0aa97 | [
"MIT"
] | 2 | 2021-06-05T17:51:26.000Z | 2021-11-17T11:17:56.000Z | src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/database_account_list_keys_result_py3.py | limingu/azure-cli-extensions | 1bc29f089f4da42ab8905e440f2f46d6b5b0aa97 | [
"MIT"
] | 1 | 2020-06-12T01:39:40.000Z | 2020-06-12T01:39:40.000Z | src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/database_account_list_keys_result_py3.py | anpaz-msft/azure-cli-extensions | 847fd487fe61e83f2a4163a9393edc9555267bc2 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult
class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult):
"""The access keys for the given database account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar primary_readonly_master_key: Base 64 encoded value of the primary
read-only key.
:vartype primary_readonly_master_key: str
:ivar secondary_readonly_master_key: Base 64 encoded value of the
secondary read-only key.
:vartype secondary_readonly_master_key: str
:ivar primary_master_key: Base 64 encoded value of the primary read-write
key.
:vartype primary_master_key: str
:ivar secondary_master_key: Base 64 encoded value of the secondary
read-write key.
:vartype secondary_master_key: str
"""
_validation = {
'primary_readonly_master_key': {'readonly': True},
'secondary_readonly_master_key': {'readonly': True},
'primary_master_key': {'readonly': True},
'secondary_master_key': {'readonly': True},
}
_attribute_map = {
'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'},
'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'},
'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'},
'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(DatabaseAccountListKeysResult, self).__init__(**kwargs)
self.primary_master_key = None
self.secondary_master_key = None
| 40.90566 | 98 | 0.66928 | 1,592 | 0.734317 | 0 | 0 | 0 | 0 | 0 | 0 | 1,565 | 0.721863 |
8a8aa73cf4c767bf7b906925d1382b404b94f301 | 1,834 | py | Python | Google/google_books/scrape_google_books.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | Google/google_books/scrape_google_books.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | Google/google_books/scrape_google_books.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | from parsel import Selector
import requests, json, re
params = {
"q": "richard branson",
"tbm": "bks",
"gl": "us",
"hl": "en"
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}
html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)
books_results = []
# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)
for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
title = book_result.css(".DKV0Md::text").get()
link = book_result.css(".bHexk a::attr(href)").get()
displayed_link = book_result.css(".tjvcx::text").get()
snippet = book_result.css(".cmlJmd span::text").get()
author = book_result.css(".fl span::text").get()
author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
date_published = book_result.css(".fl+ span::text").get()
preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()
books_results.append({
"title": title,
"link": link,
"displayed_link": displayed_link,
"snippet": snippet,
"author": author,
"author_link": author_link,
"date_published": date_published,
"preview_link": preview_link,
"more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None,
"thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
})
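# Illustrative follow-up (not part of the original script): the imported json module
# can be used to inspect or save the scraped results.
print(json.dumps(books_results, indent=2, ensure_ascii=False))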
| 39.869565 | 135 | 0.657579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 773 | 0.421483 |
8a8bbdd35a1d135f6e6a32befca7b762678940d4 | 327 | py | Python | Python/Higher-Or-Lower/hol/__init__.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | 1 | 2020-07-28T17:07:35.000Z | 2020-07-28T17:07:35.000Z | Python/Higher-Or-Lower/hol/__init__.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | 5 | 2021-04-06T18:25:29.000Z | 2021-04-10T15:13:28.000Z | Python/Higher-Or-Lower/hol/__init__.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | null | null | null | r"""
Contains classes and methods that can be used when simulating the game
Higher-or-Lower and performing statistical analysis on different games.
"""
from hol import (
cards,
constants,
)
from hol._hol import (
generate_all_games,
should_pick_higher,
is_a_winning_game,
generate_win_statistics,
)
| 17.210526 | 71 | 0.737003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.46789 |
8a8bd51e1880ca1483e91fca0ab41237e4c4f869 | 4,896 | py | Python | Lib/hTools2/dialogs/glyphs/slide.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 11 | 2015-01-06T15:43:56.000Z | 2019-07-27T00:35:20.000Z | Lib/hTools2/dialogs/glyphs/slide.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 2 | 2017-05-17T10:11:46.000Z | 2018-11-21T21:43:43.000Z | Lib/hTools2/dialogs/glyphs/slide.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 4 | 2015-01-10T13:58:50.000Z | 2019-12-18T15:40:14.000Z | # [h] slide selected glyphs
from mojo.roboFont import CurrentFont, CurrentGlyph, version
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.fontutils import get_full_name, get_glyphs
from hTools2.modules.messages import no_font_open, no_glyph_selected
class slideGlyphsDialog(hDialog):
'''A dialog to slide the selected glyphs vertically and/or horizontally.
.. image:: imgs/glyphs/slide.png
'''
_moveX = 0
_moveY = 0
_xMax = 1000
_xMin = -1000
_yMax = 500
_yMin = -500
font = None
font_name = '(no font selected)'
def __init__(self):
# window
self.title = "slide"
self.button_width = 70
self.column_1 = 20
self.column_2 = 240
self.width = self.column_1 + self.column_2 + self.button_width + self.padding_x*3
self.height = self.text_height*3 + self.padding_y*4
self.w = HUDFloatingWindow((self.width, self.height), self.title)
x = self.padding_x
y = self.padding_y
# current font name
self.w.box = Box(
(x, y, self.column_1 + self.column_2, self.text_height))
self.w.box.text = TextBox(
(5, 0, self.column_1 + self.column_2, self.text_height),
self.font_name,
sizeStyle=self.size_style)
x += (self.column_2 + self.column_1 + self.padding_x)
self.w.button_update_font = SquareButton(
(x, y, self.button_width, self.text_height),
"update",
callback=self.update_font_callback,
sizeStyle=self.size_style)
# x slider
x = self.padding_x
y += self.text_height + self.padding_y
self.w.x_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"x",
sizeStyle=self.size_style)
x += self.column_1
self.w.x_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._xMax,
minValue=self._xMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_x = SquareButton(
(x, y, self.button_width, self.text_height),
"reset x",
callback=self.restore_x_callback,
sizeStyle=self.size_style)
# y slider
x = self.padding_x
y += (self.text_height + self.padding_y)
self.w.y_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"y",
sizeStyle=self.size_style)
x += self.column_1
self.w.y_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._yMax,
minValue=self._yMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_y = SquareButton(
(x, y, self.button_width, self.text_height),
"reset y",
callback=self.restore_y_callback,
sizeStyle=self.size_style)
# open
self.w.open()
self.update_font()
# callbacks
def restore_x(self):
self._moveX = 0
self.w.x_slider.set(self._moveX)
def restore_y(self):
self._moveY = 0
self.w.y_slider.set(self._moveY)
def restore_x_callback(self, sender):
self.restore_x()
def restore_y_callback(self, sender):
self.restore_y()
def update_font(self):
self.font = CurrentFont()
if self.font is not None:
self.w.box.text.set(get_full_name(self.font))
self.set_defaults()
self.restore_x()
self.restore_y()
else:
            print(no_font_open)
def set_defaults(self):
self._xMax = self.font.info.unitsPerEm
self._yMax = self.font.info.unitsPerEm / 2
self._xMin = -self._xMax
self._yMin = -self._yMax
def update_font_callback(self, sender):
self.update_font()
def slide_callback(self, sender):
xValue = self.w.x_slider.get()
yValue = self.w.y_slider.get()
x = self._moveX - xValue
y = self._moveY - yValue
self._moveX = xValue
self._moveY = yValue
glyph_names = get_glyphs(self.font)
if len(glyph_names) > 0:
for glyph_name in glyph_names:
# RF 2.0
if version[0] == '2':
self.font[glyph_name].moveBy((-x, -y))
# RF 1.8.X
else:
self.font[glyph_name].move((-x, -y))
else:
            print(no_glyph_selected)
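# Usage sketch (assumes RoboFont with a font open): instantiating the dialog
# builds the floating window and hooks the sliders to the selected glyphs.
# slideGlyphsDialog()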
| 31.184713 | 89 | 0.55576 | 4,621 | 0.943832 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.059232 |
8a8c544c5af946feba8528e8627d4c6fff3edf22 | 3,495 | py | Python | werobot/utils.py | lilac/WeRobot | 29fd70631b736a0c339f16f7729ea89f986c8bae | [
"MIT"
] | 2 | 2018-06-03T16:32:07.000Z | 2018-06-03T16:32:10.000Z | werobot/utils.py | Milleree/WeRoBot | f9777f792d55ae70e7262f13e6e3f3667a167036 | [
"MIT"
] | 9 | 2020-06-05T19:51:33.000Z | 2022-03-11T23:40:25.000Z | werobot/utils.py | Milleree/WeRoBot | f9777f792d55ae70e7262f13e6e3f3667a167036 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import json
import os
import random
import re
import string
import time
from functools import wraps
from hashlib import sha1
import six
try:
from secrets import choice
except ImportError:
from random import choice
string_types = (six.string_types, six.text_type, six.binary_type)
re_type = type(re.compile("regex_test"))
def get_signature(token, timestamp, nonce, *args):
sign = [token, timestamp, nonce] + list(args)
sign.sort()
sign = to_binary(''.join(sign))
return sha1(sign).hexdigest()
def check_signature(token, timestamp, nonce, signature):
if not (token and timestamp and nonce and signature):
return False
sign = get_signature(token, timestamp, nonce)
return sign == signature
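# Example with illustrative values: sign a callback and verify it round-trips.
#
#     sig = get_signature("my_token", "1409735669", "654321")
#     assert check_signature("my_token", "1409735669", "654321", sig)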
def check_token(token):
return re.match('^[A-Za-z0-9]{3,32}$', token)
def cached_property(method):
prop_name = '_{}'.format(method.__name__)
@wraps(method)
def wrapped_func(self, *args, **kwargs):
if not hasattr(self, prop_name):
setattr(self, prop_name, method(self, *args, **kwargs))
return getattr(self, prop_name)
return property(wrapped_func)
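# Example: the decorated method runs once and later accesses reuse the cached
# value (`fetch_token_from_api` is a hypothetical expensive call).
#
#     class Client(object):
#         @cached_property
#         def access_token(self):
#             return fetch_token_from_api()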
def to_text(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value
if isinstance(value, six.binary_type):
return value.decode(encoding)
return six.text_type(value)
def to_binary(value, encoding="utf-8"):
if isinstance(value, six.binary_type):
return value
if isinstance(value, six.text_type):
return value.encode(encoding)
return six.binary_type(value)
def is_string(value):
return isinstance(value, string_types)
def byte2int(s, index=0):
"""Get the ASCII int value of a character in a string.
:param s: a string
:param index: the position of desired character
:return: ASCII int value
"""
if six.PY2:
return ord(s[index])
return s[index]
def generate_token(length=''):
if not length:
length = random.randint(3, 32)
length = int(length)
assert 3 <= length <= 32
letters = string.ascii_letters + string.digits
return ''.join(choice(letters) for _ in range(length))
def json_loads(s):
s = to_text(s)
return json.loads(s)
def json_dumps(d):
return json.dumps(d)
def pay_sign_dict(
appid,
pay_sign_key,
add_noncestr=True,
add_timestamp=True,
add_appid=True,
**kwargs
):
"""
    Sign payment parameters.
"""
assert pay_sign_key, "PAY SIGN KEY IS EMPTY"
if add_appid:
kwargs.update({'appid': appid})
if add_noncestr:
kwargs.update({'noncestr': generate_token()})
if add_timestamp:
kwargs.update({'timestamp': int(time.time())})
params = kwargs.items()
_params = [
(k.lower(), v) for k, v in kwargs.items() if k.lower() != "appid"
]
_params += [('appid', appid), ('appkey', pay_sign_key)]
_params.sort()
sign = '&'.join(["%s=%s" % (str(p[0]), str(p[1]))
for p in _params]).encode("utf-8")
sign = sha1(sign).hexdigest()
sign_type = 'SHA1'
return dict(params), sign, sign_type
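# Example (illustrative values only; extra keyword arguments become signed fields):
#
#     params, sign, sign_type = pay_sign_dict(
#         appid="wx0123456789", pay_sign_key="secret-key", productid="42"
#     )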
def make_error_page(url):
with io.open(
os.path.join(os.path.dirname(__file__), 'contrib/error.html'),
'r',
encoding='utf-8'
) as error_page:
return error_page.read().replace('{url}', url)
def is_regex(value):
return isinstance(value, re_type)
| 22.403846 | 73 | 0.645207 | 0 | 0 | 0 | 0 | 208 | 0.05931 | 0 | 0 | 416 | 0.11862 |
8a8c957af09c1662e1613d8819301ef9871bcd5c | 5,914 | py | Python | tensorflow/python/ops/standard_ops.py | ashutom/tensorflow-upstream | c16069c19de9e286dd664abb78d0ea421e9f32d4 | [
"Apache-2.0"
] | 8 | 2021-08-03T03:57:10.000Z | 2021-12-13T01:19:02.000Z | tensorflow/python/ops/standard_ops.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 17 | 2021-08-12T19:38:42.000Z | 2022-01-27T14:39:35.000Z | tensorflow/python/ops/standard_ops.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 4 | 2022-01-13T11:23:44.000Z | 2022-03-02T11:11:42.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform as _platform
import sys as _sys
from tensorflow.python import autograph
from tensorflow.python.training.experimental import loss_scaling_gradient_tape
# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.eager import wrap_function
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.batch_ops import *
from tensorflow.python.ops.critical_section_ops import *
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.proto_ops import *
from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch
from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sort_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.stateless_random_ops import *
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.variables import *
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map
# pylint: disable=g-import-not-at-top
if _platform.system() == "Windows":
from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt
else:
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=g-import-not-at-top
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
# These modules were imported to set up RaggedTensor operators and dispatchers:
del _ragged_dispatch, _ragged_operators
| 46.936508 | 93 | 0.825668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,605 | 0.27139 |
8a8ce25aff69e17f6f7281d206c301403a98d23f | 3,208 | py | Python | src/tango_scaling_test/TestDeviceServer/__main__.py | rtobar/sdp-prototype | 9f1527b884bf80daa509a7fe3722160c77260f4f | [
"BSD-3-Clause"
] | null | null | null | src/tango_scaling_test/TestDeviceServer/__main__.py | rtobar/sdp-prototype | 9f1527b884bf80daa509a7fe3722160c77260f4f | [
"BSD-3-Clause"
] | null | null | null | src/tango_scaling_test/TestDeviceServer/__main__.py | rtobar/sdp-prototype | 9f1527b884bf80daa509a7fe3722160c77260f4f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test Tango device server for use with scaling tests."""
import sys
import time
import argparse
import tango
from tango.server import run
from TestDevice import TestDevice
def init_callback():
"""Report server start up times.
This callback is executed post server initialisation.
"""
# pylint: disable=global-statement
global START_TIME
db = tango.Database()
elapsed = time.time() - START_TIME
list_devices()
exported_devices = list(db.get_device_exported('test/*'))
num_devices = len(exported_devices)
    with open('results.txt', 'a') as results_file:
        results_file.write(',{},{}\n'.format(elapsed, elapsed / num_devices))
print('>> Time taken to start devices: {:.4f} s ({:.4f} s/dev)'
.format(elapsed, elapsed / num_devices))
def delete_server():
"""Delete the TestDeviceServer from the tango db."""
db = tango.Database()
db.set_timeout_millis(50000)
server = 'TestDeviceServer/1'
server_list = list(db.get_server_list(server))
if server in server_list:
start_time = time.time()
db.delete_server('TestDeviceServer/1')
print('- Delete server: {:.4f} s'.format(time.time() - start_time))
def register(num_devices):
"""Register devices in the tango db."""
db = tango.Database()
device_info = tango.DbDevInfo()
device_info.server = 'TestDeviceServer/1'
# pylint: disable=protected-access
device_info._class = 'TestDevice'
start_time = time.time()
for device_id in range(num_devices):
device_info.name = 'test/test_device/{:05d}'.format(device_id)
db.add_device(device_info)
elapsed = time.time() - start_time
    with open('results.txt', 'a') as results_file:
        results_file.write('{},{},{}'.format(num_devices, elapsed, elapsed / num_devices))
print('- Register devices: {:.4f} s ({:.4f} s/device)'
.format(elapsed, elapsed / num_devices))
def list_devices():
"""List tango devices associated with the TestDeviceServer."""
db = tango.Database()
server_instance = 'TestDeviceServer/1'
device_class = 'TestDevice'
devices = list(db.get_device_name(server_instance, device_class))
print('- No. registered devices: {}'.format(len(devices)))
exported_devices = list(db.get_device_exported('test/*'))
print('- No. running devices: {}'.format(len(exported_devices)))
def main(args=None, **kwargs):
"""Run (start) the device server."""
run([TestDevice], verbose=True, msg_stream=sys.stdout,
post_init_callback=init_callback, raises=False,
args=args, **kwargs)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Device registration time.')
PARSER.add_argument('num_devices', metavar='N', type=int,
default=1, nargs='?',
help='Number of devices to start.')
ARGS = PARSER.parse_args()
delete_server()
time.sleep(0.5)
list_devices()
print('* Registering {} devices'.format(ARGS.num_devices))
register(ARGS.num_devices)
list_devices()
print('* Starting server ...')
sys.argv = ['TestDeviceServer', '1', '-v4']
START_TIME = time.time()
main()
| 29.981308 | 77 | 0.65586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,005 | 0.313279 |
8a8d44634b296be16e3e3fe11b62e194bcce203d | 14,955 | py | Python | test/test_pipeline.py | ParikhKadam/haystack | 8a57f6b16af0bdd41dc02bf1200e0adbdf1da39b | [
"Apache-2.0"
] | 1 | 2021-08-04T09:06:43.000Z | 2021-08-04T09:06:43.000Z | test/test_pipeline.py | jacksbox/haystack | 65f1da00cc4b6757752dafb8bf756531fad46dd0 | [
"Apache-2.0"
] | null | null | null | test/test_pipeline.py | jacksbox/haystack | 65f1da00cc4b6757752dafb8bf756531fad46dd0 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
import pytest
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, \
DocumentSearchPipeline, RootNode
from haystack.retriever.dense import DensePassageRetriever
from haystack.retriever.sparse import ElasticsearchRetriever
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_load_yaml(document_store_with_docs):
# test correct load of indexing pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"),
pipeline_name="indexing_pipeline")
pipeline.run(file_path=Path("samples/pdf/sample_pdf_1.pdf"), top_k_retriever=10, top_k_reader=3)
# test correct load of query pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="query_pipeline")
prediction = pipeline.run(query="Who made the PDF specification?", top_k_retriever=10, top_k_reader=3)
assert prediction["query"] == "Who made the PDF specification?"
assert prediction["answers"][0]["answer"] == "Adobe Systems"
# test invalid pipeline name
with pytest.raises(Exception):
Pipeline.load_from_yaml(path=Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="invalid")
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever_with_docs, document_store_with_docs", [("elasticsearch", "elasticsearch")], indirect=True
)
def test_graph_creation(reader, retriever_with_docs, document_store_with_docs):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["Query"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.output_2"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.wrong_edge_label"])
with pytest.raises(Exception):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["InvalidNode"])
with pytest.raises(Exception):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["InvalidNode"])
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Who lives in Berlin?"
assert prediction["answers"][0]["answer"] == "Carla"
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
assert len(prediction["answers"]) == 3
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_offsets(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=5)
assert prediction["answers"][0]["offset_start"] == 11
assert prediction["answers"][0]["offset_end"] == 16
start = prediction["answers"][0]["offset_start"]
end = prediction["answers"][0]["offset_end"]
assert prediction["answers"][0]["context"][start:end] == prediction["answers"][0]["answer"]
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_single_result(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
query = "testing finder"
prediction = pipeline.run(query=query, top_k_retriever=1, top_k_reader=1)
assert prediction is not None
assert len(prediction["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_faq_pipeline(retriever, document_store):
documents = [
{"text": "How to test module-1?", 'meta': {"source": "wiki1", "answer": "Using tests for module-1"}},
{"text": "How to test module-2?", 'meta': {"source": "wiki2", "answer": "Using tests for module-2"}},
{"text": "How to test module-3?", 'meta': {"source": "wiki3", "answer": "Using tests for module-3"}},
{"text": "How to test module-4?", 'meta': {"source": "wiki4", "answer": "Using tests for module-4"}},
{"text": "How to test module-5?", 'meta': {"source": "wiki5", "answer": "Using tests for module-5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
pipeline = FAQPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=3)
assert len(output["answers"]) == 3
assert output["answers"][0]["query"].startswith("How to")
assert output["answers"][0]["answer"].startswith("Using tests")
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_document_search_pipeline(retriever, document_store):
documents = [
{"text": "Sample text for document-1", 'meta': {"source": "wiki1"}},
{"text": "Sample text for document-2", 'meta': {"source": "wiki2"}},
{"text": "Sample text for document-3", 'meta': {"source": "wiki3"}},
{"text": "Sample text for document-4", 'meta': {"source": "wiki4"}},
{"text": "Sample text for document-5", 'meta': {"source": "wiki5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
pipeline = DocumentSearchPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=4)
assert len(output.get('documents', [])) == 4
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["documents"]) == 1
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator):
base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
pipeline = TranslationWrapperPipeline(
input_translator=de_to_en_translator,
output_translator=en_to_de_translator,
pipeline=base_pipeline
)
prediction = pipeline.run(query="Wer lebt in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Wer lebt in Berlin?"
assert "Carla" in prediction["answers"][0]["answer"]
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_join_document_pipeline(document_store_with_docs, reader):
es = ElasticsearchRetriever(document_store=document_store_with_docs)
dpr = DensePassageRetriever(
document_store=document_store_with_docs,
query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
use_gpu=False,
)
document_store_with_docs.update_embeddings(dpr)
query = "Where does Carla lives?"
# test merge without weights
join_node = JoinDocuments(join_mode="merge")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test merge with weights
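    # (weights=[1000, 1] makes the first retriever dominate the merged score,
    # which is why the top document's score is expected to exceed 1000 below)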
join_node = JoinDocuments(join_mode="merge", weights=[1000, 1], top_k_join=2)
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert results["documents"][0].score > 1000
assert len(results["documents"]) == 2
# test concatenate
join_node = JoinDocuments(join_mode="concatenate")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test join_node with reader
join_node = JoinDocuments()
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
p.add_node(component=reader, name="Reader", inputs=["Join"])
results = p.run(query=query)
assert results["answers"][0]["answer"] == "Berlin"
def test_parallel_paths_in_pipeline_graph():
class A(RootNode):
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
kwargs["output"] = kwargs["inputs"][0]["output"] + kwargs["inputs"][1]["output"]
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="E", component=E(), inputs=["C"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E"])
output = pipeline.run(query="test")
assert output["output"] == "ABDABCE"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="E", component=JoinNode(), inputs=["C", "D"])
output = pipeline.run(query="test")
assert output["output"] == "ABCABD"
def test_parallel_paths_in_pipeline_graph_with_branching():
class AWithOutput1(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class AWithOutput2(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_2"
class AWithOutputAll(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_all"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
if kwargs.get("inputs"):
kwargs["output"] = ""
for input_dict in kwargs["inputs"]:
kwargs["output"] += (input_dict["output"])
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput1(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ABEABD"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput2(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "AC"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutputAll(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ACABEABD"
| 42.126761 | 119 | 0.664527 | 2,089 | 0.139686 | 0 | 0 | 9,763 | 0.652825 | 0 | 0 | 3,709 | 0.248011 |
8a8db025d17d202dce4f03767b8394c4ff63db8d | 14,254 | py | Python | src/telr/TELR_assembly.py | dominik-handler/TELR | 3e34e54fc959c13fa45dc911facf0d5179fbb34b | [
"BSD-2-Clause"
] | 22 | 2020-09-22T21:21:17.000Z | 2022-01-21T17:52:12.000Z | src/telr/TELR_assembly.py | dominik-handler/TELR | 3e34e54fc959c13fa45dc911facf0d5179fbb34b | [
"BSD-2-Clause"
] | 6 | 2021-05-07T13:52:30.000Z | 2022-03-27T18:21:10.000Z | src/telr/TELR_assembly.py | dominik-handler/TELR | 3e34e54fc959c13fa45dc911facf0d5179fbb34b | [
"BSD-2-Clause"
] | 6 | 2020-10-01T12:47:19.000Z | 2021-08-13T14:38:11.000Z | import sys
import os
import subprocess
import shutil
import time
import logging
from Bio import SeqIO
from multiprocessing import Pool
import pysam
from telr.TELR_utility import mkdir, check_exist, format_time
def get_local_contigs(
assembler,
polisher,
contig_dir,
vcf_parsed,
out,
sample_name,
bam,
raw_reads,
thread,
presets,
polish_iterations,
):
"""Perform local assembly using reads from parsed VCF file in parallel"""
# Prepare reads used for local assembly and polishing
sv_reads_dir = os.path.join(out, "sv_reads")
try:
prep_assembly_inputs(
vcf_parsed, out, sample_name, bam, raw_reads, sv_reads_dir, read_type="sv"
)
except Exception as e:
print(e)
print("Prepare local assembly input data failed, exiting...")
sys.exit(1)
mkdir(contig_dir)
k = 0
asm_pa_list = []
with open(vcf_parsed, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
contig_name = "_".join([entry[0], entry[1], entry[2]])
# rename variant reads
sv_reads = sv_reads_dir + "/contig" + str(k)
sv_reads_rename = sv_reads_dir + "/" + contig_name + ".reads.fa"
os.rename(sv_reads, sv_reads_rename)
thread_asm = 1
asm_pa = [
sv_reads_rename,
contig_dir,
contig_name,
thread_asm,
presets,
assembler,
polisher,
polish_iterations,
]
asm_pa_list.append(asm_pa)
k = k + 1
# run assembly in parallel
logging.info("Perform local assembly of non-reference TE loci...")
start_time = time.time()
try:
pool = Pool(processes=thread)
contig_list = pool.map(run_assembly_polishing, asm_pa_list)
pool.close()
pool.join()
except Exception as e:
print(e)
print("Local assembly failed, exiting...")
sys.exit(1)
proc_time = time.time() - start_time
# merge all contigs
assembly_passed_loci = set()
merged_contigs = os.path.join(out, sample_name + ".contigs.fa")
with open(merged_contigs, "w") as merged_output_handle:
for contig in contig_list:
if check_exist(contig):
contig_name = os.path.basename(contig).replace(".cns.fa", "")
assembly_passed_loci.add(contig_name)
parsed_contig = os.path.join(contig_dir, contig_name + ".cns.ctg1.fa")
with open(contig, "r") as input:
records = SeqIO.parse(input, "fasta")
for record in records:
if record.id == "ctg1" or record.id == "contig_1":
record.id = contig_name
record.description = "len=" + str(len(record.seq))
SeqIO.write(record, merged_output_handle, "fasta")
with open(parsed_contig, "w") as parsed_output_handle:
SeqIO.write(record, parsed_output_handle, "fasta")
logging.info("Local assembly finished in " + format_time(proc_time))
return merged_contigs, assembly_passed_loci
def run_assembly_polishing(args):
reads = args[0]
asm_dir = args[1]
contig_name = args[2]
thread = args[3]
presets = args[4]
assembler = args[5]
polisher = args[6]
polish_iterations = args[7]
# run assembly
if assembler == "wtdbg2":
asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets)
else:
asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets)
if not check_exist(asm_cns):
print("assembly failed")
return None
# run polishing
if polish_iterations > 0:
if polisher == "wtdbg2":
asm_cns = run_wtdbg2_polishing(
asm_cns, reads, thread, polish_iterations, presets
)
else:
asm_cns = run_flye_polishing(
asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
)
if check_exist(asm_cns):
return asm_cns
else:
return None
def run_flye_polishing(
asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
):
"""Run Flye polishing"""
if presets == "pacbio":
presets_flye = "--pacbio-raw"
else:
presets_flye = "--nano-raw"
tmp_out_dir = os.path.join(asm_dir, contig_name)
mkdir(tmp_out_dir)
try:
subprocess.call(
[
"flye",
"--polish-target",
asm_cns,
presets_flye,
reads,
"--out-dir",
tmp_out_dir,
"--thread",
str(thread),
"--iterations",
str(polish_iterations),
]
)
except Exception as e:
print(e)
print("Polishing failed, exiting...")
return None
# rename contig file
polished_contig = os.path.join(
tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta"
)
if check_exist(polished_contig):
os.rename(polished_contig, asm_cns)
shutil.rmtree(tmp_out_dir)
return asm_cns
else:
return None
def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets):
"""Run wtdbg2 polishing"""
if presets == "pacbio":
presets_minimap2 = "map-pb"
else:
presets_minimap2 = "map-ont"
# polish consensus
threads = str(min(threads, 4))
bam = asm_cns + ".bam"
k = 0
while True:
# align reads to contigs
command = (
"minimap2 -t "
+ threads
+ " -ax "
+ presets_minimap2
+ " -r2k "
+ asm_cns
+ " "
+ reads
+ " | samtools sort -@"
+ threads
+ " > "
+ bam
)
try:
subprocess.run(
command,
shell=True,
timeout=300,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
except subprocess.TimeoutExpired:
print("fail to map reads to contig: " + asm_cns)
return
# run wtpoa-cns to get polished contig
cns_tmp = asm_cns + ".tmp"
command = (
"samtools view -F0x900 "
+ bam
+ " | wtpoa-cns -t "
+ threads
+ " -d "
+ asm_cns
+ " -i - -fo "
+ cns_tmp
)
try:
subprocess.run(
command,
shell=True,
timeout=300,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
except subprocess.TimeoutExpired:
print("fail to polish contig: " + asm_cns)
return
if check_exist(cns_tmp):
os.rename(cns_tmp, asm_cns)
os.remove(bam)
else:
break
k = k + 1
if k >= polish_iterations:
break
if check_exist(asm_cns):
return asm_cns
else:
print("polishing failed for " + asm_cns + "\n")
return None
def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):
"""Run Flye assembly"""
if presets == "pacbio":
presets_flye = "--pacbio-raw"
else:
presets_flye = "--nano-raw"
tmp_out_dir = os.path.join(asm_dir, contig_name)
mkdir(tmp_out_dir)
try:
subprocess.call(
[
"flye",
presets_flye,
sv_reads,
"--out-dir",
tmp_out_dir,
"--thread",
str(thread),
"--iterations",
"0",
]
)
except Exception as e:
print(e)
print("Assembly failed, exiting...")
return
# rename contigs
contig_path = os.path.join(tmp_out_dir, "assembly.fasta")
contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa")
if check_exist(contig_path):
os.rename(contig_path, contig_path_new)
# remove tmp files
shutil.rmtree(tmp_out_dir)
return contig_path_new
else:
print("assembly failed")
return None
def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):
"""Run wtdbg2 assembly"""
if presets == "pacbio":
presets_wtdbg2 = "rs"
else:
presets_wtdbg2 = "ont"
prefix = sv_reads.replace(".reads.fa", "")
try:
subprocess.run(
[
"wtdbg2",
"-x",
presets_wtdbg2,
"-q",
"-AS",
"1",
"-g",
"30k",
"-t",
str(thread),
"-i",
sv_reads,
"-fo",
prefix,
],
timeout=300,
)
except subprocess.TimeoutExpired:
print("fail to build contig layout for contig: " + contig_name)
return
except Exception as e:
print(e)
print("wtdbg2 failed, exiting...")
return None
# derive consensus
contig_layout = prefix + ".ctg.lay.gz"
if check_exist(contig_layout):
cns_thread = str(min(thread, 4))
consensus = prefix + ".cns.fa"
try:
subprocess.run(
[
"wtpoa-cns",
"-q",
"-t",
cns_thread,
"-i",
contig_layout,
"-fo",
consensus,
],
timeout=300,
)
except subprocess.TimeoutExpired:
print("fail to assemble contig: " + contig_name)
return None
if check_exist(consensus):
consensus_rename = os.path.join(asm_dir, contig_name + ".cns.fa")
os.rename(consensus, consensus_rename)
return consensus_rename
else:
return None
def prep_assembly_inputs(
vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv"
):
"""Prepare reads for local assembly"""
# logging.info("Prepare reads for local assembly")
if read_type == "sv": # TODO: figure out what this does
# extract read IDs
read_ids = os.path.join(out, sample_name + ".id")
with open(vcf_parsed, "r") as input, open(read_ids, "w") as output:
for line in input:
entry = line.replace("\n", "").split("\t")
read_list = entry[8].split(",")
for read in read_list:
output.write(read + "\n")
else: # TODO: think about using this for assembly, filter for cigar reads
window = 1000
samfile = pysam.AlignmentFile(bam, "rb")
read_ids = os.path.join(out, sample_name + ".id")
vcf_parsed_new = vcf_parsed + ".new"
with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open(
vcf_parsed_new, "w"
) as VCF:
for line in input:
entry = line.replace("\n", "").split("\t")
# get sniffles read list
read_list = entry[8].split(",")
reads_sniffles = set(read_list)
ins_chr = entry[0]
ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2)
start = ins_breakpoint - window
end = ins_breakpoint + window
reads = set()
# coverage = 0
for read in samfile.fetch(ins_chr, start, end):
reads.add(read.query_name)
for read in reads:
output.write(read + "\n")
# write
out_line = line.replace("\n", "") + "\t" + str(len(reads))
VCF.write(out_line + "\n")
vcf_parsed = vcf_parsed_new
# generate unique ID list
read_ids_unique = read_ids + ".unique"
command = "cat " + read_ids + " | sort | uniq"
with open(read_ids_unique, "w") as output:
subprocess.call(command, stdout=output, shell=True)
# filter raw reads using read list
subset_fa = os.path.join(out, sample_name + ".subset.fa")
command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a"
with open(subset_fa, "w") as output:
subprocess.call(command, stdout=output, shell=True)
# reorder reads
subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa"
extract_reads(subset_fa, read_ids, subset_fa_reorder)
# separate reads into multiple files, using csplit
mkdir(reads_dir)
csplit_prefix = reads_dir + "/contig"
m = []
k = 1
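    # csplit positions are 1-based line numbers; every read occupies two FASTA
    # lines (header + sequence), so split points grow by 2 * read count per locus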
with open(vcf_parsed, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
if read_type == "sv":
k = k + 2 * (len(entry[8].split(",")))
else:
k = k + 2 * int(entry[14])
m.append(k)
if len(m) == 1:
subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"])
elif len(m) == 0:
print("No insertion detected, exiting...")
else:
m = m[:-1]
index = " ".join(str(i) for i in m)
command = (
"csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index
)
subprocess.call(command, shell=True)
# remove tmp files
os.remove(read_ids)
os.remove(read_ids_unique)
os.remove(subset_fa)
os.remove(subset_fa_reorder)
def extract_reads(reads, list_file, out):
    """Extract reads from fasta using read ID list"""
    record_dict = SeqIO.index(reads, "fasta")
    with open(out, "wb") as output_handle, open(list_file, "r") as ID:
for entry in ID:
entry = entry.replace("\n", "")
output_handle.write(record_dict.get_raw(entry))
| 30.32766 | 88 | 0.525116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,344 | 0.164445 |
8a900957322aa8d59dab3c2935590611098dad34 | 28,015 | py | Python | pygmt/tests/test_clib.py | aliciaha1997/pygmt | a10af5d8deb3bf3090eab4b6492bcf8cf722cb71 | [
"BSD-3-Clause"
] | null | null | null | pygmt/tests/test_clib.py | aliciaha1997/pygmt | a10af5d8deb3bf3090eab4b6492bcf8cf722cb71 | [
"BSD-3-Clause"
] | null | null | null | pygmt/tests/test_clib.py | aliciaha1997/pygmt | a10af5d8deb3bf3090eab4b6492bcf8cf722cb71 | [
"BSD-3-Clause"
] | 1 | 2021-11-03T07:47:18.000Z | 2021-11-03T07:47:18.000Z | # pylint: disable=protected-access
"""
Test the wrappers for the C API.
"""
import os
from contextlib import contextmanager
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import xarray as xr
from packaging.version import Version
from pygmt import Figure, clib
from pygmt.clib.conversion import dataarray_to_matrix
from pygmt.clib.session import FAMILIES, VIAS
from pygmt.exceptions import (
GMTCLibError,
GMTCLibNoSessionError,
GMTInvalidInput,
GMTVersionError,
)
from pygmt.helpers import GMTTempFile
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
with clib.Session() as _lib:
gmt_version = Version(_lib.info["version"])
@contextmanager
def mock(session, func, returns=None, mock_func=None):
"""
Mock a GMT C API function to make it always return a given value.
Used to test that exceptions are raised when API functions fail by
producing a NULL pointer as output or non-zero status codes.
Needed because it's not easy to get some API functions to fail without
inducing a Segmentation Fault (which is a good thing because libgmt usually
only fails with errors).
"""
if mock_func is None:
def mock_api_function(*args): # pylint: disable=unused-argument
"""
A mock GMT API function that always returns a given value.
"""
return returns
mock_func = mock_api_function
get_libgmt_func = session.get_libgmt_func
def mock_get_libgmt_func(name, argtypes=None, restype=None):
"""
Return our mock function.
"""
if name == func:
return mock_func
return get_libgmt_func(name, argtypes, restype)
setattr(session, "get_libgmt_func", mock_get_libgmt_func)
yield
setattr(session, "get_libgmt_func", get_libgmt_func)
def test_getitem():
"""
Test that I can get correct constants from the C lib.
"""
ses = clib.Session()
assert ses["GMT_SESSION_EXTERNAL"] != -99999
assert ses["GMT_MODULE_CMD"] != -99999
assert ses["GMT_PAD_DEFAULT"] != -99999
assert ses["GMT_DOUBLE"] != -99999
with pytest.raises(GMTCLibError):
ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement
def test_create_destroy_session():
"""
Test that create and destroy session are called without errors.
"""
# Create two session and make sure they are not pointing to the same memory
session1 = clib.Session()
session1.create(name="test_session1")
assert session1.session_pointer is not None
session2 = clib.Session()
session2.create(name="test_session2")
assert session2.session_pointer is not None
assert session2.session_pointer != session1.session_pointer
session1.destroy()
session2.destroy()
# Create and destroy a session twice
ses = clib.Session()
for __ in range(2):
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
ses.create("session1")
assert ses.session_pointer is not None
ses.destroy()
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
def test_create_session_fails():
"""
Check that an exception is raised when failing to create a session.
"""
ses = clib.Session()
with mock(ses, "GMT_Create_Session", returns=None):
with pytest.raises(GMTCLibError):
ses.create("test-session-name")
# Should fail if trying to create a session before destroying the old one.
ses.create("test1")
with pytest.raises(GMTCLibError):
ses.create("test2")
def test_destroy_session_fails():
"""
Fail to destroy session when given bad input.
"""
ses = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
ses.destroy()
ses.create("test-session")
with mock(ses, "GMT_Destroy_Session", returns=1):
with pytest.raises(GMTCLibError):
ses.destroy()
ses.destroy()
def test_call_module():
"""
Run a command to see if call_module works.
"""
data_fname = os.path.join(TEST_DATA_DIR, "points.txt")
out_fname = "test_call_module.txt"
with clib.Session() as lib:
with GMTTempFile() as out_fname:
lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name))
assert os.path.exists(out_fname.name)
output = out_fname.read().strip()
assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338"
def test_call_module_invalid_arguments():
"""
Fails for invalid module arguments.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("info", "bogus-data.bla")
def test_call_module_invalid_name():
"""
Fails when given bad input.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("meh", "")
def test_call_module_error_message():
"""
Check is the GMT error message was captured.
"""
with clib.Session() as lib:
try:
lib.call_module("info", "bogus-data.bla")
except GMTCLibError as error:
assert "Module 'info' failed with status code" in str(error)
assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error)
def test_method_no_session():
"""
Fails when not in a session.
"""
# Create an instance of Session without "with" so no session is created.
lib = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
lib.call_module("gmtdefaults", "")
with pytest.raises(GMTCLibNoSessionError):
lib.session_pointer # pylint: disable=pointless-statement
def test_parse_constant_single():
"""
Parsing a single family argument correctly.
"""
lib = clib.Session()
for family in FAMILIES:
parsed = lib._parse_constant(family, valid=FAMILIES)
assert parsed == lib[family]
def test_parse_constant_composite():
"""
Parsing a composite constant argument (separated by |) correctly.
"""
lib = clib.Session()
test_cases = ((family, via) for family in FAMILIES for via in VIAS)
for family, via in test_cases:
composite = "|".join([family, via])
expected = lib[family] + lib[via]
parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS)
assert parsed == expected
def test_parse_constant_fails():
"""
Check if the function fails when given bad input.
"""
lib = clib.Session()
test_cases = [
"SOME_random_STRING",
"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR",
"GMT_IS_DATASET|NOT_A_PROPER_VIA",
"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX",
"NOT_A_PROPER_FAMILY|ALSO_INVALID",
]
for test_case in test_cases:
with pytest.raises(GMTInvalidInput):
lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS)
# Should also fail if not given valid modifiers but is using them anyway.
# This should work...
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS
)
# But this shouldn't.
with pytest.raises(GMTInvalidInput):
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None
)
def test_create_data_dataset():
"""
Run the function to make sure it doesn't fail badly.
"""
with clib.Session() as lib:
# Dataset from vectors
data_vector = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0], # columns, rows, layers, dtype
)
# Dataset from matrices
data_matrix = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_MATRIX",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
assert data_vector != data_matrix
def test_create_data_grid_dim():
"""
Create a grid ignoring range and inc.
"""
with clib.Session() as lib:
# Grids from matrices using dim
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
def test_create_data_grid_range():
"""
Create a grid specifying range and inc instead of dim.
"""
with clib.Session() as lib:
# Grids from matrices using range and int
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
def test_create_data_fails():
"""
Check that create_data raises exceptions for invalid input and output.
"""
# Passing in invalid mode
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="Not_a_valid_mode",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# Passing in invalid geometry
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_GRID",
geometry="Not_a_valid_geometry",
mode="GMT_CONTAINER_ONLY",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# If the data pointer returned is None (NULL pointer)
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
with mock(lib, "GMT_Create_Data", returns=None):
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[11, 10, 2, 0],
)
def test_virtual_file():
"""
Test passing in data via a virtual file with a Dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (5, 3)
for dtype in dtypes:
with clib.Session() as lib:
family = "GMT_IS_DATASET|GMT_VIA_MATRIX"
geometry = "GMT_IS_POINT"
dataset = lib.create_data(
family=family,
geometry=geometry,
mode="GMT_CONTAINER_ONLY",
dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype
)
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
lib.put_matrix(dataset, matrix=data)
# Add the dataset to a virtual file and pass it along to gmt info
vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset)
with lib.open_virtual_file(*vfargs) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtual_file_fails():
"""
Check that opening and closing virtual files raises an exception for non-
zero return codes.
"""
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IN|GMT_IS_REFERENCE",
None,
)
# Mock Open_VirtualFile to test the status check when entering the context.
# If the exception is raised, the code won't get to the closing of the
# virtual file.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
print("Should not get to this code")
# Test the status check when closing the virtual file
# Mock the opening to return 0 (success) so that we don't open a file that
# we won't close later.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock(
lib, "GMT_Close_VirtualFile", returns=1
):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
pass
print("Shouldn't get to this code either")
def test_virtual_file_bad_direction():
"""
Test passing an invalid direction argument.
"""
with clib.Session() as lib:
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IS_GRID", # The invalid direction argument
0,
)
with pytest.raises(GMTInvalidInput):
with lib.open_virtual_file(*vfargs):
print("This should have failed")
def test_virtualfile_from_vectors():
"""
Test the automation for transforming vectors to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 10
for dtype in dtypes:
x = np.arange(size, dtype=dtype)
y = np.arange(size, size * 2, 1, dtype=dtype)
z = np.arange(size * 2, size * 3, 1, dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_one_string_or_object_column(dtype):
"""
Test passing in one column with string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings))
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):
"""
Test passing in two columns of string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(
f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2)
)
assert output == expected
def test_virtualfile_from_vectors_transpose():
"""
Test transforming matrix columns to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(*data.T) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T]
)
expected = "{}\n".format(bounds)
assert output == expected
def test_virtualfile_from_vectors_diff_size():
"""
Test the function fails for arrays of different sizes.
"""
x = np.arange(5)
y = np.arange(6)
with clib.Session() as lib:
with pytest.raises(GMTInvalidInput):
with lib.virtualfile_from_vectors(x, y):
print("This should have failed")
def test_virtualfile_from_matrix():
"""
Test transforming a matrix to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtualfile_from_matrix_slice():
"""
Test transforming a slice of a larger array to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (10, 6)
for dtype in dtypes:
full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
rows = 5
cols = 3
data = full_data[:rows, :cols]
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds)
assert output == expected
def test_virtualfile_from_vectors_pandas():
"""
Pass vectors to a dataset using pandas Series.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 13
for dtype in dtypes:
data = pd.DataFrame(
data=dict(
x=np.arange(size, dtype=dtype),
y=np.arange(size, size * 2, 1, dtype=dtype),
z=np.arange(size * 2, size * 3, 1, dtype=dtype),
)
)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
[
"<{:.0f}/{:.0f}>".format(i.min(), i.max())
for i in (data.x, data.y, data.z)
]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_virtualfile_from_vectors_arraylike():
"""
Pass array-like vectors to a dataset.
"""
size = 13
x = list(range(0, size, 1))
y = tuple(range(size, size * 2, 1))
z = range(size * 2, size * 3, 1)
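    # Lists, tuples, and plain ranges should all be accepted and converted to
    # vectors internally.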
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_extract_region_fails():
"""
Check that extract region fails if nothing has been plotted.
"""
Figure()
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
lib.extract_region()
def test_extract_region_two_figures():
"""
Extract region should handle multiple figures existing at the same time.
"""
# Make two figures before calling extract_region to make sure that it's
    # getting the region from the current figure, not the last one.
fig1 = Figure()
region1 = np.array([0, 10, -20, -10])
fig1.coast(region=region1, projection="M6i", frame=True, land="black")
fig2 = Figure()
fig2.basemap(region="US.HI+r5", projection="M6i", frame=True)
# Activate the first figure and extract the region from it
# Use in a different session to avoid any memory problems.
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig1._name))
with clib.Session() as lib:
wesn1 = lib.extract_region()
npt.assert_allclose(wesn1, region1)
# Now try it with the second one
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig2._name))
with clib.Session() as lib:
wesn2 = lib.extract_region()
npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0]))
def test_write_data_fails():
"""
Check that write data raises an exception for non-zero return codes.
"""
# It's hard to make the C API function fail without causing a Segmentation
    # Fault. Can't test this by giving a bad file name because if
# output=='', GMT will just write to stdout and spaces are valid file
# names. Use a mock instead just to exercise this part of the code.
with clib.Session() as lib:
with mock(lib, "GMT_Write_Data", returns=1):
with pytest.raises(GMTCLibError):
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
[1] * 6,
"some-file-name",
None,
)
def test_dataarray_to_matrix_works():
"""
Check that dataarray_to_matrix returns correct output.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
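    # With y increasing, the rows are flipped so that the first row of the
    # matrix corresponds to the maximum y value (the northernmost row).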
npt.assert_allclose(actual=matrix, desired=np.flipud(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])
def test_dataarray_to_matrix_negative_x_increment():
"""
    Check that dataarray_to_matrix returns correct output with flipped x.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
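    # x decreasing flips the columns and y increasing flips the rows, so the
    # expected matrix is flipped along both axes.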
npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1)))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
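    # y is already decreasing (north to south), so no row flip is needed and
    # the matrix should match the input data.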
npt.assert_allclose(actual=matrix, desired=data)
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_x_and_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped x/y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
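    # y decreasing needs no row flip, while x decreasing flips the columns,
    # so only a left-right flip is expected.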
npt.assert_allclose(actual=matrix, desired=np.fliplr(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_dims_fails():
"""
Check that it fails for > 2 dims.
"""
# Make a 3D regular grid
data = np.ones((10, 12, 11), dtype="float32")
x = np.arange(11)
y = np.arange(12)
z = np.arange(10)
grid = xr.DataArray(data, coords=[("z", z), ("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_dataarray_to_matrix_inc_fails():
"""
Check that it fails for variable increments.
"""
data = np.ones((4, 5), dtype="float64")
x = np.linspace(0, 1, 5)
y = np.logspace(2, 3, 4)
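    # logspace gives unevenly spaced y coordinates, which should be rejected
    # because GMT grids require a constant increment along each axis.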
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_get_default():
"""
Make sure get_default works without crashing and gives reasonable results.
"""
with clib.Session() as lib:
assert lib.get_default("API_GRID_LAYOUT") in ["rows", "columns"]
assert int(lib.get_default("API_CORES")) >= 1
assert Version(lib.get_default("API_VERSION")) >= Version("6.2.0")
def test_get_default_fails():
"""
Make sure get_default raises an exception for invalid names.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.get_default("NOT_A_VALID_NAME")
def test_info_dict():
"""
Make sure the clib.Session.info dict is working.
"""
    # Check that getting all of the properties raises no errors or segfaults.
with clib.Session() as lib:
assert lib.info
    # Mock GMT_Get_Default to always return the same string
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Put 'bla' in the value buffer.
"""
value.value = b"bla"
return 0
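    # Create the session by hand and patch GMT_Get_Default while the info
    # dictionary is being built.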
ses = clib.Session()
ses.create("test-session")
with mock(ses, "GMT_Get_Default", mock_func=mock_defaults):
        # Check that the dictionary is not empty
assert ses.info
for key in ses.info:
assert ses.info[key] == "bla"
ses.destroy()
def test_fails_for_wrong_version():
"""
Make sure the clib.Session raises an exception if GMT is too old.
"""
# Mock GMT_Get_Default to return an old version
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Return an old version.
"""
if name == b"API_VERSION":
value.value = b"5.4.3"
else:
value.value = b"bla"
return 0
lib = clib.Session()
with mock(lib, "GMT_Get_Default", mock_func=mock_defaults):
with pytest.raises(GMTVersionError):
with lib:
assert lib.info["version"] != "5.4.3"
# Make sure the session is closed when the exception is raised.
with pytest.raises(GMTCLibNoSessionError):
assert lib.session_pointer
# File: stubs/_pytest/_code.py (repo: questioneer-ltd/scrut, license: MIT)
"""Type stubs for _pytest._code."""
# This class actually has more functions than are specified here. We don't use
# those features, so it's not worth including them in our type stub. We can
# always extend it later.
class ExceptionInfo:
@property
def value(self) -> Exception: ...
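# Note: this is the type that pytest.raises() yields, so e.g. excinfo.value
# gives the caught exception instance.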
# File: Prime Factorization/prime_factorization_II.py (repo: rayvantsahni/Let-us-Math, license: MIT)
def get_primes(n):
    primes = []  # stores the prime numbers up to and including n
    sieve = [False] * (n + 1)  # sieve[i] is True when i is known to be composite
    sieve[0] = sieve[1] = True  # 0 and 1 are not prime
    for i in range(2, n + 1):  # check every number in the range
        if sieve[i]:  # already marked as composite
            continue  # skip it
        primes.append(i)  # an unmarked number is prime
        for j in range(i ** 2, n + 1, i):  # mark multiples, starting from the prime's square
            sieve[j] = True  # multiples of a prime are composite
    return primes  # the list of prime numbers up to n
def get_factorization(n):
    prime_factors = []  # stores the prime factorization of the number
    for prime in get_primes(n):  # loop over the candidate prime factors
        while n != 1:  # keep dividing until n is reduced to 1
            if n % prime == 0:  # the prime divides n
                prime_factors.append(prime)  # record the prime factor
                n //= prime  # divide it out, keeping n an integer
            else:
                break  # not divisible by this prime; move on to the next one
    return prime_factors  # the prime factorization of the original number
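# For example, get_factorization(60) returns [2, 2, 3, 5].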
if __name__ == "__main__":
n = int(input("Enter a number: "))
print(get_factorization(n))