repo_name (string, 6..100 chars) | path (string, 4..294 chars) | copies (string, 1..5 chars) | size (string, 4..6 chars) | content (string, 606..896k chars) | license (string, 15 classes) | var_hash (int64, -9,223,186,179,200,150,000..9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000..9,223,309,051B) | line_mean (float64, 3.5..99.8) | line_max (int64, 13..999) | alpha_frac (float64, 0.25..0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
132nd-etcher/EMFT | emft/gui/tab_about.py | 1 | 1269 | # coding=utf-8
from emft.core import constant
from emft.core.logging import make_logger
from emft.gui.base import GridLayout, HSpacer, Label, VLayout, VSpacer
from emft.gui.main_ui_tab_widget import MainUiTabChild
LOGGER = make_logger(__name__)
class TabChildAbout(MainUiTabChild):
def tab_clicked(self):
pass
@property
def tab_title(self) -> str:
return 'About'
def __init__(self, parent=None):
super(TabChildAbout, self).__init__(parent)
repo_label = Label(
'''<a href='{link}'>{link}</a>'''.format(link=constant.LINK_REPO)
)
repo_label.setOpenExternalLinks(True)
changelog_label = Label(
'''<a href='{link}'>{link}</a>'''.format(link=constant.LINK_CHANGELOG)
)
changelog_label.setOpenExternalLinks(True)
self.setLayout(
VLayout(
[
GridLayout(
[
[Label('Github repository: '), repo_label, HSpacer()],
[Label('Changelog: '), changelog_label, HSpacer()],
],
[0, 0, 1]
),
VSpacer(),
]
)
)
| gpl-3.0 | 780,620,645,744,232,400 | -6,537,607,988,053,126,000 | 27.2 | 82 | 0.502758 | false |
jfinkels/networkx | networkx/readwrite/graph6.py | 3 | 7803 | # Original author: D. Eppstein, UC Irvine, August 12, 2003.
# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# Tomas Gavenciak <[email protected]>
# All rights reserved.
# BSD license.
#
# Authors: Tomas Gavenciak <[email protected]>
# Aric Hagberg <[email protected]>
"""Functions for reading and writing graphs in the *graph6* format.
The *graph6* file format is suitable for small graphs or large dense
graphs. For large sparse graphs, use the *sparse6* format.
For more information, see the `graph6`_ homepage.
.. _graph6: http://users.cecs.anu.edu.au/~bdm/data/formats.html
"""
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file, not_implemented_for
__all__ = ['read_graph6', 'parse_graph6', 'generate_graph6', 'write_graph6']
def parse_graph6(string):
"""Read a simple undirected graph in graph6 format from string.
Parameters
----------
string : string
Data in graph6 format
Returns
-------
G : Graph
Raises
------
NetworkXError
If the string is unable to be parsed in graph6 format
Examples
--------
>>> G = nx.parse_graph6('A_')
>>> sorted(G.edges())
[(0, 1)]
See Also
--------
generate_graph6, read_graph6, write_graph6
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
def bits():
"""Return sequence of individual bits from 6-bit-per-value
list of data values."""
for d in data:
for i in [5,4,3,2,1,0]:
yield (d>>i)&1
if string.startswith('>>graph6<<'):
string = string[10:]
data = graph6_to_data(string)
n, data = data_to_n(data)
nd = (n*(n-1)//2 + 5) // 6
if len(data) != nd:
raise NetworkXError(\
'Expected %d bits but got %d in graph6' % (n*(n-1)//2, len(data)*6))
G=nx.Graph()
G.add_nodes_from(range(n))
for (i,j),b in zip([(i,j) for j in range(1,n) for i in range(j)], bits()):
if b:
G.add_edge(i,j)
return G
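# Worked example (sketch) of the decoding above: for the string 'A_',
# ord('A') - 63 == 2 yields n = 2, and ord('_') - 63 == 32 == 0b100000; its
# high bit is 1, so the single candidate edge (0, 1) is present, matching the
# parse_graph6('A_') doctest.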
@open_file(0,mode='rt')
def read_graph6(path):
"""Read simple undirected graphs in graph6 format from path.
Parameters
----------
path : file or string
File or filename to write.
Returns
-------
G : Graph or list of Graphs
If the file contains multiple lines then a list of graphs is returned
Raises
------
NetworkXError
If the string is unable to be parsed in graph6 format
Examples
--------
>>> nx.write_graph6(nx.Graph([(0,1)]), 'test.g6')
>>> G = nx.read_graph6('test.g6')
>>> sorted(G.edges())
[(0, 1)]
See Also
--------
generate_graph6, parse_graph6, write_graph6
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
glist = []
for line in path:
line = line.strip()
if not len(line):
continue
glist.append(parse_graph6(line))
if len(glist) == 1:
return glist[0]
else:
return glist
@not_implemented_for('directed','multigraph')
def generate_graph6(G, nodes = None, header=True):
"""Generate graph6 format string from a simple undirected graph.
Parameters
----------
G : Graph (undirected)
nodes: list or iterable
Nodes are labeled 0...n-1 in the order provided. If None the ordering
given by G.nodes() is used.
header: bool
If True add '>>graph6<<' string to head of data
Returns
-------
s : string
String in graph6 format
Raises
------
NetworkXError
If the graph is directed or has parallel edges
Examples
--------
>>> G = nx.Graph([(0, 1)])
>>> nx.generate_graph6(G)
'>>graph6<<A_'
See Also
--------
read_graph6, parse_graph6, write_graph6
Notes
-----
The format does not support edge or node labels, parallel edges or
self loops. If self loops are present they are silently ignored.
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
if nodes is not None:
G = G.subgraph(nodes)
H = nx.convert_node_labels_to_integers(G)
ns = sorted(H.nodes())
    def bits():
        for (i,j) in [(i,j) for j in range(1,n) for i in range(j)]:
            yield H.has_edge(ns[i],ns[j])
n = G.order()
data = n_to_data(n)
d = 0
flush = False
for i, b in zip(range(n * n), bits()):
d |= b << (5 - (i % 6))
flush = True
if i % 6 == 5:
data.append(d)
d = 0
flush = False
if flush:
data.append(d)
string_data = data_to_graph6(data)
if header:
string_data = '>>graph6<<' + string_data
return string_data
@open_file(1, mode='wt')
def write_graph6(G, path, nodes = None, header=True):
"""Write a simple undirected graph to path in graph6 format.
Parameters
----------
G : Graph (undirected)
path : file or string
File or filename to write.
nodes: list or iterable
Nodes are labeled 0...n-1 in the order provided. If None the ordering
given by G.nodes() is used.
header: bool
If True add '>>graph6<<' string to head of data
Raises
------
NetworkXError
If the graph is directed or has parallel edges
Examples
--------
>>> G = nx.Graph([(0, 1)])
>>> nx.write_graph6(G, 'test.g6')
See Also
--------
generate_graph6, parse_graph6, read_graph6
Notes
-----
The format does not support edge or node labels, parallel edges or
self loops. If self loops are present they are silently ignored.
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
path.write(generate_graph6(G, nodes=nodes, header=header))
path.write('\n')
# helper functions
def graph6_to_data(string):
"""Convert graph6 character sequence to 6-bit integers."""
v = [ord(c)-63 for c in string]
if len(v) > 0 and (min(v) < 0 or max(v) > 63):
return None
return v
def data_to_graph6(data):
"""Convert 6-bit integer sequence to graph6 character sequence."""
if len(data) > 0 and (min(data) < 0 or max(data) > 63):
raise NetworkXError("graph6 data units must be within 0..63")
return ''.join([chr(d+63) for d in data])
def data_to_n(data):
"""Read initial one-, four- or eight-unit value from graph6
integer sequence.
Return (value, rest of seq.)"""
if data[0] <= 62:
return data[0], data[1:]
if data[1] <= 62:
return (data[1]<<12) + (data[2]<<6) + data[3], data[4:]
return ((data[2]<<30) + (data[3]<<24) + (data[4]<<18) +
(data[5]<<12) + (data[6]<<6) + data[7], data[8:])
def n_to_data(n):
"""Convert an integer to one-, four- or eight-unit graph6 sequence."""
if n < 0:
raise NetworkXError("Numbers in graph6 format must be non-negative.")
if n <= 62:
return [n]
if n <= 258047:
return [63, (n>>12) & 0x3f, (n>>6) & 0x3f, n & 0x3f]
if n <= 68719476735:
return [63, 63,
(n>>30) & 0x3f, (n>>24) & 0x3f, (n>>18) & 0x3f,
(n>>12) & 0x3f, (n>>6) & 0x3f, n & 0x3f]
raise NetworkXError("Numbers above 68719476735 are not supported by graph6")
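# Round-trip sketch for the two helpers above: n = 63 exceeds the one-unit
# range, so n_to_data(63) == [63, 0, 0, 63], and data_to_n([63, 0, 0, 63])
# recovers (0 << 12) + (0 << 6) + 63 == 63 plus an empty remainder.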
def teardown_module(module):
import os
if os.path.isfile('test.g6'):
os.unlink('test.g6')
| bsd-3-clause | 8,297,269,426,694,884,000 | -3,826,876,541,812,963,000 | 25.09699 | 80 | 0.570934 | false |
imply/chuu | ppapi/generators/idl_lexer.py | 62 | 9292 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Lexer for PPAPI IDL """
#
# IDL Lexer
#
# The lexer is uses the PLY lex library to build a tokenizer which understands
# WebIDL tokens.
#
# WebIDL, and WebIDL regular expressions can be found at:
# http://dev.w3.org/2006/webapi/WebIDL/
# PLY can be found at:
# http://www.dabeaz.com/ply/
import os.path
import re
import sys
#
# Try to load the ply module, if not, then assume it is in the third_party
# directory, relative to ppapi
#
try:
from ply import lex
except:
module_path, module_name = os.path.split(__file__)
third_party = os.path.join(module_path, '..', '..', 'third_party')
sys.path.append(third_party)
from ply import lex
from idl_option import GetOption, Option, ParseOptions
Option('output', 'Generate output.')
#
# IDL Lexer
#
class IDLLexer(object):
# 'tokens' is a value required by lex which specifies the complete list
# of valid token types.
tokens = [
# Symbol and keywords types
'COMMENT',
'DESCRIBE',
'ENUM',
'LABEL',
'SYMBOL',
'INLINE',
'INTERFACE',
'STRUCT',
'TYPEDEF',
# Extra WebIDL keywords
'CALLBACK',
'DICTIONARY',
'OPTIONAL',
'STATIC',
# Invented for apps use
'NAMESPACE',
# Data types
'FLOAT',
'OCT',
'INT',
'HEX',
'STRING',
# Operators
'LSHIFT',
'RSHIFT'
]
# 'keywords' is a map of string to token type. All SYMBOL tokens are
# matched against keywords, to determine if the token is actually a keyword.
keywords = {
'describe' : 'DESCRIBE',
'enum' : 'ENUM',
'label' : 'LABEL',
'interface' : 'INTERFACE',
'readonly' : 'READONLY',
'struct' : 'STRUCT',
'typedef' : 'TYPEDEF',
'callback' : 'CALLBACK',
'dictionary' : 'DICTIONARY',
'optional' : 'OPTIONAL',
'static' : 'STATIC',
'namespace' : 'NAMESPACE',
}
# 'literals' is a value expected by lex which specifies a list of valid
# literal tokens, meaning the token type and token value are identical.
literals = '"*.(){}[],;:=+-/~|&^?'
# Token definitions
#
# Lex assumes any value or function in the form of 't_<TYPE>' represents a
# regular expression where a match will emit a token of type <TYPE>. In the
# case of a function, the function is called when a match is made. These
# definitions come from WebIDL.
# 't_ignore' is a special match of items to ignore
t_ignore = ' \t'
# Constant values
t_FLOAT = r'-?(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?|-?\d+[Ee][+-]?\d+'
t_INT = r'-?[0-9]+[uU]?'
t_OCT = r'-?0[0-7]+'
t_HEX = r'-?0[Xx][0-9A-Fa-f]+'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
# A line ending '\n', we use this to increment the line number
def t_LINE_END(self, t):
r'\n+'
self.AddLines(len(t.value))
# We do not process escapes in the IDL strings. Strings are exclusively
# used for attributes, and not used as typical 'C' constants.
def t_STRING(self, t):
r'"[^"]*"'
t.value = t.value[1:-1]
self.AddLines(t.value.count('\n'))
return t
# A C or C++ style comment: /* xxx */ or //
def t_COMMENT(self, t):
r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
self.AddLines(t.value.count('\n'))
return t
# Return a "preprocessor" inline block
def t_INLINE(self, t):
r'\#inline (.|\n)*?\#endinl.*'
self.AddLines(t.value.count('\n'))
return t
# A symbol or keyword.
def t_KEYWORD_SYMBOL(self, t):
r'_?[A-Za-z][A-Za-z_0-9]*'
# All non-keywords are assumed to be symbols
t.type = self.keywords.get(t.value, 'SYMBOL')
# We strip leading underscores so that you can specify symbols with the same
  # value as a keyword (e.g. a dictionary named 'interface').
if t.value[0] == '_':
t.value = t.value[1:]
return t
def t_ANY_error(self, t):
msg = "Unrecognized input"
line = self.lexobj.lineno
# If that line has not been accounted for, then we must have hit
# EoF, so compute the beginning of the line that caused the problem.
if line >= len(self.index):
# Find the offset in the line of the first word causing the issue
word = t.value.split()[0]
offs = self.lines[line - 1].find(word)
# Add the computed line's starting position
self.index.append(self.lexobj.lexpos - offs)
msg = "Unexpected EoF reached after"
pos = self.lexobj.lexpos - self.index[line]
file = self.lexobj.filename
out = self.ErrorMessage(file, line, pos, msg)
sys.stderr.write(out + '\n')
self.lex_errors += 1
def AddLines(self, count):
# Set the lexer position for the beginning of the next line. In the case
# of multiple lines, tokens can not exist on any of the lines except the
# last one, so the recorded value for previous lines are unused. We still
# fill the array however, to make sure the line count is correct.
self.lexobj.lineno += count
for i in range(count):
self.index.append(self.lexobj.lexpos)
def FileLineMsg(self, file, line, msg):
if file: return "%s(%d) : %s" % (file, line + 1, msg)
return "<BuiltIn> : %s" % msg
def SourceLine(self, file, line, pos):
caret = '\t^'.expandtabs(pos)
# We decrement the line number since the array is 0 based while the
# line numbers are 1 based.
return "%s\n%s" % (self.lines[line - 1], caret)
def ErrorMessage(self, file, line, pos, msg):
return "\n%s\n%s" % (
self.FileLineMsg(file, line, msg),
self.SourceLine(file, line, pos))
def SetData(self, filename, data):
# Start with line 1, not zero
self.lexobj.lineno = 1
self.lexobj.filename = filename
self.lines = data.split('\n')
self.index = [0]
self.lexobj.input(data)
self.lex_errors = 0
def __init__(self):
self.lexobj = lex.lex(object=self, lextab=None, optimize=0)
#
# FilesToTokens
#
# From a set of source file names, generate a list of tokens.
#
def FilesToTokens(filenames, verbose=False):
lexer = IDLLexer()
outlist = []
for filename in filenames:
data = open(filename).read()
lexer.SetData(filename, data)
if verbose: sys.stdout.write(' Loaded %s...\n' % filename)
while 1:
t = lexer.lexobj.token()
if t is None: break
outlist.append(t)
return outlist
def TokensFromText(text):
lexer = IDLLexer()
lexer.SetData('unknown', text)
outlist = []
while 1:
t = lexer.lexobj.token()
if t is None: break
outlist.append(t.value)
return outlist
#
# TextToTokens
#
# From a block of text, generate a list of tokens
#
def TextToTokens(source):
lexer = IDLLexer()
outlist = []
lexer.SetData('AUTO', source)
while 1:
t = lexer.lexobj.token()
if t is None: break
outlist.append(t.value)
return outlist
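# Usage sketch (hypothetical input, not shipped with this file):
# TextToTokens('interface Foo { };') returns
# ['interface', 'Foo', '{', '}', ';']; 'interface' hits the keyword table,
# 'Foo' falls through to SYMBOL, and the rest are literal tokens.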
#
# TestSame
#
# From a set of token values, generate a new source text by joining with a
# single space. The new source is then tokenized and compared against the
# old set.
#
def TestSame(values1):
# Recreate the source from the tokens. We use newline instead of whitespace
# since the '//' and #inline regex are line sensitive.
text = '\n'.join(values1)
values2 = TextToTokens(text)
count1 = len(values1)
count2 = len(values2)
if count1 != count2:
print "Size mismatch original %d vs %d\n" % (count1, count2)
if count1 > count2: count1 = count2
for i in range(count1):
if values1[i] != values2[i]:
print "%d >>%s<< >>%s<<" % (i, values1[i], values2[i])
  src1 = '\n'.join(values1)
  src2 = '\n'.join(values2)
  if GetOption('output'):
    sys.stdout.write('Generating original.txt and tokenized.txt\n')
    open('original.txt', 'w').write(src1)
    open('tokenized.txt', 'w').write(src2)
  if values1 == values2:
    sys.stdout.write('Same: Pass\n')
    return 0
  print "****************\n%s\n%s***************\n" % (src1, src2)
  sys.stdout.write('Same: Failed\n')
  return -1
#
# TestExpect
#
# From a set of tokens pairs, verify the type field of the second matches
# the value of the first, so that:
# INT 123 FLOAT 1.1
# will generate a passing test, where the first token is the SYMBOL INT,
# and the second token is the INT 123, third token is the SYMBOL FLOAT and
# the fourth is the FLOAT 1.1, etc...
def TestExpect(tokens):
count = len(tokens)
index = 0
errors = 0
while index < count:
type = tokens[index].value
token = tokens[index + 1]
index += 2
if type != token.type:
sys.stderr.write('Mismatch: Expected %s, but got %s = %s.\n' %
(type, token.type, token.value))
errors += 1
if not errors:
sys.stdout.write('Expect: Pass\n')
return 0
sys.stdout.write('Expect: Failed\n')
return -1
def Main(args):
filenames = ParseOptions(args)
try:
tokens = FilesToTokens(filenames, GetOption('verbose'))
values = [tok.value for tok in tokens]
if GetOption('output'): sys.stdout.write(' <> '.join(values) + '\n')
if GetOption('test'):
if TestSame(values):
return -1
if TestExpect(tokens):
return -1
return 0
except lex.LexError as le:
sys.stderr.write('%s\n' % str(le))
return -1
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | 5,317,699,096,860,631,000 | 7,263,202,688,237,150,000 | 25.624642 | 80 | 0.622901 | false |
sanger-pathogens/gff3toembl | gff3toembl/EMBLConverter.py | 3 | 1946 | import gff3toembl
from gt import CustomVisitor
from gff3toembl.EMBLContig import EMBLContig
class EMBLConverter(CustomVisitor):
def __init__(self, locus_tag=None, translation_table=11):
CustomVisitor.__init__(self)
self.contigs = {}
self.locus_tag = locus_tag
self.translation_table = translation_table
def visit_feature_node(self, feature_node):
sequence_id = feature_node.get_seqid()
contig = self.contigs.get(sequence_id)
if contig: # contig already exists, just try and update it
contig.add_feature(sequence_id = sequence_id, feature_type = feature_node.get_type(), start = feature_node.get_start(),
end = feature_node.get_end(), strand = feature_node.get_strand(),
feature_attributes = feature_node.attribs,
locus_tag = self.locus_tag, translation_table = self.translation_table)
else:
contig = EMBLContig()
successfully_added_feature = contig.add_feature(sequence_id = sequence_id, feature_type = feature_node.get_type(), start = feature_node.get_start(),
end = feature_node.get_end(), strand = feature_node.get_strand(),
feature_attributes = feature_node.attribs,
locus_tag = self.locus_tag, translation_table = self.translation_table)
if successfully_added_feature:
self.contigs[sequence_id] = contig
else:
pass # discard the contig because we didn't add a feature so it is empty
def visit_region_node(self, region_node):
pass # for now
def visit_comment_node(self, comment_node):
pass # for now
def visit_sequence_node(self, sequence_node):
sequence_id = sequence_node.get_description()
contig = self.contigs.setdefault(sequence_id, EMBLContig())
contig.add_sequence(sequence_node.get_sequence())
| gpl-3.0 | 1,926,829,460,692,399,900 | -708,487,220,745,068,800 | 46.463415 | 156 | 0.633607 | false |
SanPen/GridCal | src/GridCal/Engine/Sparse/utils.py | 1 | 2463 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def slice_to_range(sl: slice, n):
"""
Turn a slice into a range
:param sl: slice object
:param n: total number of items
:return: range object, if the slice is not supported an exception is raised
"""
    if sl.start is None and sl.step is None and sl.stop is None:  # (:)
        return range(n)
    elif sl.start is not None and sl.step is None and sl.stop is None:  # (a:)
        return range(sl.start, n)
    elif sl.start is not None and sl.step is not None and sl.stop is None:  # (a::s)
        raise Exception('Invalid slice')
    elif sl.start is not None and sl.step is None and sl.stop is not None:  # (a:b)
        return range(sl.start, sl.stop)
    elif sl.start is not None and sl.step is not None and sl.stop is not None:  # (a:b:s)
        return range(sl.start, sl.stop, sl.step)
    elif sl.start is None and sl.step is None and sl.stop is not None:  # (:b)
        return range(sl.stop)
    else:
        raise Exception('Invalid slice')
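# Worked examples (sketch) for the branches above, assuming n = 5:
# slice(None, None, None) -> range(0, 5); slice(2, None, None) -> range(2, 5);
# slice(1, 4, None) -> range(1, 4); slice(1, 4, 2) -> range(1, 4, 2);
# slice(None, 3, None) -> range(0, 3).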
def dense_to_str(mat: np.ndarray):
"""
Turn dense 2D numpy array into a string
:param mat: 2D numpy array
:return: string
"""
rows, cols = mat.shape
val = "Matrix (" + ("%d" % rows) + " x " + ("%d" % cols) + ")\n"
val += str(mat).replace('. ', ' ').replace('[', ' ').replace(']', '').replace('0 ', '_ ').replace('0.', '_ ')
# for i in range(0, rows):
# for j in range(0, cols):
# x = mat[i, j]
# if x is not None:
# if x == 0:
# val += '{:<4}'.format(0)
# else:
# val += '{:<4}'.format(x)
# else:
# val += ""
# val += '\n'
# for rows in M:
# print(*['{:<4}'.format(each) for each in rows])
return val
| gpl-3.0 | 6,019,726,522,540,913,000 | -995,721,098,688,405,400 | 33.208333 | 113 | 0.587089 | false |
trashkalmar/omim | tools/python/mwm/dump_mwm.py | 10 | 1418 | #!/usr/bin/python
import sys, os.path, random
import json
from mwm import MWM
if len(sys.argv) < 2:
print('Dumps some MWM structures.')
print('Usage: {0} <country.mwm>'.format(sys.argv[0]))
sys.exit(1)
mwm = MWM(open(sys.argv[1], 'rb'))
mwm.read_types(os.path.join(os.path.dirname(sys.argv[0]), '..', '..', '..', 'data', 'types.txt'))
print('Tags:')
tvv = sorted([(k, v[0], v[1]) for k, v in mwm.tags.items()], key=lambda x: x[1])
for tv in tvv:
print(' {0:<8}: offs {1:9} len {2:8}'.format(tv[0], tv[1], tv[2]))
v = mwm.read_version()
print('Format: {0}, version: {1}'.format(v['fmt'], v['date'].strftime('%Y-%m-%d %H:%M')))
print('Header: {0}'.format(mwm.read_header()))
print('Region Info: {0}'.format(mwm.read_region_info()))
print('Metadata count: {0}'.format(len(mwm.read_metadata())))
cross = mwm.read_crossmwm()
if cross:
print('Outgoing points: {0}, incoming: {1}'.format(len(cross['out']), len(cross['in'])))
print('Outgoing regions: {0}'.format(set(cross['neighbours'])))
# Print some random features using reservoir sampling
count = 5
sample = []
for i, feature in enumerate(mwm.iter_features()):
if i < count:
sample.append(feature)
elif random.randint(0, i) < count:
sample[random.randint(0, count-1)] = feature
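# Sampling note: after i + 1 features, each one sits in `sample` with equal
# probability count / (i + 1), the reservoir-sampling invariant (the
# replacement test fires with probability count / (i + 1) and the slot is
# chosen uniformly).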
print('Feature count: {0}'.format(i))
print('Sample features:')
for feature in sample:
print(json.dumps(feature, ensure_ascii=False))
| apache-2.0 | -1,666,888,041,325,059,300 | -4,498,748,573,651,022,000 | 34.45 | 97 | 0.631171 | false |
vergecurrency/electrum-xvg | gui/qt/version_getter.py | 2 | 4598 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading, re, socket
import webbrowser
import requests
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum_xvg.i18n import _
from electrum_xvg import ELECTRUM_VERSION, print_error
class VersionGetter(threading.Thread):
def __init__(self, label):
threading.Thread.__init__(self)
self.label = label
self.daemon = True
def run(self):
try:
res = requests.request("GET", "http://electrum-verge.xyz/version")
except:
print_error("Could not retrieve version information")
return
if res.status_code == 200:
latest_version = res.text
latest_version = latest_version.replace("\n","")
if(re.match('^\d+(\.\d+)*$', latest_version)):
self.label.callback(latest_version)
class UpdateLabel(QLabel):
def __init__(self, config, sb):
QLabel.__init__(self)
self.new_version = False
self.sb = sb
self.config = config
self.current_version = ELECTRUM_VERSION
self.connect(self, QtCore.SIGNAL('new_electrum_version'), self.new_electrum_version)
# prevent HTTP leaks if a proxy is set
if self.config.get('proxy'):
return
VersionGetter(self).start()
def callback(self, version):
self.latest_version = version
if(self.compare_versions(self.latest_version, self.current_version) == 1):
latest_seen = self.config.get("last_seen_version",ELECTRUM_VERSION)
if(self.compare_versions(self.latest_version, latest_seen) == 1):
self.new_version = True
self.emit(QtCore.SIGNAL('new_electrum_version'))
def new_electrum_version(self):
if self.new_version:
self.setText(_("New version available") + ": " + self.latest_version)
self.sb.insertPermanentWidget(1, self)
def compare_versions(self, version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
try:
return cmp(normalize(version1), normalize(version2))
except:
return 0
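    # Worked example (sketch): normalize('1.10') == [1, 10] while
    # normalize('1.9.0') == [1, 9] (trailing '.0' groups are stripped), so
    # compare_versions('1.10', '1.9.0') returns 1, i.e. 1.10 counts as newer.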
def ignore_this_version(self):
self.setText("")
self.config.set_key("last_seen_version", self.latest_version, True)
QMessageBox.information(self, _("Preference saved"), _("Notifications about this update will not be shown again."))
self.dialog.done(0)
def ignore_all_version(self):
self.setText("")
self.config.set_key("last_seen_version", "9.9.9", True)
QMessageBox.information(self, _("Preference saved"), _("No more notifications about version updates will be shown."))
self.dialog.done(0)
def open_website(self):
webbrowser.open("http://electrum-verge.xyz/download.html")
self.dialog.done(0)
def mouseReleaseEvent(self, event):
dialog = QDialog(self)
dialog.setWindowTitle(_('Electrum-XVG update'))
dialog.setModal(1)
main_layout = QGridLayout()
main_layout.addWidget(QLabel(_("A new version of Electrum-XVG is available:")+" " + self.latest_version), 0,0,1,3)
ignore_version = QPushButton(_("Ignore this version"))
ignore_version.clicked.connect(self.ignore_this_version)
ignore_all_versions = QPushButton(_("Ignore all versions"))
ignore_all_versions.clicked.connect(self.ignore_all_version)
open_website = QPushButton(_("Goto download page"))
open_website.clicked.connect(self.open_website)
main_layout.addWidget(ignore_version, 1, 0)
main_layout.addWidget(ignore_all_versions, 1, 1)
main_layout.addWidget(open_website, 1, 2)
dialog.setLayout(main_layout)
self.dialog = dialog
if not dialog.exec_(): return
| gpl-3.0 | 9,210,067,497,314,935,000 | -5,745,468,158,435,431,000 | 35.784 | 125 | 0.645063 | false |
lambeau/ansible-modules-core | cloud/openstack/_quantum_router.py | 37 | 7032 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: Create or Remove router from openstack
description:
- Create or Delete routers from OpenStack
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
default: None
tenant_name:
description:
- Name of the tenant for which the router has to be created; if none, the router will be created for the login tenant.
required: false
default: None
admin_state_up:
description:
- desired admin state of the created router .
required: false
default: true
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Creates a router for tenant admin
- quantum_router: state=present
login_username=admin
login_password=admin
login_tenant_name=admin
name=router1
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _create_router(module, neutron):
router = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
'admin_state_up': module.params['admin_state_up'],
}
try:
new_router = neutron.create_router(dict(router=router))
except Exception, e:
module.fail_json( msg = "Error in creating router: %s" % e.message)
return new_router['router']['id']
def _delete_router(module, neutron, router_id):
try:
neutron.delete_router(router_id)
except:
        module.fail_json(msg="Error in deleting the router")
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
admin_state_up = dict(type='bool', default=True),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
router_id = _get_router_id(module, neutron)
if not router_id:
router_id = _create_router(module, neutron)
module.exit_json(changed=True, result="Created", id=router_id)
else:
module.exit_json(changed=False, result="success" , id=router_id)
else:
router_id = _get_router_id(module, neutron)
if not router_id:
module.exit_json(changed=False, result="success")
else:
_delete_router(module, neutron, router_id)
module.exit_json(changed=True, result="deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 7,256,300,803,703,418,000 | 1,436,887,258,766,311,400 | 31.256881 | 122 | 0.625427 | false |
regionbibliotekhalland/digitalasagor | edittabvideo.py | 1 | 4897 | # Copyright 2013 Regionbibliotek Halland
#
# This file is part of Digitala sagor.
#
# Digitala sagor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Digitala sagor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Digitala sagor. If not, see <http://www.gnu.org/licenses/>.
from Tkinter import *
import ttk
from tooltip import ToolTip
from language import lang
import language as lng
from playerdlg import showPlayerDialog
from datamodel import tpVideo
import tkFileDialog
from tkMessageBox import showerror
import os
import os.path
import shutil
import spmanager as spm
from edittab import EditTab
_videoFileFormats = [('mp4', '*.mp4'), ('avi', '*.avi'), ('wmv', '*.wmv'), ('mpeg', '*.mpeg'), ('mov', '*.mov')]
class EditTabVideo(EditTab):
"""A Frame for editing video based stories"""
def __init__(self, parent, wdir, datamodel, psize):
"""Initiate
Arguments
parent -- parent tkinter item
wdir -- working directory
datamodel -- the database that is edited by the program
psize -- tuple defining preview size of videos
"""
EditTab.__init__(self, parent, wdir, datamodel, psize)
self._mediatype = tpVideo
#Create variables for common data
self._svVideo = StringVar()
#Make the first row expandable
self.rowconfigure(0, weight = 1)
#Add frame from super class
self._superFrame.grid(row = 0, column = 0, sticky = W + N)
#Create the right column
rightLf = ttk.LabelFrame(self, text = ' ' + lang[lng.txtVideo] + ' ')
rightLf.grid(row = 0, column = 1, pady = 10, sticky = W + N)
rightFrame = Frame(rightLf)
rightFrame.grid()
e = Entry(rightFrame, w = 32, textvariable = self._svVideo, state = "readonly")
e.grid(row = 0, column = 0, padx = 10, pady = 5, sticky = W);
tt = ToolTip(e, '', textvariable = self._svVideo, wraplength = parent.winfo_screenwidth() * 4 / 5)
b = Button(rightFrame, text = lang[lng.txtSelect] + '...', command = self._ehGetVideo)
b.grid(row = 0, column = 1, padx = 10, pady = 5)
b = Button(rightFrame, text = lang[lng.txtWatch], command = self._ehWatch)
b.grid(row = 0, column = 2, padx = 10, pady = 5)
def open(self, slideshow, prepared = False):
"""Open a slideshow for editing
Arguments
slideshow -- the slideshow
prepared -- if true, all media data is already copied to the working folder
(i.e. the slideshow has been created automatically)
"""
EditTab.open(self, slideshow, prepared = False)
if(not prepared):
if(slideshow.video != ''):
shutil.copyfile(slideshow.getPath(slideshow.video), os.path.join(self._wdir, slideshow.video))
self._svVideo.set(slideshow.video)
def clear(self):
"""Clear the edit tab"""
EditTab.clear(self)
self._svVideo.set('')
def _getCurrentSlideshow(self):
"""Create and return a slideshow representing the currently edited slideshow."""
slideshow = EditTab._getCurrentSlideshow(self)
slideshow.video = self._svVideo.get()
return slideshow
#Event handlers
def _ehGetVideo(self):
"""Event handler for assigning a video"""
initdir = spm.spmanager.getFirstPath([spm.VideoFolder,
spm.MostRecentFolder])
filenamepath = tkFileDialog.askopenfilename(initialdir = initdir, filetypes = _videoFileFormats)
if(len(filenamepath) > 0):
filename = os.path.basename(filenamepath)
try:
shutil.copyfile(filenamepath, os.path.join(self._wdir, filename))
except IOError:
showerror(lang[lng.txtCopyError], lang[lng.txtCouldNotCopy] + os.path.basename(filename))
return
self._svVideo.set(filename)
self.setDirty(True)
spm.spmanager.setPath(spm.VideoFolder, os.path.dirname(filenamepath))
def _ehWatch(self):
"""Event handler for preview of the video"""
media = self._getCurrentSlideshow()
showPlayerDialog(self._parent, self._psize, media)
| gpl-3.0 | -5,058,063,836,021,137,000 | 7,373,712,909,376,517,000 | 35.669231 | 112 | 0.603226 | false |
devalbo/mm_anywhere | google/protobuf/internal/encoder.py | 484 | 25695 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a cStringIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = '[email protected] (Kenton Varda)'
import struct
from google.protobuf.internal import wire_format
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
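# The ladder above stores 7 payload bits per byte: the cutoffs are 2**7 - 1,
# 2**14 - 1, ..., 2**63 - 1, and 10 bytes are enough for any 64-bit value.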
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
"""Returns the number of bytes required to serialize a tag with this field
number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarintSize(wire_format.PackTag(field_number, 0))
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
"""A sizer which uses the function compute_value_size to compute the size of
each value. Typically compute_value_size is _VarintSize."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(element)
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(element)
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(value)
return FieldSize
return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
"""Like SimpleSizer, but modify_value is invoked on each value before it is
passed to compute_value_size. modify_value is typically ZigZagEncode."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(modify_value(element))
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(modify_value(element))
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(modify_value(value))
return FieldSize
return SpecificSizer
def _FixedSizer(value_size):
"""Like _SimpleSizer except for a fixed-size field. The input is the size
of one value."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = len(value) * value_size
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
element_size = value_size + tag_size
def RepeatedFieldSize(value):
return len(value) * element_size
return RepeatedFieldSize
else:
field_size = value_size + tag_size
def FieldSize(value):
return field_size
return FieldSize
return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
_SignedVarintSize, wire_format.ZigZagEncode)
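# ZigZagEncode keeps small magnitudes small: 0 -> 0, -1 -> 1, 1 -> 2,
# -2 -> 3, 2 -> 4, so a sint32 of -1 sizes to one byte where the plain
# signed-varint form above would take ten.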
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
def StringSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a string field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element.encode('utf-8'))
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value.encode('utf-8'))
return tag_size + local_VarintSize(l) + l
return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a bytes field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element)
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value)
return tag_size + local_VarintSize(l) + l
return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a group field."""
tag_size = _TagSize(field_number) * 2
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += element.ByteSize()
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + value.ByteSize()
return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a message field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = element.ByteSize()
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = value.ByteSize()
return tag_size + local_VarintSize(l) + l
return FieldSize
# --------------------------------------------------------------------
# MessageSet is special.
def MessageSetItemSizer(field_number):
"""Returns a sizer for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
_TagSize(3))
local_VarintSize = _VarintSize
def FieldSize(value):
l = value.ByteSize()
return static_size + local_VarintSize(l) + l
return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
local_chr = chr
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeVarint
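# Wire-form sketch for the encoder above: 300 == 0b100101100, so
# EncodeVarint(write, 300) emits 0xAC (0x80 | 0b0101100) then 0x02; low-order
# 7-bit groups come first, with the continuation bit set on all but the last.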
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
local_chr = chr
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(local_chr(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_chr(bits))
return EncodeSignedVarint
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return "".join(pieces)
def TagBytes(field_number, wire_type):
"""Encode the given tag and return the bytes. Only called at startup."""
return _VarintBytes(wire_format.PackTag(field_number, wire_type))
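# Example (sketch): TagBytes(1, wire_format.WIRETYPE_LENGTH_DELIMITED) packs
# (1 << 3) | 2 == 0x0A, the familiar first byte of a length-delimited field 1.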
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
"""Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize().
"""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
"""Like SimpleEncoder but additionally invokes modify_value on every value
before passing it to encode_value. Usually modify_value is ZigZagEncode."""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(modify_value(element))
local_EncodeVarint(write, size)
for element in value:
encode_value(write, modify_value(element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, modify_value(element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, modify_value(value))
return EncodeField
return SpecificEncoder
def _StructPackEncoder(wire_type, format):
"""Return a constructor for an encoder for a fixed-width field.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
write(local_struct_pack(format, element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
write(local_struct_pack(format, element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return write(local_struct_pack(format, value))
return EncodeField
return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
"""Return a constructor for an encoder for float fields.
This is like StructPackEncoder, but catches errors that may be due to
passing non-finite floating-point values to struct.pack, and makes a
second attempt to encode those values.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
if value_size == 4:
def EncodeNonFiniteOrRaise(write, value):
# Remember that the serialized form uses little-endian byte order.
if value == _POS_INF:
write('\x00\x00\x80\x7F')
elif value == _NEG_INF:
write('\x00\x00\x80\xFF')
elif value != value: # NaN
write('\x00\x00\xC0\x7F')
else:
raise
elif value_size == 8:
def EncodeNonFiniteOrRaise(write, value):
if value == _POS_INF:
write('\x00\x00\x00\x00\x00\x00\xF0\x7F')
elif value == _NEG_INF:
write('\x00\x00\x00\x00\x00\x00\xF0\xFF')
elif value != value: # NaN
write('\x00\x00\x00\x00\x00\x00\xF8\x7F')
else:
raise
else:
raise ValueError('Can\'t encode floating-point values that are '
'%d bytes long (only 4 or 8)' % value_size)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
# This try/except block is going to be faster than any code that
# we could write to check whether element is finite.
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
try:
write(local_struct_pack(format, value))
except SystemError:
EncodeNonFiniteOrRaise(write, value)
return EncodeField
return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = chr(0)
true_byte = chr(1)
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a bytes field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, local_len(element))
write(element)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, local_len(value))
return write(value)
return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(start_tag)
element._InternalSerialize(write)
write(end_tag)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(start_tag)
value._InternalSerialize(write)
return write(end_tag)
return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a message field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, element.ByteSize())
element._InternalSerialize(write)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, value.ByteSize())
return value._InternalSerialize(write)
return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = "".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField
| agpl-3.0 | 5,709,194,657,205,806,000 | 732,076,512,618,822,400 | 32.413524 | 80 | 0.67663 | false |
ciudadanointeligente/lobby_cplt | lobby/csv_reader.py | 1 | 1655 | from lobby.models import Active, Audiencia, Passive
from popolo.models import Identifier
from datetime import datetime
class ActivosCSVReader():
def parse_line(self, line):
active = Active()
active.name = unicode(line[3] + " " + line[4])
active.save()
i = Identifier(identifier=line[0])
active.identifiers.add(i)
class AudienciasCSVReader():
def __init__(self, *args, **kwargs):
self.audiencia_records = {
}
def parse_audiencia_line(self, line):
audiencia = Audiencia()
audiencia.observations = line[9].decode('utf-8').strip()
audiencia.length = int(line[7])
date = datetime.strptime(line[6], '%Y-%m-%d %H:%M:%S')
audiencia.date = date
self.audiencia_records[line[0]] = audiencia
def parse_several_lines(self, lines):
lines.pop(0)
for line in lines:
self.parse_audiencia_line(line)
def parse_one_person(self, line, klass, pre_):
name = line[3].decode('utf-8').strip() + u" " + line[4].decode('utf-8').strip()
p = klass.objects.get(name=name)
i = Identifier(identifier=pre_ + line[0].decode('utf-8').strip())
p.identifiers.add(i)
def parse_one_passive_lines(self, line):
self.parse_one_person(line, Passive, 'passive_')
def parse_several_passives_lines(self, lines):
lines.pop(0)
for line in lines:
self.parse_one_passive_lines(line)
def parse_one_active_lines(self, line):
self.parse_one_person(line, Active, 'active_')
| agpl-3.0 | 8,336,952,305,327,993,000 | 2,831,408,640,563,105,000 | 30.826923 | 87 | 0.607855 | false |
marcelocure/django | django/core/management/sql.py | 399 | 1890 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
else:
tables = connection.introspection.table_names(include_views=False)
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
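# Illustrative usage (sketch; not part of this module):
#
#   from django.core.management.color import no_style
#   from django.db import connection
#   for statement in sql_flush(no_style(), connection, allow_cascade=True):
#       print(statement)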
def emit_pre_migrate_signal(verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
def emit_post_migrate_signal(verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
| bsd-3-clause | -2,561,690,969,595,645,400 | -735,251,645,773,106,600 | 36.8 | 101 | 0.667196 | false |
johanvdw/rasterio | examples/concurrent-cpu-bound.py | 6 | 3361 | """concurrent-cpu-bound.py
Operate on a raster dataset window-by-window using a ThreadPoolExecutor.
Simulates a CPU-bound thread situation where multiple threads can improve performance.
With -j 4, the program returns in about 1/4 the time as with -j 1.
"""
import concurrent.futures
import multiprocessing
import time
import numpy
import rasterio
from rasterio._example import compute
def main(infile, outfile, num_workers=4):
with rasterio.drivers():
# Open the source dataset.
with rasterio.open(infile) as src:
# Create a destination dataset based on source params.
# The destination will be tiled, and we'll "process" the tiles
# concurrently.
meta = src.meta
del meta['transform']
meta.update(affine=src.affine)
meta.update(blockxsize=256, blockysize=256, tiled='yes')
with rasterio.open(outfile, 'w', **meta) as dst:
# Define a generator for data, window pairs.
                # We use the new read() method here to read a 3D array with
                # all bands, but could also use read_band().
def jobs():
for ij, window in dst.block_windows():
data = src.read(window=window)
result = numpy.zeros(data.shape, dtype=data.dtype)
yield data, result, window
# Submit the jobs to the thread pool executor.
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_workers) as executor:
# Map the futures returned from executor.submit()
# to their destination windows.
#
# The _example.compute function modifies no Python
# objects and releases the GIL. It can execute
# concurrently.
future_to_window = {
executor.submit(compute, data, res): (res, window)
for data, res, window in jobs()}
# As the processing jobs are completed, get the
# results and write the data to the appropriate
# destination window.
for future in concurrent.futures.as_completed(
future_to_window):
result, window = future_to_window[future]
# Since there's no multiband write() method yet in
# Rasterio, we use write_band for each part of the
# 3D data array.
for i, arr in enumerate(result, 1):
dst.write_band(i, arr, window=window)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description="Concurrent raster processing demo")
parser.add_argument(
'input',
metavar='INPUT',
help="Input file name")
parser.add_argument(
'output',
metavar='OUTPUT',
help="Output file name")
parser.add_argument(
'-j',
metavar='NUM_JOBS',
type=int,
default=multiprocessing.cpu_count(),
help="Number of concurrent jobs")
args = parser.parse_args()
main(args.input, args.output, args.j)
| bsd-3-clause | -2,754,200,184,191,004,700 | 153,778,353,819,631,900 | 34.378947 | 86 | 0.551026 | false |
Alwnikrotikz/micolog2 | plugins/wapblog/wapblog.py | 2 | 8001 | # -*- coding: utf-8 -*-
from micolog_plugin import *
import logging
import urllib
from model import *
from google.appengine.api import users
from google.appengine.api import memcache
from base import BaseRequestHandler,request_cache
from google.appengine.ext import webapp
from datetime import datetime, timedelta
def urlencode(value):
return urllib.quote(value.encode('utf8'))
class wapblog(Plugin):
def __init__(self):
Plugin.__init__(self,__file__)
self.author="云在天边"
self.authoruri="http://www.tangblog.info"
self.uri="http://www.tangblog.info"
		self.description="Micolog WAP Blog plugin. It lets readers browse new posts and view and leave comments from a mobile phone. (By default only Google Account users can sign in to comment; click the plugin name to configure.)"
self.name="Micolog Wap Blog"
self.version="0.6"
self.register_urlhandler('(?i)/wap',waphandler)
self.register_urlhandler('/wap/(\d+)',waphandler)
self.register_urlhandler('(?i)/wap/page',pagehandler)
self.register_urlhandler('(?i)/wap/post_comment',postComment)
self.register_urlhandler('(?i)/wap/(.*)',Error404)
def get(self,page):
postcount=OptionSet.getValue("posts_per_page",default="8")
commentcount=OptionSet.getValue("LatestCommentCount",default="5")
return '''
<h3>The "WAP Blog" plugin is up and running!</h3>
<p>Please complete the following settings</p>
<form action="" method="post">
Posts per page: <input name="PostCount" value="%s" onKeyUp="this.value=this.value.replace(/\D/g,'')" onafterpaste="this.value=this.value.replace(/\D/g,'')" /><br />
Recent comments per post: <input name="CommentCount" value="%s" onKeyUp="this.value=this.value.replace(/\D/g,'')" onafterpaste="this.value=this.value.replace(/\D/g,'')" />(if set to 0, all comments are shown)<br />
<br>
<input type="submit" title="Save" value="Save">
</form>
<p>Congratulations! Your "Micolog WAP Blog" plugin is working!<br />The URL of the WAP page is:
<a href="/wap" target="_blank">http://www.yourdomain.com/wap</a><br />
<b>Author:</b><a href="http://www.tangblog.info" target="_blank">云在天边</a><br/></p>
<p>Your support keeps this project going; thank you for backing the author with real action!</p>
<p>If you hit any problem while using it, please file a report on the author's message board (云在天边 <a href="http://www.tangblog.info/contact">www.tangblog.info/contact</a>)!</p>
'''%(postcount,commentcount)
def post(self,page):
postcount=int(page.param("PostCount"))
commentcount=int(page.param("CommentCount"))
OptionSet.setValue("posts_per_page",postcount)
OptionSet.setValue("LatestCommentCount",commentcount)
return self.get(page)
class waphandler(BaseRequestHandler):
def get(self,page=1):
self.doget(page)
#TODO: update this @request_cache()
def doget(self,page):
try:
from model import g_blog
except:
pass
page=int(page)
time=datetime.now()
entrycount=g_blog.postscount()
		posts_per_page = int(OptionSet.getValue("posts_per_page",default="8"))
		if not posts_per_page:
			posts_per_page = 8
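		# Ceiling division, Python 2 style: the "and/or" expression below adds
		# one page when there is a partial remainder (e.g. 17 posts at 8 per
		# page give 17/8 + 1 = 3 pages).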
max_page = entrycount / posts_per_page + ( entrycount % posts_per_page and 1 or 0 )
comments=memcache.get("wap_comments"+self.request.path_qs)
if comments is None:
comments=Comment.all().order('-date').fetch(5)
memcache.set("wap_comments"+self.request.path_qs,comments)
if page < 1 or page > max_page:
return self.error(404)
entries=memcache.get("wap_entries"+self.request.path_qs)
if entries is None :
entries = Entry.all().filter('entrytype =','post').\
filter("published =", True).order('-date').\
fetch(posts_per_page, offset = (page-1) * posts_per_page)
memcache.set("wap_entries"+self.request.path_qs,entries)
show_prev =entries and (not (page == 1))
show_next =entries and (not (page == max_page))
self.render2("plugins/wapblog/index.html",{'entries':entries,
'show_prev' : show_prev,
'show_next' : show_next,
'pageindex':page,
'time':time,
'ishome':True,
'pagecount':max_page,
'postscount':entrycount,
'comments':comments
})
class pagehandler(BaseRequestHandler):
#TODO: update this @request_cache()
def get(self,*arg1):
try: id=int(self.param("id") or self.param("p") )
except: return self.redirect('/wap')
time=datetime.now()
		commentcount = int(OptionSet.getValue("LatestCommentCount",default="5"))
		entries = Entry.all().filter("published =", True).filter('post_id =',id).fetch(1)
		if not entries:
			return self.error(404)
		entry=entries[0]
comments=memcache.get("wap_comments"+self.request.path_qs)
if comments is None:
if commentcount==0:
comments=Comment.all().filter("entry =",entry).order('-date')
memcache.set("wap_comments"+self.request.path_qs,comments)
else:
comments=Comment.all().filter("entry =",entry).order('-date').fetch(commentcount)
memcache.set("wap_comments"+self.request.path_qs,comments)
		Comments=memcache.get("wap_Comments"+self.request.path_qs)
		if Comments is None:
			Comments=Comment.all().filter("entry =",entry).order('-date')
			memcache.set("wap_Comments"+self.request.path_qs,Comments)
user = users.get_current_user()
if user:
greeting = ("Welcome, %s! (<a href=\"%s\">sign out</a>)" %
(user.nickname(), users.create_logout_url(self.request.uri)))
email = user.email()
try:
query = Comment.all().filter('email =',email).order('-date').fetch(1)
name = query[0].author
weburl = query[0].weburl
except:
name=user.nickname()
weburl=None
self.render2("plugins/wapblog/page.html",{'entry':entry,'id':id,'comments':comments,'Comments':Comments,'user_name':name,'user_email':email,'user':user,'user_url':weburl,'greeting':greeting,'time':time})
else:
greeting = ("<a href=\"%s\">Sign in with your Google Account</a>." %
users.create_login_url(self.request.uri))
self.render2("plugins/wapblog/page.html",{'entry':entry,'id':id,'comments':comments,'Comments':Comments,'greeting':greeting,'user':user,'time':time})
class postComment(BaseRequestHandler):
def get(self,*arg1):
self.response.set_status(405)
self.write('<h1>405 Method Not Allowed</h1>\n<a href="/wap">Back To Home</a>')
def post(self):
name=self.param('author')
#email=self.param('email')
url=self.param('url')
key=self.param('key')
content=self.param('comment')
parent_id=self.paramint('parentid',0)
reply_notify_mail=True
user = users.get_current_user()
try:
email=user.email()
except:
email=None
if not (name and email and content):
self.response.out.write('Please input name and comment content .\n <a href="javascript:history.back(-1)">Back</a>')
else:
comment=Comment(author=name,
content=content+"<br /><small>from wap blog</small>",
email=email,
reply_notify_mail=reply_notify_mail,
entry=Entry.get(key))
starturl='http://'
if url:
try:
if not url.lower().startswith(('http://','https://')):
url = starturl + url
comment.weburl=url
except:
comment.weburl=None
info_str='#@#'.join([urlencode(name),urlencode(email),urlencode(url)])
logging.info("info:"+name+"#@#"+info_str + "Comment Form Wap Site")
cookiestr='comment_user=%s;expires=%s;domain=%s;path=/'%( info_str,
(datetime.now()+timedelta(days=100)).strftime("%a, %d-%b-%Y %H:%M:%S GMT"),
'' )
comment.ip=self.request.remote_addr
if parent_id:
comment.parent=Comment.get_by_id(parent_id)
comment.no=comment.entry.commentcount+1
try:
comment.save()
memcache.delete("/"+comment.entry.link)
self.response.headers.add_header( 'Set-Cookie', cookiestr)
self.redirect(self.referer+"#comment-"+str(comment.key().id()))
memcache.delete("/feed/comments")
except:
self.response.out.write('Sorry,Comment not allowed .\n <a href="javascript:history.back(-1)">Back</a>')
class Error404(BaseRequestHandler):
def get(self,*arg1):
self.response.clear()
self.response.set_status(404)
self.response.out.write('<h1>404 Not Found</h1>\n<a href="/wap">Back To Main Page ! </a>')
| gpl-3.0 | -1,066,156,452,368,180,400 | 3,483,973,721,551,884,300 | 37.685279 | 206 | 0.677339 | false |
labordoc/labordoc-next | modules/webtag/lib/webtag_forms.py | 3 | 7394 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebTag Forms"""
from invenio.webtag_config import \
CFG_WEBTAG_LAST_MYSQL_CHARACTER
from invenio.webtag_config import \
CFG_WEBTAG_NAME_MAX_LENGTH
from invenio.webinterface_handler_flask_utils import _
from invenio.wtforms_utils import InvenioBaseForm
from invenio.webuser_flask import current_user
from wtforms import \
IntegerField, \
HiddenField, \
TextField, \
SelectMultipleField, \
validators
# Models
from invenio.sqlalchemyutils import db
from invenio.webtag_model import \
WtgTAG, \
WtgTAGRecord, \
wash_tag_silent, \
wash_tag_blocking
from invenio.bibedit_model import Bibrec
from invenio.search_engine import check_user_can_view_record
def validate_tag_name(dummy_form, field):
""" Check validity of tag name """
if field.data:
suggested_silent = wash_tag_silent(field.data)
suggested = wash_tag_blocking(suggested_silent)
field.data = suggested_silent
if suggested != suggested_silent:
raise validators.ValidationError(
_('Forbidden characters. Try ') + suggested + '.')
if len(suggested) <= 0:
raise validators.ValidationError(
_('The name must contain valid characters.'))
if len(suggested_silent) > CFG_WEBTAG_NAME_MAX_LENGTH:
            raise validators.ValidationError( _('The name cannot exceed ') \
+ str(CFG_WEBTAG_NAME_MAX_LENGTH) + _(' characters.'))
if max(ord(letter) for letter in suggested_silent) \
> CFG_WEBTAG_LAST_MYSQL_CHARACTER:
raise validators.ValidationError( _('Forbidden character.'))
def validate_name_available(dummy_form, field):
""" Check if the user already has tag named this way """
if field.data:
uid = current_user.get_id()
copy_count = db.session.query(WtgTAG).\
filter_by(id_user=uid, name=field.data).count()
if copy_count > 0:
raise validators.ValidationError(
_('Tag with that name already exists.'))
def validate_tag_exists(dummy_form, field):
""" Check if id_tag matches a tag in database """
if field.data:
try:
field.data = int(field.data)
except ValueError:
raise validators.ValidationError(_('Tag ID must be an integer.'))
if not db.session.query(WtgTAG).get(field.data):
raise validators.ValidationError(_('Tag does not exist.'))
def validate_user_owns_tag(dummy_form, field):
""" Check if id_tag matches a tag in database """
if field.data:
tag = db.session.query(WtgTAG).get(field.data)
if tag and tag.id_user != current_user.get_id():
raise validators.ValidationError(
_('You are not the owner of this tag.'))
def validate_bibrec_exists(dummy_form, field):
""" Check if id_bibrec matches a bibrec in database """
if field.data:
try:
field.data = int(field.data)
except ValueError:
raise validators.ValidationError(_('Bibrec ID must be an integer.'))
record = db.session.query(Bibrec).get(field.data)
if (not record):
raise validators.ValidationError(_('Bibrec does not exist.'))
# Switch to merged record if present
merged_id = record.merged_recid_final
if merged_id != record.id:
record = db.session.query(Bibrec).get(merged_id)
field.data = merged_id
if record.deleted:
raise validators.ValidationError(_('Bibrec has been deleted.'))
def validate_user_can_see_bibrec(dummy_form, field):
""" Check if user has rights to view bibrec """
if field.data:
(auth_code, msg) = check_user_can_view_record(current_user, field.data)
if auth_code > 0:
raise validators.ValidationError(
_('Unauthorized to view record: ')+msg)
def validate_not_already_attached(form, dummy_field):
""" Check if the pair (tag, bibrec) is already connected """
if form:
if ('id_tag' in form.data) and ('id_bibrec' in form.data):
tag_record = db.session.query(WtgTAGRecord)\
.get((form.data['id_tag'], form.data['id_bibrec']))
if tag_record is not None:
raise validators.ValidationError(_('Tag already attached.'))
def validate_already_attached(form, dummy_field):
""" Check if the pair (tag, bibrec) is already connected """
if form:
if ('id_tag' in form.data) and ('id_bibrec' in form.data):
tag_record = db.session.query(WtgTAGRecord)\
.get((form.data['id_tag'], form.data['id_bibrec']))
if tag_record is None:
raise validators.ValidationError(_('Tag not attached.'))
class CreateTagForm(InvenioBaseForm):
"""Defines form for creating a new tag."""
name = TextField(_('Name'), [validators.Required(),
validate_tag_name,
validate_name_available])
# Ajax requests only:
# Send a record ID if the tag should be attached to the record
# right after creation
id_bibrec = HiddenField('Tagged record',
[validate_bibrec_exists,
validate_user_can_see_bibrec])
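# Illustrative use (sketch; request plumbing omitted, the keyword argument
# populates the form field of the same name):
#
#   form = CreateTagForm(name='physics')
#   if form.validate():
#       ...  # create the WtgTAG from form.data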
class DeleteTagForm(InvenioBaseForm):
"""Defines form for deleting a tag."""
id_tag = SelectMultipleField('Tag ID',
[validators.Required(),
validate_tag_exists,
validate_user_owns_tag])
class AttachTagForm(InvenioBaseForm):
"""Defines a form validating attaching a tag to record"""
# Ajax requests only:
id_tag = IntegerField('Tag ID',
[validators.Required(),
validate_tag_exists,
validate_not_already_attached,
validate_user_owns_tag])
# validate user rights on tag
id_bibrec = IntegerField('Record ID',
[validate_bibrec_exists,
validate_user_can_see_bibrec])
class DetachTagForm(InvenioBaseForm):
"""Defines a form validating detaching a tag from record"""
# Ajax requests only:
id_tag = IntegerField('Tag ID',
[validators.Required(),
validate_tag_exists,
validate_already_attached,
validate_user_owns_tag])
# validate user rights on tag
id_bibrec = IntegerField('Record ID',
[validators.Required(),
validate_bibrec_exists,
validate_user_can_see_bibrec])
| gpl-2.0 | -5,501,008,173,432,027,000 | 1,721,646,194,088,208,100 | 34.893204 | 80 | 0.620638 | false |
buguelos/odoo | addons/point_of_sale/controllers/main.py | 243 | 1576 | # -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
import time
import random
import werkzeug.utils
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import module_boot, login_redirect
_logger = logging.getLogger(__name__)
class PosController(http.Controller):
@http.route('/pos/web', type='http', auth='user')
def a(self, debug=False, **k):
cr, uid, context, session = request.cr, request.uid, request.context, request.session
if not session.uid:
return login_redirect()
PosSession = request.registry['pos.session']
pos_session_ids = PosSession.search(cr, uid, [('state','=','opened'),('user_id','=',session.uid)], context=context)
if not pos_session_ids:
return werkzeug.utils.redirect('/web#action=point_of_sale.action_pos_session_opening')
PosSession.login(cr,uid,pos_session_ids,context=context)
modules = simplejson.dumps(module_boot(request.db))
init = """
var wc = new s.web.WebClient();
wc._title_changed = function() {}
wc.show_application = function(){
wc.action_manager.do_action("pos.ui");
};
wc.setElement($(document.body));
wc.start();
"""
html = request.registry.get('ir.ui.view').render(cr, session.uid,'point_of_sale.index',{
'modules': modules,
'init': init,
})
return html
| agpl-3.0 | 5,845,252,346,859,807,000 | 5,118,021,337,117,765,000 | 31.833333 | 123 | 0.593274 | false |
JFriel/honours_project | networkx/networkx/algorithms/shortest_paths/dense.py | 42 | 5102 | # -*- coding: utf-8 -*-
"""Floyd-Warshall algorithm for shortest paths.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """Aric Hagberg <[email protected]>"""
__all__ = ['floyd_warshall',
'floyd_warshall_predecessor_and_distance',
'floyd_warshall_numpy']
def floyd_warshall_numpy(G, nodelist=None, weight='weight'):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
Parameters
----------
G : NetworkX graph
nodelist : list, optional
The rows and columns are ordered by the nodes in nodelist.
If nodelist is None then the ordering is produced by G.nodes().
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance : NumPy matrix
A matrix of shortest path distances between nodes.
        If there is no path between two nodes the corresponding matrix entry
        will be Inf.
Notes
------
Floyd's algorithm is appropriate for finding shortest paths in
dense graphs or graphs with negative weights when Dijkstra's
algorithm fails. This algorithm can still fail if there are
negative cycles. It has running time O(n^3) with running space of O(n^2).
"""
try:
import numpy as np
except ImportError:
        raise ImportError(
            "floyd_warshall_numpy() requires numpy: http://scipy.org/ ")
# To handle cases when an edge has weight=0, we must make sure that
# nonedges are not given the value 0 as well.
A = nx.to_numpy_matrix(G, nodelist=nodelist, multigraph_weight=min,
weight=weight, nonedge=np.inf)
n,m = A.shape
I = np.identity(n)
A[I==1] = 0 # diagonal elements should be zero
for i in range(n):
A = np.minimum(A, A[i,:] + A[:,i])
return A
def floyd_warshall_predecessor_and_distance(G, weight='weight'):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
Parameters
----------
G : NetworkX graph
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
predecessor,distance : dictionaries
Dictionaries, keyed by source and target, of predecessors and distances
in the shortest path.
Notes
------
Floyd's algorithm is appropriate for finding shortest paths
in dense graphs or graphs with negative weights when Dijkstra's algorithm
fails. This algorithm can still fail if there are negative cycles.
It has running time O(n^3) with running space of O(n^2).
See Also
--------
floyd_warshall
floyd_warshall_numpy
all_pairs_shortest_path
all_pairs_shortest_path_length
"""
from collections import defaultdict
# dictionary-of-dictionaries representation for dist and pred
    # use some defaultdict magic here
# for dist the default is the floating point inf value
dist = defaultdict(lambda : defaultdict(lambda: float('inf')))
for u in G:
dist[u][u] = 0
pred = defaultdict(dict)
# initialize path distance dictionary to be the adjacency matrix
# also set the distance to self to 0 (zero diagonal)
undirected = not G.is_directed()
for u,v,d in G.edges(data=True):
e_weight = d.get(weight, 1.0)
dist[u][v] = min(e_weight, dist[u][v])
pred[u][v] = u
if undirected:
dist[v][u] = min(e_weight, dist[v][u])
pred[v][u] = v
for w in G:
for u in G:
for v in G:
if dist[u][v] > dist[u][w] + dist[w][v]:
dist[u][v] = dist[u][w] + dist[w][v]
pred[u][v] = pred[w][v]
return dict(pred),dict(dist)
def floyd_warshall(G, weight='weight'):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
Parameters
----------
G : NetworkX graph
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance : dict
A dictionary, keyed by source and target, of shortest paths distances
between nodes.
Notes
------
Floyd's algorithm is appropriate for finding shortest paths
in dense graphs or graphs with negative weights when Dijkstra's algorithm
fails. This algorithm can still fail if there are negative cycles.
It has running time O(n^3) with running space of O(n^2).
See Also
--------
floyd_warshall_predecessor_and_distance
floyd_warshall_numpy
all_pairs_shortest_path
all_pairs_shortest_path_length
"""
# could make this its own function to reduce memory costs
return floyd_warshall_predecessor_and_distance(G, weight=weight)[1]
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
| gpl-3.0 | -2,716,379,292,489,024,500 | 5,749,162,952,149,643,000 | 31.291139 | 78 | 0.635045 | false |
kafan15536900/shadowsocks | shadowsocks/eventloop.py | 51 | 7513 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# from ssloop
# https://github.com/clowwindy/ssloop
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import select
import errno
import logging
from collections import defaultdict
from shadowsocks import shell
__all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR',
'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES']
POLL_NULL = 0x00
POLL_IN = 0x01
POLL_OUT = 0x04
POLL_ERR = 0x08
POLL_HUP = 0x10
POLL_NVAL = 0x20
EVENT_NAMES = {
POLL_NULL: 'POLL_NULL',
POLL_IN: 'POLL_IN',
POLL_OUT: 'POLL_OUT',
POLL_ERR: 'POLL_ERR',
POLL_HUP: 'POLL_HUP',
POLL_NVAL: 'POLL_NVAL',
}
class EpollLoop(object):
def __init__(self):
self._epoll = select.epoll()
def poll(self, timeout):
return self._epoll.poll(timeout)
def add_fd(self, fd, mode):
self._epoll.register(fd, mode)
def remove_fd(self, fd):
self._epoll.unregister(fd)
def modify_fd(self, fd, mode):
self._epoll.modify(fd, mode)
class KqueueLoop(object):
MAX_EVENTS = 1024
def __init__(self):
self._kqueue = select.kqueue()
self._fds = {}
def _control(self, fd, mode, flags):
events = []
if mode & POLL_IN:
events.append(select.kevent(fd, select.KQ_FILTER_READ, flags))
if mode & POLL_OUT:
events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags))
for e in events:
self._kqueue.control([e], 0)
def poll(self, timeout):
if timeout < 0:
timeout = None # kqueue behaviour
events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout)
results = defaultdict(lambda: POLL_NULL)
for e in events:
fd = e.ident
if e.filter == select.KQ_FILTER_READ:
results[fd] |= POLL_IN
elif e.filter == select.KQ_FILTER_WRITE:
results[fd] |= POLL_OUT
return results.items()
def add_fd(self, fd, mode):
self._fds[fd] = mode
self._control(fd, mode, select.KQ_EV_ADD)
def remove_fd(self, fd):
self._control(fd, self._fds[fd], select.KQ_EV_DELETE)
del self._fds[fd]
def modify_fd(self, fd, mode):
self.remove_fd(fd)
self.add_fd(fd, mode)
class SelectLoop(object):
def __init__(self):
self._r_list = set()
self._w_list = set()
self._x_list = set()
def poll(self, timeout):
r, w, x = select.select(self._r_list, self._w_list, self._x_list,
timeout)
results = defaultdict(lambda: POLL_NULL)
for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]:
for fd in p[0]:
results[fd] |= p[1]
return results.items()
def add_fd(self, fd, mode):
if mode & POLL_IN:
self._r_list.add(fd)
if mode & POLL_OUT:
self._w_list.add(fd)
if mode & POLL_ERR:
self._x_list.add(fd)
def remove_fd(self, fd):
if fd in self._r_list:
self._r_list.remove(fd)
if fd in self._w_list:
self._w_list.remove(fd)
if fd in self._x_list:
self._x_list.remove(fd)
def modify_fd(self, fd, mode):
self.remove_fd(fd)
self.add_fd(fd, mode)
class EventLoop(object):
def __init__(self):
self._iterating = False
if hasattr(select, 'epoll'):
self._impl = EpollLoop()
model = 'epoll'
elif hasattr(select, 'kqueue'):
self._impl = KqueueLoop()
model = 'kqueue'
elif hasattr(select, 'select'):
self._impl = SelectLoop()
model = 'select'
else:
raise Exception('can not find any available functions in select '
'package')
self._fd_to_f = {}
self._handlers = []
self._ref_handlers = []
self._handlers_to_remove = []
logging.debug('using event model: %s', model)
def poll(self, timeout=None):
events = self._impl.poll(timeout)
return [(self._fd_to_f[fd], fd, event) for fd, event in events]
def add(self, f, mode):
fd = f.fileno()
self._fd_to_f[fd] = f
self._impl.add_fd(fd, mode)
def remove(self, f):
fd = f.fileno()
del self._fd_to_f[fd]
self._impl.remove_fd(fd)
def modify(self, f, mode):
fd = f.fileno()
self._impl.modify_fd(fd, mode)
def add_handler(self, handler, ref=True):
self._handlers.append(handler)
if ref:
# when all ref handlers are removed, loop stops
self._ref_handlers.append(handler)
def remove_handler(self, handler):
if handler in self._ref_handlers:
self._ref_handlers.remove(handler)
if self._iterating:
self._handlers_to_remove.append(handler)
else:
self._handlers.remove(handler)
def run(self):
events = []
while self._ref_handlers:
try:
events = self.poll(1)
except (OSError, IOError) as e:
if errno_from_exception(e) in (errno.EPIPE, errno.EINTR):
# EPIPE: Happens when the client closes the connection
# EINTR: Happens when received a signal
# handles them as soon as possible
logging.debug('poll:%s', e)
else:
logging.error('poll:%s', e)
import traceback
traceback.print_exc()
continue
self._iterating = True
for handler in self._handlers:
# TODO when there are a lot of handlers
try:
handler(events)
except (OSError, IOError) as e:
shell.print_exception(e)
if self._handlers_to_remove:
for handler in self._handlers_to_remove:
self._handlers.remove(handler)
self._handlers_to_remove = []
self._iterating = False
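# Illustrative wiring (sketch; my_dispatch is hypothetical and receives the
# list of (fileobj, fd, event) tuples produced by poll()):
#
#   loop = EventLoop()
#   loop.add(server_socket, POLL_IN)
#   loop.add_handler(my_dispatch)
#   loop.run()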
# from tornado
def errno_from_exception(e):
"""Provides the errno from an Exception object.
    There are cases where the errno attribute is not set, so we pull
    the errno out of the args; but if someone instantiates an Exception
    without any args you will get a tuple error. This function
    abstracts all that behavior to give you a safe way to get the
    errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
# from tornado
def get_sock_error(sock):
error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
return socket.error(error_number, os.strerror(error_number))
| apache-2.0 | 1,007,031,284,025,270,000 | 5,920,291,187,102,049,000 | 28.120155 | 77 | 0.562891 | false |
indhub/mxnet | example/recommenders/randomproj.py | 14 | 6041 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Random projection layers in MXNet as custom python ops.
Currently slow and memory-inefficient, but functional.
"""
import os
import numpy as np
import mxnet as mx
# ref: http://mxnet.io/faq/new_op.html
class RandomBagOfWordsProjection(mx.operator.CustomOp):
"""Random projection layer for sparse bag-of-words (n-hot) inputs.
In the sparse input, only the indices are supplied, because all the
values are understood to be exactly 1.0.
See also RandomProjection for values other than 1.0.
"""
def __init__(self, vocab_size, output_dim, random_seed=54321):
        super(RandomBagOfWordsProjection, self).__init__()
self._vocab = vocab_size
self._proj_dim = output_dim
#NOTE: This naive implementation is slow and uses lots of memory.
# Should use something smarter to not instantiate this matrix.
rs = np.random.RandomState(seed=random_seed)
self.W = self.random_unit_vecs(self._vocab, self._proj_dim, rs)
def random_unit_vecs(self, num_vecs, num_dims, rs):
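        # Draw Gaussian row vectors and normalize each to unit length; the
        # normalized rows are approximately uniform on the unit sphere.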
W = rs.normal(size=(num_vecs, num_dims))
Wlen = np.linalg.norm(W, axis=1)
W_unit = W / Wlen[:,None]
return W_unit
def _get_mask(self, idx, in_data):
"""Returns the mask by which to multiply the parts of the embedding layer.
In this version, we have no weights to apply.
"""
mask = idx >= 0 # bool False for -1 values that should be removed. shape=(b,mnz)
mask = np.expand_dims(mask,2) # shape = (b,mnz,1)
mask = np.repeat(mask, self._proj_dim, axis=2) # shape = (b,mnz,d)
return mask
def forward(self, is_train, req, in_data, out_data, aux):
#Note: see this run in notebooks/howto-numpy-random-proj.ipynb
# Notation for shapes: b = batch_size, mnz = max_nonzero, d = proj_dim
idx = in_data[0].asnumpy().astype('int32') # shape=(b,mnz)
wd = self.W[idx] # shape= (b,mnz,d)
mask = self._get_mask(idx, in_data)
wd = np.multiply(wd,mask) # shape=(b,mnz,d), but zero'd out non-masked
y = np.sum(wd,axis=1) # shape=(b,d)
mxy = mx.nd.array(y) #NOTE: this hangs if the environment variables aren't set correctly
# See https://github.com/dmlc/mxnet/issues/3813
self.assign(out_data[0], req[0], mxy)
@mx.operator.register("SparseBOWProj")
class RandomBagOfWordsProjectionProp(mx.operator.CustomOpProp):
def __init__(self, vocab_size, output_dim):
# need_top_grad=True means this is not a loss layer
super(RandomBagOfWordsProjectionProp, self).__init__(need_top_grad=True)
self._kwargs = {
'vocab_size': int(vocab_size),
'output_dim': int(output_dim),
}
def list_arguments(self):
return ['indexes']
def list_outputs(self):
return ['output']
def create_operator(self, ctx, shapes, dtypes, **kwargs):
return RandomBagOfWordsProjection(**self._kwargs)
def infer_shape(self, in_shape):
batch_size = in_shape[0][0]
output_shape = (batch_size, self._kwargs['output_dim'])
return in_shape, [output_shape], []
class SparseRandomProjection(RandomBagOfWordsProjection):
"""Random projection of sparse input vector.
Takes an sparse input layer, effectively in coordinate (COO) format,
where the row number is implicit, because it's the minibatch record.
See the simpler version RandomBagOfWordsProjection if all values are 1.0.
"""
def _get_mask(self, idx, in_data):
"""Returns the mask by which to multiply the parts of the embedding layer.
In this version, we apply the weights.
"""
val = in_data[1].asnumpy() # shape=(b,mnz)
mask = idx >= 0 # bool False for -1 values that should be removed. shape=(b,mnz)
mask = np.multiply(mask,val) # All (b,mnz)
mask = np.expand_dims(mask,2) # shape = (b,mnz,1)
mask = np.repeat(mask, self._proj_dim, axis=2) # shape = (b,mnz,d)
return mask
@mx.operator.register("SparseRandomProjection")
class SparseRandomProjectionProp(RandomBagOfWordsProjectionProp):
def list_arguments(self):
return ['indexes', 'values']
def create_operator(self, ctx, shapes, dtypes, **kwargs):
return SparseRandomProjection(**self._kwargs)
def infer_shape(self, in_shape):
# check that indexes and values are the same shape.
if in_shape[0] != in_shape[1]:
raise ValueError("Input shapes differ. indexes:%s. values:%s. must be same"
% (str(in_shape[0]),str(in_shape[1])))
return super(SparseRandomProjectionProp,self).infer_shape(in_shape)
if __name__ == "__main__":
print("Simple test of proj layer")
data = mx.symbol.Variable('data')
vals = mx.symbol.Variable('vals')
net = mx.symbol.Custom(indexes=data, values=vals, name='rproj',
op_type='SparseRandomProjection',
vocab_size=999, output_dim=29)
d = mx.nd.zeros(shape=(3,100))
v = mx.nd.ones(shape=(3,100))
e = net.bind(ctx=mx.cpu(), args={'data':d, 'vals':v})
e.forward()
print(e.outputs[0].asnumpy())
print("Done with proj layer test")
| apache-2.0 | -8,505,338,246,276,671,000 | 1,213,403,182,864,530,700 | 39.273333 | 97 | 0.652044 | false |
nschneid/pyutil | ds/set.py | 4 | 2535 | '''
OrderedSet implementation, from http://stackoverflow.com/questions/1653970/does-python-have-an-ordered-set/1653978#1653978
The index() method and a few unit tests have been added.
@author: Nathan Schneider (nschneid)
@since: 2010-08-11
'''
# Strive towards Python 3 compatibility
from __future__ import print_function, unicode_literals, division, absolute_import
from future_builtins import map, filter
import collections
class OrderedSet(collections.OrderedDict, collections.MutableSet):
'''
A set that preserves the ordering of its entries.
>>> {3,2,9,2}=={9,2,3}
True
>>> x = OrderedSet([3,2,9,2])
>>> x == OrderedSet([2,9,3])
False
>>> x == OrderedSet([3,2,3,9,2])
True
>>> [y for y in x]
[3, 2, 9]
>>> x.index(2)
1
>>> x.index(0)
Traceback (most recent call last):
...
ValueError: 0 is not in set
>>> [y for y in {3,2,9}]
[9, 2, 3]
'''
def update(self, *args, **kwargs):
if kwargs:
raise TypeError("update() takes no keyword arguments")
for s in args:
for e in s:
self.add(e)
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
def index(self, elem):
try:
return self.keys().index(elem)
except ValueError:
raise ValueError('{} is not in set'.format(elem))
def __le__(self, other):
return all(e in other for e in self)
def __lt__(self, other):
return self <= other and self != other
def __ge__(self, other):
return all(e in self for e in other)
def __gt__(self, other):
return self >= other and self != other
def __repr__(self):
return 'OrderedSet([%s])' % (', '.join(map(repr, self.keys())))
def __str__(self):
return '{%s}' % (', '.join(map(repr, self.keys())))
difference = property(lambda self: self.__sub__)
difference_update = property(lambda self: self.__isub__)
intersection = property(lambda self: self.__and__)
intersection_update = property(lambda self: self.__iand__)
issubset = property(lambda self: self.__le__)
issuperset = property(lambda self: self.__ge__)
symmetric_difference = property(lambda self: self.__xor__)
symmetric_difference_update = property(lambda self: self.__ixor__)
union = property(lambda self: self.__or__)
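    # Illustrative behaviour (sketch): construction consumes any iterable in
    # first-seen order, e.g. OrderedSet('abracadabra') iterates as
    # 'a', 'b', 'r', 'c', 'd'.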
def test():
import doctest
doctest.testmod()
if __name__=='__main__':
test()
| mit | -8,520,959,205,549,158,000 | -8,972,021,700,080,776,000 | 26.554348 | 122 | 0.584615 | false |
Dave-ts/Sigil | src/Resource_Files/plugin_launchers/python/sigil_bs4/builder/_lxml.py | 5 | 10167 | from __future__ import unicode_literals, division, absolute_import, print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
binary_type = bytes
unicode = str
else:
range = xrange
text_type = unicode
binary_type = str
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
if PY3:
from io import StringIO
else:
from StringIO import StringIO
import collections
from lxml import etree
from sigil_bs4.element import (
Comment,
Doctype,
NamespacedAttribute,
ProcessingInstruction,
)
from sigil_bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from sigil_bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
NAME = "lxml-xml"
ALTERNATE_NAMES = ["xml"]
# Well, it's permissive by XML parser standards.
features = [NAME, LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
exclude_encodings=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, unicode):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, unicode):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(
markup, try_encodings, is_html, exclude_encodings)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Fix bug in bs4 _lxml.py that ignores attributes that specify namespaces on this tag
# Invert each namespace map as it comes in.
if len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can properly recreate it later.
attrs = attrs.copy()
for prefix, namespace in list(nsmap.items()):
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
elif len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn then into NamespacedAttribute objects.
new_attrs = {}
for attr, value in list(attrs.items()):
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_attr_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_tag_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_attr_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
# To keep the tag prefixes as clean/simple as possible if there is
# more than one possible prefix allowed and it includes None use it instead
# This happens when a namespace prefix is added for an attribute that duplicates
# an earlier namespace meant for tags that had set that namespace prefix to None
def _prefix_for_tag_namespace(self, namespace):
"""Find the currently active prefix for the given namespace for a tag."""
if namespace is None:
return None
prefixes = []
for inverted_nsmap in self.nsmaps:
if inverted_nsmap is not None and namespace in inverted_nsmap:
prefixes.append(inverted_nsmap[namespace])
if len(prefixes) == 0 or None in prefixes:
return None
        # Now return the last (most recent) viable prefix
return prefixes[-1]
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_tag_namespace(namespace)
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
self.soup.endData()
self.soup.handle_data(target + ' ' + data)
self.soup.endData(ProcessingInstruction)
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
NAME = LXML
ALTERNATE_NAMES = ["lxml-html"]
features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
is_xml = False
def default_parser(self, encoding):
return etree.HTMLParser
def feed(self, markup):
encoding = self.soup.original_encoding
try:
self.parser = self.parser_for(encoding)
self.parser.feed(markup)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(str(e))
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<html><body>%s</body></html>' % fragment
| gpl-3.0 | -4,622,860,858,907,824,000 | 8,587,186,473,691,638,000 | 35.053191 | 93 | 0.618963 | false |
ossdemura/django-miniblog | src/Lib/site-packages/django/contrib/gis/db/backends/postgis/pgraster.py | 491 | 5071 | import binascii
import struct
from django.forms import ValidationError
from .const import (
GDAL_TO_POSTGIS, GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL,
STRUCT_SIZE,
)
def pack(structure, data):
"""
Pack data into hex string with little endian format.
"""
return binascii.hexlify(struct.pack('<' + structure, *data)).upper()
def unpack(structure, data):
"""
Unpack little endian hexlified binary string into a list.
"""
return struct.unpack('<' + structure, binascii.unhexlify(data))
def chunk(data, index):
"""
Split a string into two parts at the input index.
"""
return data[:index], data[index:]
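# Illustrative round trip (sketch): struct.pack('<i', 4326) is
# b'\xe6\x10\x00\x00', so pack('i', (4326,)) returns b'E6100000' and
# unpack('i', b'E6100000') returns (4326,).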
def get_pgraster_srid(data):
"""
Extract the SRID from a PostGIS raster string.
"""
if data is None:
return
# The positional arguments here extract the hex-encoded srid from the
# header of the PostGIS raster string. This can be understood through
# the POSTGIS_HEADER_STRUCTURE constant definition in the const module.
return unpack('i', data[106:114])[0]
def from_pgraster(data):
"""
Convert a PostGIS HEX String into a dictionary.
"""
if data is None:
return
# Split raster header from data
header, data = chunk(data, 122)
header = unpack(POSTGIS_HEADER_STRUCTURE, header)
# Parse band data
bands = []
pixeltypes = []
while data:
# Get pixel type for this band
pixeltype, data = chunk(data, 2)
pixeltype = unpack('B', pixeltype)[0]
# Subtract nodata byte from band nodata value if it exists
has_nodata = pixeltype >= 64
if has_nodata:
pixeltype -= 64
# Convert datatype from PostGIS to GDAL & get pack type and size
pixeltype = POSTGIS_TO_GDAL[pixeltype]
pack_type = GDAL_TO_STRUCT[pixeltype]
pack_size = 2 * STRUCT_SIZE[pack_type]
# Parse band nodata value. The nodata value is part of the
# PGRaster string even if the nodata flag is True, so it always
# has to be chunked off the data string.
nodata, data = chunk(data, pack_size)
nodata = unpack(pack_type, nodata)[0]
# Chunk and unpack band data (pack size times nr of pixels)
band, data = chunk(data, pack_size * header[10] * header[11])
band_result = {'data': binascii.unhexlify(band)}
# If the nodata flag is True, set the nodata value.
if has_nodata:
band_result['nodata_value'] = nodata
# Append band data to band list
bands.append(band_result)
# Store pixeltype of this band in pixeltypes array
pixeltypes.append(pixeltype)
# Check that all bands have the same pixeltype.
# This is required by GDAL. PostGIS rasters could have different pixeltypes
# for bands of the same raster.
if len(set(pixeltypes)) != 1:
raise ValidationError("Band pixeltypes are not all equal.")
return {
'srid': int(header[9]),
'width': header[10], 'height': header[11],
'datatype': pixeltypes[0],
'origin': (header[5], header[6]),
'scale': (header[3], header[4]),
'skew': (header[7], header[8]),
'bands': bands,
}
def to_pgraster(rast):
"""
Convert a GDALRaster into PostGIS Raster format.
"""
# Return if the raster is null
if rast is None or rast == '':
return
# Prepare the raster header data as a tuple. The first two numbers are
# the endianness and the PostGIS Raster Version, both are fixed by
# PostGIS at the moment.
rasterheader = (
1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
rast.srs.srid, rast.width, rast.height,
)
# Hexlify raster header
result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
for band in rast.bands:
# The PostGIS raster band header has exactly two elements, a 8BUI byte
# and the nodata value.
#
# The 8BUI stores both the PostGIS pixel data type and a nodata flag.
# It is composed as the datatype integer plus 64 as a flag for existing
# nodata values:
# 8BUI_VALUE = PG_PIXEL_TYPE (0-11) + FLAG (0 or 64)
#
# For example, if the byte value is 71, then the datatype is
# 71-64 = 7 (32BSI) and the nodata value is True.
structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
# Get band pixel type in PostGIS notation
pixeltype = GDAL_TO_POSTGIS[band.datatype()]
# Set the nodata flag
if band.nodata_value is not None:
pixeltype += 64
# Pack band header
bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
# Hexlify band data
band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper()
# Add packed header and band data to result
result += bandheader + band_data_hex
# Cast raster to string before passing it to the DB
return result.decode()
| mit | -6,334,916,719,069,067,000 | 6,177,857,146,261,548,000 | 30.496894 | 79 | 0.626504 | false |
RootForum/magrathea | magrathea/cli/commands/version.py | 1 | 1239 | # -*- coding: utf-8 -*-
"""
magrathea.cli.commands.version
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2014 by the RootForum.org team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import magrathea
from ..base import BaseCommand
class VersionCommand(BaseCommand):
"""
Command class implementing the version command.
"""
name = 'version'
aliases = ('--version', '-v')
help = 'Show version and copyright information'
arguments = (
(('-s', '--short'), {'help': 'only print the version string', 'action': 'store_true'}),
)
def handle(self):
"""Command handler for the version command"""
if 'short' in self.args and self.args.short:
self.log_notice(magrathea.get_version())
else:
self.log_notice("""Magrathea version {version}
Copyright (C) {year} by {author}
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. """.format(
version=magrathea.get_version(),
year=magrathea.COPYRIGHT[0],
author=magrathea.COPYRIGHT[1])
)
self._status = 0
| mit | 7,474,105,363,412,676,000 | -3,523,605,944,312,882,700 | 30.769231 | 95 | 0.603713 | false |
2014c2g4/c2g4 | w2/static/Brython2.0.0-20140209-164925/Lib/xml/sax/saxutils.py | 730 | 11688 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
entities.update({'\n': ' ', '\r': ' ', '\t':'	'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
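# Illustration of the helpers above (added for clarity; not part of the
# original module). These are the actual return values:
#
#     escape('<a & b>')             returns '&lt;a &amp; b&gt;'
#     unescape('&lt;a &amp; b&gt;') returns '<a & b>'
#     quoteattr('he said "hi"')     returns the value wrapped in single
#                                   quotes: 'he said "hi"'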
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
| gpl-2.0 | 5,377,505,398,410,937,000 | 8,219,255,604,507,200,000 | 31.831461 | 84 | 0.612252 | false |
ratnania/pigasus | doc/manual/include/demo/test_neumann_quartcircle.py | 1 | 2730 | #! /usr/bin/python
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
import numpy as np
from pigasus.gallery.poisson import *
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
# ...
sin = np.sin ; cos = np.cos ; pi = np.pi ; exp = np.exp
# ...
#-----------------------------------
try:
nx = int(sys.argv[1])
except:
nx = 31
try:
ny = int(sys.argv[2])
except:
ny = 31
try:
px = int(sys.argv[3])
except:
px = 2
try:
py = int(sys.argv[4])
except:
py = 2
from igakit.cad_geometry import quart_circle as domain
geo = domain(n=[nx,ny],p=[px,py])
#-----------------------------------
# ...
# exact solution
# ...
R = 1.
r = 0.5
c = 1. # for neumann
#c = pi / (R**2-r**2) # for all dirichlet bc
u = lambda x,y : [ x * y * sin ( c * (R**2 - x**2 - y**2 )) ]
# ...
# ...
# rhs
# ...
f = lambda x,y : [4*c**2*x**3*y*sin(c*(R**2 - x**2 - y**2)) \
+ 4*c**2*x*y**3*sin(c*(R**2 - x**2 - y**2)) \
+ 12*c*x*y*cos(c*(R**2 - x**2 - y**2)) ]
# ...
# ...
# values of gradu.n at the boundary
# ...
gradu = lambda x,y : [-2*c*x**2*y*cos(c*(R**2 - x**2 - y**2)) + y*sin(c*(R**2 - x**2 - y**2)),
                      -2*c*x*y**2*cos(c*(R**2 - x**2 - y**2)) + x*sin(c*(R**2 - x**2 - y**2))]
def func_g (x,y) :
du = gradu (x, y)
return [ du[0] , du[1] ]
# ...
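# Optional symbolic sanity check of the manufactured solution (a sketch,
# left commented out so the test gains no extra dependency; requires sympy):
#
#   import sympy as sp
#   X, Y = sp.symbols('x y')
#   U = X*Y*sp.sin(c*(R**2 - X**2 - Y**2))
#   F = sp.simplify(-sp.diff(U, X, 2) - sp.diff(U, Y, 2))  # should match f
#   GU = (sp.diff(U, X), sp.diff(U, Y))                    # should match gradu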
# ...
# values of u at the boundary
# ...
bc_neumann={}
bc_neumann [0,0] = func_g
Dirichlet = [[1,2,3]]
#AllDirichlet = True
# ...
# ...
try:
bc_dirichlet
except NameError:
bc_dirichlet = None
else:
pass
try:
bc_neumann
except NameError:
bc_neumann = None
else:
pass
try:
AllDirichlet
except NameError:
AllDirichlet = None
else:
pass
try:
Dirichlet
except NameError:
Dirichlet = None
else:
pass
try:
Metric
except NameError:
Metric = None
else:
pass
# ...
# ...
PDE = poisson(geometry=geo, bc_dirichlet=bc_dirichlet, bc_neumann=bc_neumann,
AllDirichlet=AllDirichlet, Dirichlet=Dirichlet,metric=Metric)
# ...
# ...
PDE.assembly(f=f)
PDE.solve()
# ...
# ...
normU = PDE.norm(exact=u)
print "norm U = ", normU
# ...
# ...
if PLOT:
PDE.plot() ; plt.colorbar(); plt.title('$u_h$')
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
PDE.free()
| mit | -6,280,085,234,493,662,000 | -8,082,561,401,385,837,000 | 17.322148 | 97 | 0.456044 | false |
aroig/offlineimap | test/OLItest/globals.py | 12 | 1373 | #Constants, that don't rely on anything else in the module
# Copyright (C) 2012- Sebastian Spaeth & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
try:
from cStringIO import StringIO
except ImportError: #python3
from io import StringIO
default_conf=StringIO("""[general]
#will be set automatically
metadata =
accounts = test
ui = quiet
[Account test]
localrepository = Maildir
remoterepository = IMAP
[Repository Maildir]
Type = Maildir
# will be set automatically during tests
localfolders =
[Repository IMAP]
type=IMAP
# Don't hammer the server with too many connection attempts:
maxconnections=1
folderfilter= lambda f: f.startswith('INBOX.OLItest')
""")
| gpl-2.0 | 7,658,065,020,478,947,000 | 4,414,208,022,772,053,000 | 31.690476 | 78 | 0.748725 | false |
wsilva/fdp-folha-de-ponto-ach2077 | fdp/settings/base.py | 1 | 2384 | """
Django settings for fdp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j7y4q=&c=n0o9hdoc(ebkfj41k%wyhe&^zq!dqrwnwxgxbz&z+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pontos',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fdp.urls'
WSGI_APPLICATION = 'fdp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'pt-br'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
UPLOAD_DIR = os.path.join(BASE_DIR, 'static', 'uploads')
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'static_dirs'),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
| gpl-3.0 | 7,745,456,355,557,954,000 | 6,094,693,808,234,647,000 | 23.326531 | 71 | 0.713926 | false |
flyher/pymo | symbian/PythonForS60_1.9.6/module-repo/standard-modules/encodings/cp1253.py | 593 | 13350 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1253',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
u'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\ufffe' # 0xAA -> UNDEFINED
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u2015' # 0xAF -> HORIZONTAL BAR
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u0384' # 0xB4 -> GREEK TONOS
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
u'\ufffe' # 0xD2 -> UNDEFINED
u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
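# Round-trip example (illustrative only; the values follow from the table
# above, where 0xE1 maps to GREEK SMALL LETTER ALPHA):
#
#   u'\u03b1'.encode('cp1253')  == '\xe1'
#   '\xe1'.decode('cp1253')     == u'\u03b1'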
| mit | 9,088,449,105,050,500,000 | -4,117,290,232,040,372,700 | 42.485342 | 119 | 0.545843 | false |
antonioguirola/webpy-base | forms.py | 1 | 4418 | # -*- coding: utf-8 -*-
from web import form
import re
import db
# Required regular expressions:
#formatoVisa=re.compile(r'[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}')
# Functions needed for the validations
def fooFunction():
pass
"""
EXAMPLE SIGN-UP FORM
formularioInscripcion = form.Form(
form.Textbox(
"nombre",
form.notnull,
class_="form-control",
id="nombreId",
description="Nombre: "
),
form.Textbox(
"apellidos",
form.notnull,
class_="form-control",
id="apellidosId",
description="Apellidos: "
),
form.Textbox(
"dni",
form.notnull,
class_="form-control",
id="dniId",
description="DNI: "
),
form.Textbox(
"email",
form.notnull,
form.regexp(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}',
'Formato de email incorrecto'),
class_="form-control",
id="emailId",
description=u"Correo electrónico: "
),
form.Dropdown(
"dia",
[(d, d) for d in range(1,32)],
id="diaID",
description=u"Día de nacimiento: ",
),
form.Dropdown(
"mes",
[(1,'Enero'),(2,'Febrero'),(3,'Marzo'),(4,'Abril'),(5,'Mayo'),(6,'Junio'),
(7,'Julio'),(8,'Agosto'),(9,'Septiembre'),(10,'Octubre'),(11,'Noviembre'),(12,'Diciembre')],
id="mesID",
description="Mes de nacimiento: "
),
form.Dropdown(
"anio",
[d for d in range(1930,2006)],
id="anioID",
description=u"Año de nacimiento: "
),
form.Textarea(
"direccion",
form.notnull,
class_="form-control",
id="direccionId",
description=u"Dirección: "
),
form.Textbox(
"username",
form.notnull,
class_="form-control",
id="usernameId",
description="Nombre de usuario: "
),
form.Password(
"password1",
form.notnull,
class_="form-control",
id="password1Id",
description=u"Contraseña: "
),
form.Password(
"password2",
form.notnull,
class_="form-control",
id="password2Id",
description=u"Repita la contraseña: "
),
form.Radio(
'formaPago',
[["VISA","VISA "],["contraReembolso","Contra reembolso"]],
form.notnull,
id="formaPagoId",
description="Forma de pago: "
),
form.Textbox(
"visa",
class_="form-control",
id="visaId",
description="Número de tarjeta VISA: ",
),
form.Checkbox(
"acepto",
description="Acepto las condiciones de uso ",
id="aceptoId",
value="si"
),
validators = [
form.Validator(u"Fecha incorrecta", lambda x: ((int(x.mes)==2 and int(x.dia)<=28)) or
(int(x.mes) in [4,6,9,11] and int(x.dia)<31) or (int(x.mes) in [1,3,5,7,8,10,12])
or (int(x.mes)==2 and int(x.dia)==29 and esBisiesto(x.anio))),
form.Validator(u"La contraseña debe tener al menos 7 caracteres",lambda x: len(x.password1)>6),
form.Validator(u"Las contraseñas no coinciden", lambda x: x.password1 == x.password2),
form.Validator(u"Debe introducir un número de tarjeta válido",lambda x: (x.formaPago=="contraReembolso")
or (x.formaPago=="VISA" and formatoVisa.match(x.visa))),
form.Validator(u"Debe aceptar los términos y condiciones",lambda x: x.acepto=="si")
]
)
"""
| gpl-3.0 | -5,111,416,543,373,467,000 | -6,007,240,353,605,437,000 | 33.155039 | 121 | 0.433727 | false |
hehongliang/tensorflow | tensorflow/contrib/tensorrt/custom_plugin_examples/inc_op.py | 37 | 1215 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Loader for the custom inc_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
if platform.system() != "Windows":
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# pylint: enable=g-import-not-at-top
_inc_op = loader.load_op_library(
resource_loader.get_path_to_datafile("_inc_op.so"))
else:
raise RuntimeError("Windows not supported")
| apache-2.0 | 325,638,944,133,152,600 | -8,517,909,292,822,585,000 | 36.96875 | 79 | 0.700412 | false |
binghongcha08/pyQMD | sys_bath/bilinear/sys_bath_lqf.py | 2 | 10991 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 09:42:22 2016
@author: bing
"""
import numpy as np
#import scipy
import numba
import sys
import math
bohr_angstrom = 0.52917721092
hartree_wavenumber = 219474.63
#hartree_wavenumber = scipy.constants.value(u'hartree-inverse meter relationship') / 1e2
def M1mat(a, Nb):
M1 = np.zeros((Nb,Nb))
for m in range(Nb-1):
M1[m,m+1] = np.sqrt(float(m+1)/2.0/a)
M1 = Sym(M1)
return M1
def M2mat(a, Nb):
M2 = np.zeros((Nb,Nb))
for m in range(Nb):
M2[m,m] = (float(m) + 0.5)/a
if Nb > 1:
for m in range(Nb-2):
M2[m,m+2] = np.sqrt(float((m+1)*(m+2)))/2.0/a
M2 = Sym(M2)
return M2
def M3mat(a, Nb):
M3 = np.zeros((Nb,Nb))
for m in range(Nb-1):
M3[m,m+1] = 3.0 * (float(m+1)/2./a)**1.5
if Nb > 2:
for m in range(Nb-3):
M3[m,m+3] = np.sqrt(float((m+1)*(m+2)*(m+3))) / (2.0*a)**1.5
M3 = Sym(M3)
return M3
def M4mat(a, Nb):
M4 = np.zeros((Nb,Nb))
for m in range(Nb):
M4[m,m] = float(3.0 * m**2 + 3.0 * (m+1)**2) / (2.*a)**2
if Nb > 1:
for m in range(Nb-2):
M4[m,m+2] = (4.0*m + 6.0) * np.sqrt(float((m+1)*(m+2))) / (2.*a)**2
if Nb > 3:
for m in range(Nb-4):
M4[m,m+4] = np.sqrt(float((m+1)*(m+2)*(m+3)*(m+4))) / (2.0*a)**2
M4 = Sym(M4)
if Nb > 1:
if not M4[0,1] == M4[1,0]:
print(M4)
print('\n ERROR: Not symmetric matrix M4.\n')
sys.exit()
return M4
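# Note (added for clarity): M1mat..M4mat above are the matrix elements of
# x, x**2, x**3 and x**4 in a harmonic-oscillator (Gauss-Hermite) basis with
# width parameter a, i.e. taking x = (b + b^dagger)/sqrt(2*a).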
def Hermite(x, Nb):   # Nb passed explicitly: it is not defined at module scope
cons = np.array([1. / np.sqrt(float(2**n) * float(math.factorial(n))) for n in range(Nb)])
H = []
H.append(1.0)
H.append( x * 2.0 )
if Nb > 2:
for n in range(2,Nb):
Hn = 2.0 * x * H[n-1] - 2.0*(n-1) * H[n-2]
H.append(Hn)
for n in range(Nb):
H[n] = H[n]*cons[n]
return H
# if n == 0:
# H.append(1.)
# elif n == 1:
# return 2. * x * cons
# elif n == 2:
# return (4. * x**2 - 2.) * cons
# elif n == 3:
# return (8.0 * x**3 - 12.0 * x) * cons
# elif n == 4:
# return (16.0 * x**4 - 48.0 * x**2 + 12.0) * cons
# elif n == 5:
# return (32.0*x**5 - 160.0*x**3 + 120.0*x) * cons
# elif n == 6:
# return ()
#def Vx(x):
# g = 0.1
# return x**2/2.0 + g * x**4 / 4.0
def Kmat(alpha,pAve, Nb):
K = np.zeros((Nb,Nb),dtype=complex)
ar = alpha.real
for j in range(Nb):
K[j,j] = np.abs(alpha)**2 / ar * (2. * j + 1.)/2. + pAve**2
for j in range(1,Nb):
K[j-1,j] = -1j*np.conj(alpha) * pAve * np.sqrt(2. * j / ar)
K[j,j-1] = np.conj(K[j-1,j])
if Nb > 2:
for j in range(2,Nb):
K[j-2,j] = - np.sqrt(float((j-1)*j)) * np.conj(alpha)**2 / 2. / ar
K[j,j-2] = np.conj(K[j-2,j])
#K[0,0] = np.abs(alpha)**2/alpha.real / 2. + pAve**2
#K[1,1] = np.abs(alpha)**2/alpha.real * 3.0 / 2. + pAve**2
#K[0,1] = -1j*np.conj(alpha) * pAve * np.sqrt(2.*j/alpha.real)
#K[1,0] = np.conj(K[0,1])
K = K / (2.*amx)
return K
def Sym(V):
n = V.shape[-1]
for i in range(n):
for j in range(i):
V[i,j] = V[j,i]
return V
@numba.autojit
def Vint(x,y):
"""
interaction potential between x and y
"""
PES = 'HO'
if PES == 'Morse':
a, x0 = 1.02, 1.4
De = 0.176 / 100.0
d = (1.0-np.exp(-a*x))
v0 = De*d**2
dv = 2. * De * d * a * np.exp(-a*x)
elif PES == 'HO':
v0 = x**2/2.0 + y**2/2.0
elif PES == 'AHO':
eps = 0.4
v0 = x**2/2.0 + eps * x**4/4.0
dv = x + eps * x**3
#ddv = 2.0 * De * (-d*np.exp(-a*((x-x0)))*a**2 + (np.exp(-a*(x-x0)))**2*a**2)
# elif PES == 'pH2':
#
# dx = 1e-4
#
# v0 = np.zeros(Ntraj)
# dv = np.zeros(Ntraj)
#
# for i in range(Ntraj):
# v0[i] = vpot(x[i])
# dv[i] = ( vpot(x[i] + dx) - v0[i])/dx
return v0
def Vy(y):
v0 = y**2/2.0
dv = y
return v0,dv
def LQF(x,w):
xAve = np.dot(x,w)
xSqdAve = np.dot(x*x,w)
var = (xSqdAve - xAve**2)
a = 1. / 2. / var
r = - a * (x-xAve)
dr = - a
uAve = (np.dot(r**2,w))/2./amy
du = -1./amy * (r*dr)
return r, du, uAve
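# Note (added for clarity): LQF above is exact for a Gaussian density, for
# which the derivative of the log-amplitude is linear, r(y) = -(y - <y>)/(2*var);
# the variance is estimated from the weighted trajectory ensemble.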
@numba.autojit
def qpot(x,p,r,w):
"""
Linear Quantum Force : direct polynomial fitting of derivative-log density (amplitude)
curve_fit : randomly choose M points and do a nonlinear least-square fitting to a
predefined functional form
"""
#tau = (max(xdata) - min(xdata))/(max(x) - min(x))
#if tau > 0.6:
# pass
#else:
    # print('Data points are not sampled well.')
am= amy
Nb = 2
S = np.zeros((Nb,Nb))
for j in range(Nb):
for k in range(Nb):
S[j,k] = np.dot(x**(j+k), w)
bp = np.zeros(Nb)
br = np.zeros(Nb)
for n in range(Nb):
bp[n] = np.dot(x**n * p, w)
br[n] = np.dot(x**n * r, w)
cp = np.linalg.solve(S,bp)
cr = np.linalg.solve(S,br)
#unit = np.identity(Nb)
#r_approx = cr[0] * unit + cr[1] * x + cr[2] * x**2 + cr[3] * x**3
#p_approx = cp[0] * unit + cp[1] * x + cp[2] * x**2 + cp[3] * x**3
N = len(x)
dr = np.zeros(N)
dp = np.zeros(N)
ddr = np.zeros(N)
ddp = np.zeros(N)
for k in range(1,Nb):
dr += float(k) * cr[k] * x**(k-1)
dp += float(k) * cp[k] * x**(k-1)
for k in range(2,Nb-1):
ddr += float(k * (k-1)) * cr[k] * x**(k-2)
ddp += float(k * (k-1)) * cp[k] * x**(k-2)
fr = -1./2./am * (2. * r * dp + ddp)
fq = 1./2./am * (2. * r * dr + ddr)
Eu = -1./2./am * np.dot(r**2 + dr,w)
return Eu,fq,fr
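# Note on qpot (added for clarity): S is the matrix of weighted moments
# S[j,k] = sum_i w_i * x_i**(j+k), so solving S c = b yields the weighted
# least-squares polynomial fits of p(x) and r(x); the force terms fr and fq
# then follow from the analytic derivatives of those fits.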
# initialization
# for DOF y : an ensemble of trajectories
# for DOF x : for each trajectory associate a complex vector c of dimension M
Ntraj = 1024
M = 16
nfit = 2
ax = 1.0 # width of the GH basis
ay0 = 4.0
y0 = 0.0
print('polynomial fitting of c, order = {} \n'.format(nfit))
# initial conditions for c
c = np.zeros((Ntraj,M),dtype=np.complex128)
# mixture of ground and first excited state
#c[:,0] = 1.0/np.sqrt(2.0)+0j
#c[:,1] = 1.0/np.sqrt(2.0)+0j
#for i in range(2,M):
# c[:,i] = 0.0+0.0j
# coherent state
z = 1.0/np.sqrt(2.0)
for i in range(M):
c[:,i] = np.exp(-0.5 * np.abs(z)**2) * z**i / np.sqrt(math.factorial(i))
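# With these coefficients c_n = exp(-|z|**2/2) z**n / sqrt(n!), the basis
# populations |c_n|**2 follow a Poisson distribution with mean |z|**2 = 0.5
# (a standard coherent-state property; noted here for clarity).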
print('initial occupation \n',c[0,:])
print('trace of density matrix',np.vdot(c[0,:], c[0,:]))
# ---------------------------------
# initial conditions for QTs
y = np.random.randn(Ntraj)
y = y / np.sqrt(2.0 * ay0) + y0
print('trajectory range {}, {}'.format(min(y),max(y)))
py = np.zeros(Ntraj)
ry = - ay0 * (y-y0)
w = np.array([1./Ntraj]*Ntraj)
# -------------------------------
amx = 1.0
amy = 10.0
f_MSE = open('rMSE.out','w')
nout = 20 # number of trajectories to print
fmt = ' {}' * (nout+1) + '\n'
Eu = 0.
Ndim = 1 # dimensionality of the bath
fric_cons = 0.0 # friction constant
Nt = 2**14
dt = 1.0/2.0**10
dt2 = dt/2.0
t = 0.0
print('time range for propagation is [0,{}]'.format(Nt*dt))
print('timestep = {}'.format(dt))
# construct the Hamiltonian matrix for anharmonic oscilator
g = 0.4
V = 0.5 * M2mat(ax,M) + g/4.0 * M4mat(ax,M)
K = Kmat(ax,0.0,M)
H = K+V
print('Hamiltonian matrix in DOF x = \n')
print(H)
print('\n')
@numba.autojit
def norm(c,w):
anm = 0.0
for k in range(Ntraj):
anm += np.vdot(c[k,:], c[k,:]).real * w[k]
return anm
@numba.autojit
def fit_c(c,y):
"""
global approximation of c vs y to obtain the derivative c'',c'
"""
dc = np.zeros((Ntraj,M),dtype=np.complex128)
ddc = np.zeros((Ntraj,M),dtype=np.complex128)
for j in range(M):
z = c[:,j]
pars = np.polyfit(y,z,nfit)
p0 = np.poly1d(pars)
p1 = np.polyder(p0)
p2 = np.polyder(p1)
#for k in range(Ntraj):
dc[:,j] = p1(y)
ddc[:,j] = p2(y)
return dc, ddc
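# Note (added for clarity): each coefficient c_m(y) is approximated globally
# by a polynomial of order nfit in y via np.polyfit, and its first and second
# derivatives are then evaluated analytically through np.polyder.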
@numba.autojit
def prop_c(H,c,y,ry,py):
dc, ddc = fit_c(c,y)
dcdt = np.zeros([Ntraj,M],dtype=np.complex128)
eps = 0.50 # bilinear coupling Vint = eps*x*y
X1 = M1mat(ax,M)
for k in range(Ntraj):
Vp = eps * y[k] * X1
tmp = (H + Vp).dot(c[k,:]) - ddc[k,:]/2.0/amy - dc[k,:] * ry[k]/amy
dcdt[k,:] = -1j * tmp
return dcdt
@numba.autojit
def xAve(c,y,w):
"""
compute expectation value of x
"""
Xmat = M1mat(ax,M)
x_ave = 0.0+0.0j
for k in range(Ntraj):
for m in range(M):
for n in range(M):
x_ave += Xmat[m,n] * np.conjugate(c[k,m]) * c[k,n] * w[k]
return x_ave.real
# propagate the QTs for y
# update the coefficients for each trajectory
fmt_c = ' {} '* (M+1)
f = open('traj.dat','w')
fe = open('en.out','w')
fc = open('c.dat','w')
fx = open('xAve.dat','w')
fnorm = open('norm.dat', 'w')
v0, dv = Vy(y)
ry, du, Eu = LQF(y,w)
cold = c
dcdt = prop_c(H,c,y,ry,py)
c = c + dcdt * dt
for k in range(Nt):
t = t + dt
py += (- dv - du) * dt2 - fric_cons * py * dt2
y += py*dt/amy
# force field
ry, du, Eu = LQF(y,w)
v0, dv = Vy(y)
py += (- dv - du) * dt2 - fric_cons * py * dt2
# renormalization
anm = norm(c,w)
c /= np.sqrt(anm)
# update c
dcdt = prop_c(H,c,y,ry,py)
cnew = cold + dcdt * dt * 2.0
cold = c
c = cnew
# output data for each timestep
# d = c
# for k in range(Ntraj):
# for i in range(M):
# d[k,i] = np.exp(-1j*t*H[i,i])*c[k,i]
x_ave = xAve(c,y,w)
fx.write('{} {} \n'.format(t,x_ave))
f.write(fmt.format(t,*y[0:nout]))
fnorm.write(' {} {} \n'.format(t,anm))
Ek = np.dot(py*py,w)/2./amy
Ev = np.dot(v0,w)
Eu = Eu
Etot = Ek + Ev + Eu
fe.write('{} {} {} {} {} \n'.format(t,Ek,Ev,Eu,Etot))
print('The total energy = {} Hartree. \n'.format(Etot))
# print trajectory and coefficients
for k in range(Ntraj):
fc.write( '{} {} {} {} \n'.format(y[k], c[k,0],c[k,-2],c[k,-1]))
fe.close()
f.close()
fc.close()
fx.close()
#a, x0, De = 1.02, 1.4, 0.176/100
#print('The well depth = {} cm-1. \n'.format(De * hartree_wavenumber))
#
#omega = a * np.sqrt(2. * De / am )
#E0 = omega/2. - omega**2/16./De
#dE = (Etot-E0) * hartree_wavenumber
#print('Exact ground-state energy = {} Hartree. \nEnergy deviation = {} cm-1. \n'.format(E0,dE))
#
| gpl-3.0 | -6,172,904,489,373,398,000 | 1,340,156,213,176,367,600 | 20.136538 | 96 | 0.459467 | false |
estaban/pyload | module/plugins/accounts/FileserveCom.py | 1 | 2261 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
from time import mktime, strptime
from module.plugins.Account import Account
from module.common.json_layer import json_loads
class FileserveCom(Account):
__name__ = "FileserveCom"
__version__ = "0.2"
__type__ = "account"
__description__ = """Fileserve.com account plugin"""
__author_name__ = "mkaay"
__author_mail__ = "[email protected]"
def loadAccountInfo(self, user, req):
data = self.getAccountData(user)
page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
"submit": "Submit+Query"})
res = json_loads(page)
if res['type'] == "premium":
validuntil = mktime(strptime(res['expireTime'], "%Y-%m-%d %H:%M:%S"))
return {"trafficleft": res['traffic'], "validuntil": validuntil}
else:
return {"premium": False, "trafficleft": None, "validuntil": None}
def login(self, user, data, req):
page = req.load("http://app.fileserve.com/api/login/", post={"username": user, "password": data['password'],
"submit": "Submit+Query"})
res = json_loads(page)
if not res['type']:
self.wrongPassword()
        # log in at the fileserve page
req.load("http://www.fileserve.com/login.php",
post={"loginUserName": user, "loginUserPassword": data['password'], "autoLogin": "checked",
"loginFormSubmit": "Login"})
| gpl-3.0 | 902,533,730,763,959,600 | -213,686,237,428,314,900 | 37.982759 | 116 | 0.597523 | false |