AhmedSSoliman/Llama2-CodeGen-PEFT-QLoRA
Text Generation
Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
0 | def format_exp_floats(decimals):
threshold = 10 ** 5
return (
lambda n: "{:.{prec}e}".format(n, prec=decimals) if n > threshold else "{:4.{prec}f}".format(n, prec=decimals)
) | sometimes the exp. column can be too large |
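For context, a minimal runnable sketch of how the formatter in this row behaves; the function is restated from the row above so the snippet stands alone, and the sample values are illustrative only:

```python
def format_exp_floats(decimals):
    # Restated from the row above: switch to scientific notation above 10**5.
    threshold = 10 ** 5
    return (
        lambda n: "{:.{prec}e}".format(n, prec=decimals)
        if n > threshold
        else "{:4.{prec}f}".format(n, prec=decimals)
    )

fmt = format_exp_floats(2)
print(fmt(1234.5))     # '1234.50'  -> fixed-point below the threshold
print(fmt(1234567.8))  # '1.23e+06' -> scientific notation above the threshold
```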
1 | async def execute(self, coro, name, user, info=None):
task = self.schedCoro(coro)
return await s_task.Task.anit(self, task, name, user, info=info) | Create a synapse task from the given coroutine. |
2 | def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):
if direction not in ('in', 'out'):
raise ValueError('Argument `direction` should be one of "in" or "out", got {}'.format(repr(direction)))
if dead_zone >= percent:
raise ValueError(
'Argument `dead_zone` should not be greater than `percent`, got dead_zone={} percent={}'.format(repr(dead_zone), repr(percent)))
w, h = self.get_size()
x, y = self.get_position()
tracks = make_pinching(direction, [x, y], [w, h], percent, dead_zone, duration)
speed = math.sqrt(w * h) * (percent - dead_zone) / 2 / duration
ret = self.poco.apply_motion_tracks(tracks, accuracy=speed * 0.03)
return ret | Squeezing or expanding 2 fingers on this UI with given motion range and duration.
Args:
direction (:py:obj:`str`): pinching direction, only "in" or "out". "in" for squeezing, "out" for expanding
percent (:py:obj:`float`): squeezing range from or expanding range to of the bounds of the UI
duration (:py:obj:`float`): time interval in which the action is performed
dead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``
Raises:
PocoNoSuchNodeException: raised when the UI element does not exist |
3 | def on_log(self):
def decorator(handler):
self.client.on_log = handler
return handler
return decorator | Decorate a callback function to handle MQTT logging.
**Example Usage:**
::
@mqtt.on_log()
def handle_logging(client, userdata, level, buf):
print(client, userdata, level, buf) |
4 | def fixminimized(self, alphabet):
insymbols = fst.SymbolTable()
outsymbols = fst.SymbolTable()
num = 1
for char in self.alphabet:
self.isyms.__setitem__(char, num)
self.osyms.__setitem__(char, num)
insymbols.add_symbol(char, num)
outsymbols.add_symbol(char, num)
num = num + 1
self.automaton.set_input_symbols(insymbols)
self.automaton.set_output_symbols(outsymbols)
endstate = self.add_state()
for state in self.states:
for char in alphabet:
found = 0
for arc in state.arcs:
if self.isyms.find(arc.ilabel) == char:
found = 1
break
if found == 0:
self.add_arc(state.stateid, endstate, char)
self[endstate].final = False
for char in alphabet:
self.add_arc(endstate, endstate, char) | After pyfst minimization,
all unused arcs are removed,
and all sink states are removed.
However this may break compatibility.
Args:
alphabet (list): The input alphabet
Returns:
None |
5 | def predict_proba(self, time):
check_is_fitted(self, "unique_time_")
time = check_array(time, ensure_2d=False)
extends = time > self.unique_time_[-1]
if self.prob_[-1] > 0 and extends.any():
raise ValueError("time must be smaller than largest "
"observed time point: {}".format(self.unique_time_[-1]))
Shat = numpy.empty(time.shape, dtype=float)
Shat[extends] = 0.0
valid = ~extends
time = time[valid]
idx = numpy.searchsorted(self.unique_time_, time)
eps = numpy.finfo(self.unique_time_.dtype).eps
exact = numpy.absolute(self.unique_time_[idx] - time) < eps
idx[~exact] -= 1
Shat[valid] = self.prob_[idx]
return Shat | Return probability of an event after given time point.
:math:`\\hat{S}(t) = P(T > t)`
Parameters
----------
time : array, shape = (n_samples,)
Time to estimate probability at.
Returns
-------
prob : array, shape = (n_samples,)
Probability of an event. |
6 | def classify(self, token_type, value, lineno, column, line):
if token_type == self.grammar.KEYWORD_TOKEN:
label_index = self.grammar.keyword_ids.get(value, -1)
if label_index != -1:
return label_index
label_index = self.grammar.token_ids.get(token_type, -1)
if label_index == -1:
raise ParseError("invalid token", token_type, value, lineno, column,
line)
return label_index | Find the label for a token. |
7 | def phi_vector(self):
weights = self.pst.observation_data.loc[self.names,"weight"]
obsval = self.pst.observation_data.loc[self.names,"obsval"]
phi_vec = []
for idx in self.index.values:
simval = self.loc[idx,self.names]
phi = (((simval - obsval) * weights)**2).sum()
phi_vec.append(phi)
return pd.Series(data=phi_vec,index=self.index) | property decorated method to get a vector of L2 norm (phi)
for the realizations. The ObservationEnsemble.pst.weights can be
updated prior to calling this method to evaluate new weighting strategies
Returns
-------
pandas.Series : vector of phi values, one per realization |
8 | def nrmse_iqr(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False):
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
rmse_value = np.sqrt(np.mean((simulated_array - observed_array) ** 2))
q1 = np.percentile(observed_array, 25)
q3 = np.percentile(observed_array, 75)
iqr = q3 - q1
return rmse_value / iqr | Compute the IQR normalized root mean square error between the simulated and observed data.
.. image:: /pictures/NRMSE_IQR.png
**Range:** 0 ≤ NRMSE < inf.
**Notes:** This metric is the RMSE normalized by the interquartile range of the observed time
series (x). Normalizing allows comparison between data sets with different scales.
The NRMSEquartile is the least sensitive to outliers of the three normalized rmse metrics.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The IQR normalized root mean square error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.nrmse_iqr(sim, obs)
0.2595461185212093
References
----------
- Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of information for multiple
resolution comparison between maps that share a real variable. Environmental and Ecological
Statistics 15(2) 111-142. |
9 | def _updatePoolingState(self, activeColWithPredictedInput, fractionUnpredicted):
if fractionUnpredicted > self._poolingThreshUnpredicted:
if self._spVerbosity > 3:
print " reset pooling state for all cells"
self._poolingActivation = numpy.zeros(self._numColumns, dtype="int32")
else:
self._poolingActivation[self._poolingColumns] -= 1
self._poolingActivation[activeColWithPredictedInput] = self._poolingLife
self._poolingColumns = self._poolingActivation.nonzero()[0] | This function updates the pooling state of TP cells. A cell will stop
pooling if:
(1) It hasn't received any predicted input in the last self._poolingLife
steps
or
(2) the overall fraction of unpredicted input to the TP is above
_poolingThreshUnpredicted |
10 | def _init_vocab(self, token_generator, add_reserved_tokens=True):
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token)) | Initialize vocabulary with tokens from token_generator. |
11 | def startTicker(self, reqId, contract, tickType):
ticker = self.tickers.get(id(contract))
if not ticker:
ticker = Ticker(
contract=contract, ticks=[], tickByTicks=[],
domBids=[], domAsks=[], domTicks=[])
self.tickers[id(contract)] = ticker
self.reqId2Ticker[reqId] = ticker
self._reqId2Contract[reqId] = contract
self.ticker2ReqId[tickType][ticker] = reqId
return ticker | Start a tick request that has the reqId associated with the contract.
Return the ticker. |
12 | def MakePmfFromList(t, name=''):
hist = MakeHistFromList(t)
d = hist.GetDict()
pmf = Pmf(d, name)
pmf.Normalize()
return pmf | Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
name: string name for this PMF
Returns:
Pmf object |
13 | def to_geopandas(raster, **kwargs):
df = to_pandas(raster, **kwargs)
df['geometry'] = df.apply(squares, georaster=raster, axis=1)
df = gp.GeoDataFrame(df, crs=from_string(raster.projection.ExportToProj4()))
return df | Convert GeoRaster to GeoPandas DataFrame, which can be easily exported to other types of files
and used to do other types of operations.
The DataFrame has the geometry (Polygon), row, col, value, x, and y values for each cell
Usage:
df = gr.to_geopandas(raster) |
14 | async def ensure_closed(self):
if self._writer is None:
return
send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
self._writer.write(send_data)
await self._writer.drain()
self.close() | Send quit command and then close socket connection |
15 | def add_missing_components(network):
new_trafo = str(network.transformers.index.astype(int).max() + 1)
network.add("Transformer", new_trafo, bus0="16573", bus1="23648",
x=0.135 / (2750 / 2),
r=0.0, tap_ratio=1, s_nom=2750 / 2)
def add_110kv_line(bus0, bus1, overhead=False):
new_line = str(network.lines.index.astype(int).max() + 1)
if not overhead:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=280)
else:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=260)
network.lines.loc[new_line, "scn_name"] = "Status Quo"
network.lines.loc[new_line, "v_nom"] = 110
network.lines.loc[new_line, "version"] = "added_manually"
network.lines.loc[new_line, "frequency"] = 50
network.lines.loc[new_line, "cables"] = 3.0
network.lines.loc[new_line, "country"] =
network.lines.loc[new_line, "length"] = (
pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]],
network.buses.loc[bus1, ["x", "y"]])
[0][0] * 1.2)
if not overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.0177)
network.lines.loc[new_line, "g"] = 0
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
0.3e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
250e-9)
elif overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.05475)
network.lines.loc[new_line, "g"] = 0
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
1.2e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
9.5e-9)
add_110kv_line("16573", "28353")
add_110kv_line("16573", "28092")
add_110kv_line("25096", "25369")
add_110kv_line("25096", "28232")
add_110kv_line("25353", "25356")
add_110kv_line("23822", "25355")
add_110kv_line("23822", "28212")
add_110kv_line("25357", "665")
add_110kv_line("25354", "27414")
add_110kv_line("27414", "28212")
add_110kv_line("25354", "28294")
add_110kv_line("28335", "28294")
add_110kv_line("28335", "28139")
add_110kv_line("16573", "24182", overhead=True)
network.add("Transformer", , bus0="18967", bus1="25766",
x=0.135 / 300, r=0.0, tap_ratio=1, s_nom=300)
add_110kv_line("18967", "22449", overhead=True)
add_110kv_line("21165", "24068", overhead=True)
add_110kv_line("23782", "24089", overhead=True)
add_110kv_line("19962", "27671", overhead=True)
add_110kv_line("19962", "27671", overhead=True)
add_110kv_line("23697", "24090", overhead=True)
add_110kv_line("23697", "24090", overhead=True)
def add_220kv_line(bus0, bus1, overhead=False):
new_line = str(network.lines.index.astype(int).max() + 1)
if not overhead:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=550)
else:
network.add("Line", new_line, bus0=bus0, bus1=bus1, s_nom=520)
network.lines.loc[new_line, "scn_name"] = "Status Quo"
network.lines.loc[new_line, "v_nom"] = 220
network.lines.loc[new_line, "version"] = "added_manually"
network.lines.loc[new_line, "frequency"] = 50
network.lines.loc[new_line, "cables"] = 3.0
network.lines.loc[new_line, "country"] =
network.lines.loc[new_line, "length"] = (
pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]],
network.buses.loc[bus1, ["x", "y"]])[0][0] *
1.2)
if not overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.0176)
network.lines.loc[new_line, "g"] = 0
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] *
0.3e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] *
210e-9)
elif overhead:
network.lines.loc[new_line, "r"] = (network.lines.
loc[new_line, "length"] *
0.05475)
network.lines.loc[new_line, "g"] = 0
network.lines.loc[new_line, "x"] = (network.lines.
loc[new_line, "length"] * 1e-3)
network.lines.loc[new_line, "b"] = (network.lines.
loc[new_line, "length"] * 11e-9
)
add_220kv_line("266", "24633", overhead=True)
network.transformers["v_nom0"] = network.transformers.bus0.map(
network.buses.v_nom)
network.transformers["v_nom1"] = network.transformers.bus1.map(
network.buses.v_nom)
new_bus0 = network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1]
new_bus1 = network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1]
network.transformers.bus0[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus0.values
network.transformers.bus1[network.transformers.v_nom0>network.transformers.v_nom1] = new_bus1.values
return network | Add missing transformer at Heizkraftwerk Nord in Munich and missing
transformer in Stuttgart
Parameters
----------
network : :class:`pypsa.Network
Overall container of PyPSA
Returns
-------
network : :class:`pypsa.Network
Overall container of PyPSA |
16 | def get_attr_text(self):
return ' '.join([
'{}="{}"'.format(key, value)
for key, value in self.attr.items()
]) | Get html attr text to render in template |
17 | def get_lldp_neighbor_detail_output_has_more(self, **kwargs):
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
18 | def _get_directory_stash(self, path):
try:
save_dir = AdjacentTempDirectory(path)
save_dir.create()
except OSError:
save_dir = TempDirectory(kind="uninstall")
save_dir.create()
self._save_dirs[os.path.normcase(path)] = save_dir
return save_dir.path | Stashes a directory.
Directories are stashed adjacent to their original location if
possible, or else moved/copied into the user's temp dir. |
19 | def get_dict_for_class(self, class_name, state=None, base_name=):
classes = []
klass = class_name
while True:
classes.append(klass)
if klass.__name__ == base_name:
break
klass = klass.__bases__[0]
if state is None:
state =
style = {}
for klass in classes:
class_name = klass.__name__
try:
state_styles = self._styles[class_name][state]
except KeyError:
state_styles = {}
if state != :
try:
normal_styles = self._styles[class_name][]
except KeyError:
normal_styles = {}
state_styles = dict(chain(normal_styles.iteritems(),
state_styles.iteritems()))
style = dict(chain(state_styles.iteritems(),
style.iteritems()))
return style | The style dict for a given class and state.
This collects the style attributes from parent classes
and the class of the given object and gives precedence
to values thereof to the children.
The state attribute of the view instance is taken as
the current state if state is None.
If the state is not 'normal' then the style definitions
for the 'normal' state are mixed-in from the given state
style definitions, giving precedence to the non-'normal'
style definitions. |
20 | def hashes(self):
hashes = set()
if (self.resources is not None):
for resource in self:
if (resource.md5 is not None):
hashes.add('md5')
if (resource.sha1 is not None):
hashes.add('sha-1')
if (resource.sha256 is not None):
hashes.add('sha-256')
return(hashes) | Return the set of hash algorithms used in this resource_list. |
21 | def _get_local_ip(self):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('8.8.8.8', 80))  # any routable address works; a UDP connect sends no packets
return sock.getsockname()[0]
except socket.error:
try:
return socket.gethostbyname(socket.gethostname())
except socket.gaierror:
return '127.0.0.1'
finally:
sock.close() | Try to determine the local IP address of the machine. |
22 | def ss(inlist):
ss = 0
for item in inlist:
ss = ss + item * item
return ss | Squares each value in the passed list, adds up these squares and
returns the result.
Usage: lss(inlist) |
23 | def voronoi(script, region_num=10, overlap=False):
filter_xml = .join([
,
,
% region_num,
,
,
,
,
,
% str(overlap).lower(),
,
,
,
,
])
util.write_filter(script, filter_xml)
return None | Voronoi Atlas parameterization |
24 | def write_member(self, data):
if isinstance(data, basestring):
self.write(data)
else:
for text in data:
self.write(text)
self.close_member() | Writes the given data as one gzip member.
The data can be a string, an iterator that gives strings or a file-like object. |
25 | def make_gp_funs(cov_func, num_cov_params):
def unpack_kernel_params(params):
mean = params[0]
cov_params = params[2:]
noise_scale = np.exp(params[1]) + 0.0001
return mean, cov_params, noise_scale
def predict(params, x, y, xstar):
mean, cov_params, noise_scale = unpack_kernel_params(params)
cov_f_f = cov_func(cov_params, xstar, xstar)
cov_y_f = cov_func(cov_params, x, xstar)
cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
pred_mean = mean + np.dot(solve(cov_y_y, cov_y_f).T, y - mean)
pred_cov = cov_f_f - np.dot(solve(cov_y_y, cov_y_f).T, cov_y_f)
return pred_mean, pred_cov
def log_marginal_likelihood(params, x, y):
mean, cov_params, noise_scale = unpack_kernel_params(params)
cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
prior_mean = mean * np.ones(len(y))
return mvn.logpdf(y, prior_mean, cov_y_y)
return num_cov_params + 2, predict, log_marginal_likelihood | Functions that perform Gaussian process regression.
cov_func has signature (cov_params, x, x') |
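A hedged usage sketch for `make_gp_funs` above. It assumes the row's `np`, `solve`, and `mvn` refer to numpy, `numpy.linalg.solve`, and `scipy.stats.multivariate_normal` (in the autograd example this code resembles they are the autograd wrappers), and it supplies a hypothetical RBF covariance with the `(cov_params, x, x')` signature the docstring requires:

```python
import numpy as np
# Assumed bindings for the names used inside make_gp_funs:
#   np -> numpy, solve -> numpy.linalg.solve, mvn -> scipy.stats.multivariate_normal

def rbf_covariance(cov_params, x, xp):
    # cov_params = [log_output_scale, log_lengthscale]; x and xp have shape (n, d).
    output_scale = np.exp(cov_params[0])
    lengthscale = np.exp(cov_params[1])
    diffs = np.expand_dims(x / lengthscale, 1) - np.expand_dims(xp / lengthscale, 0)
    return output_scale * np.exp(-0.5 * np.sum(diffs ** 2, axis=2))

# make_gp_funs is the function defined in the row above.
num_params, predict, log_marginal_likelihood = make_gp_funs(rbf_covariance, num_cov_params=2)

x = np.linspace(-3, 3, 10).reshape(-1, 1)
y = np.sin(x[:, 0])
params = 0.1 * np.random.randn(num_params)  # [mean, log_noise, log_scale, log_lengthscale]
print(log_marginal_likelihood(params, x, y))
mean_star, cov_star = predict(params, x, y, np.array([[0.5]]))
```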
26 | async def fetch_neighbourhood(lat: float, long: float) -> Optional[dict]:
lookup_url = f"https://data.police.uk/api/locate-neighbourhood?q={lat},{long}"
async with ClientSession() as session:
try:
async with session.get(lookup_url) as request:
if request.status == 404:
return None
neighbourhood = await request.json()
except ClientConnectionError as con_err:
logger.debug(f"Could not connect to {con_err.host}")
raise ApiError(f"Could not connect to {con_err.host}")
except JSONDecodeError as dec_err:
logger.error(f"Could not decode data: {dec_err}")
raise ApiError(f"Could not decode data: {dec_err}")
neighbourhood_url = f"https://data.police.uk/api/{neighbourhood[]}/{neighbourhood[]}"
try:
async with session.get(neighbourhood_url) as request:
neighbourhood_data = await request.json()
except ConnectionError as con_err:
logger.debug(f"Could not connect to {con_err.args[0].pool.host}")
raise ApiError(f"Could not connect to {con_err.args[0].pool.host}")
except JSONDecodeError as dec_err:
logger.error(f"Could not decode data: {dec_err}")
raise ApiError(f"Could not decode data: {dec_err}")
return neighbourhood_data | Gets the neighbourhood from the fetch that is associated with the given postcode.
:return: A neighbourhood object parsed from the fetch.
:raise ApiError: When there was an error connecting to the API. |
27 | def newDocNodeEatName(self, ns, name, content):
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewDocNodeEatName(self._o, ns__o, name, content)
if ret is None: raise treeError('xmlNewDocNodeEatName() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | Creation of a new node element within a document. @ns and
@content are optional (None). NOTE: @content is supposed to
be a piece of XML CDATA, so it allow entities references,
but XML special chars need to be escaped first by using
xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
don't need entities support. |
28 | def position(self, chromosome, position, exact=False):
return self._clone(
filters=[GenomicFilter(chromosome, position, exact=exact)]) | Shortcut to do a single position filter on genomic datasets. |
29 | def oftype(self, typ):
for key, val in self.items():
if val.type == typ:
yield key | Return a generator of formatters codes of type typ |
30 | def url_join(url, path):
p = six.moves.urllib.parse.urlparse(url)
t = None
if p.path and p.path[-1] == '/':
if path and path[0] == '/':
path = path[1:]
t = ''.join([p.path, path])
else:
t = ('' if path and path[0] == '/' else '/').join([p.path, path])
return six.moves.urllib.parse.urlunparse(
p[:2]+
(t,)+
p[3:]
) | url version of os.path.join |
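Assuming the slash handling reconstructed above is right, the helper acts like a URL-aware `os.path.join`; a small standalone check (the function is restated so the example runs by itself):

```python
import six

def url_join(url, path):
    # Restated from the row above.
    p = six.moves.urllib.parse.urlparse(url)
    if p.path and p.path[-1] == '/':
        if path and path[0] == '/':
            path = path[1:]
        t = ''.join([p.path, path])
    else:
        t = ('' if path and path[0] == '/' else '/').join([p.path, path])
    return six.moves.urllib.parse.urlunparse(p[:2] + (t,) + p[3:])

print(url_join('http://example.com/api/', '/v1/items'))  # http://example.com/api/v1/items
print(url_join('http://example.com/api', 'v1/items'))    # http://example.com/api/v1/items
```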
31 | def add_arguments(self, parser):
parser.add_argument(, nargs=1, choices=[],
help=)
return self.add_common_arguments(parser, True) | Adds the unlock command arguments to the parser.
Args:
self (UnlockCommand): the ``UnlockCommand`` instance
parser (argparse.ArgumentParser): the parser to add the arguments to
Returns:
``None`` |
32 | def plan_to_assignment(plan):
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment | Convert the plan to the format used by cluster-topology. |
33 | def get_imagery(cls, lat, lon, date=None, dim=None, cloud_score=False):
instance = cls()
filters = {
'lat': lat,
'lon': lon,
'date': date,
'dim': dim,
'cloud_score': cloud_score
}
return instance.get_resource(**filters) | Returns satellite image
Args:
lat: latitude float
lon: longitude float
date: date instance of available date from `get_assets`
dim: width and height of image in degrees as float
cloud_score: boolean to calculate the percentage of the image covered by clouds
Returns:
json |
34 | def write_artifacts_metadata(self):
if self.conf.artifacts_metadata_file:
logger.info('Writing artifacts metadata to file: %s',
self.conf.artifacts_metadata_file)
with open(self.conf.artifacts_metadata_file, 'w') as fp:
json.dump(self.artifacts_metadata, fp) | Write out a JSON file with all built targets artifact metadata,
if such output file is specified. |
35 | def get_rva_from_offset(self, offset):
s = self.get_section_by_offset(offset)
if not s:
if self.sections:
lowest_rva = min( [ adjust_SectionAlignment( s.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) for s in self.sections] )
if offset < lowest_rva:
return offset
else:
return offset
return s.get_rva_from_offset(offset) | Get the RVA corresponding to this file offset. |
36 | def mixin_class(target, cls):
for name, field in getmembers(cls):
Mixin.mixin(target, field, name) | Mix cls content in target. |
37 | def to_json_data(self):
d = collections.OrderedDict((t.get_ref(), t.to_json_data()) for t in self._tables.values())
d["_comment"] = self._comment
d.move_to_end("_comment", last=False)
d["_external_files"] = self._dev_external_files_manager
return d | Returns
-------
A dictionary of serialized data. |
38 | def images_create(self, filename):
suffix = get_filename_suffix(filename, image.VALID_IMGFILE_SUFFIXES)
if not suffix is None:
return self.images.create_object(filename)
suffix = get_filename_suffix(filename, ARCHIVE_SUFFIXES)
if not suffix is None:
temp_dir = tempfile.mkdtemp()
try:
tf = tarfile.open(name=filename, mode='r')
tf.extractall(path=temp_dir)
except (tarfile.ReadError, IOError) as err:
shutil.rmtree(temp_dir)
raise ValueError(str(err))
group = []
for img_file in image.get_image_files(temp_dir, []):
img_obj = self.images.create_object(img_file)
folder = img_file[len(temp_dir):-len(img_obj.name)]
group.append(image.GroupImage(
img_obj.identifier,
folder,
img_obj.name,
img_obj.image_file
))
name = os.path.basename(os.path.normpath(filename))[:-len(suffix)]
img_grp = self.image_groups.create_object(name, group, filename)
shutil.rmtree(temp_dir)
return img_grp
else:
raise ValueError('unknown file suffix: ' + os.path.basename(os.path.normpath(filename))) | Create an image file or image group object from the given file. The
type of the created database object is determined by the suffix of the
given file. An ValueError exception is thrown if the file has an unknown
suffix.
Raises ValueError if invalid file is given.
Parameters
----------
filename : File-type object
File on local disk. Expected to be either an image file or an
archive containing image.
Returns
-------
DataObjectHandle
Handle for the created database object. Either an ImageHandle or an
ImageGroupHandle |
39 | def default(restart_cb=None, restart_func=None, close_fds=True):
if _active:
msg =
raise RuntimeWarning(msg)
_python_path = os.getenv('PYTHONPATH')
if not _python_path:
msg =
raise RuntimeError(msg)
if restart_cb and not callable(restart_cb):
msg =
raise TypeError(msg)
if restart_func and not callable(restart_func):
msg =
raise TypeError(msg)
global _close_fds
_close_fds = close_fds
try:
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
except ImportError as ie:
msg = % str(ie)
raise RuntimeError(msg)
class _Handler(FileSystemEventHandler):
def __init__(self):
self.active = True
def dispatch(self, event):
if not self.active:
return
super(_Handler, self).dispatch(event)
def all_events(self, event):
if is_restart_event(event):
cancelled = _restart()
if not cancelled:
self.active = False
def on_created(self, event):
self.all_events(event)
def on_deleted(self, event):
self.all_events(event)
def on_modified(self, event):
self.all_events(event)
def on_moved(self, event):
self.all_events(event)
global _observer
_observer = Observer()
handler = _Handler()
_observer.schedule(handler, _python_path, recursive=True)
global _restart_cb
_restart_cb = restart_cb
global _restart_func
_restart_func = restart_func
_activate()
_observer.start() | Sets up lazarus in default mode.
See the :py:func:`custom` function for a more powerful mode of use.
The default mode of lazarus is to watch all modules rooted at
``PYTHONPATH`` for changes and restart when they take place.
Keyword arguments:
restart_cb -- Callback invoked prior to restarting the process; allows
for any cleanup to occur prior to restarting. Returning anything other
than *None* in the callback will cancel the restart.
restart_func -- Function invoked to restart the process. This supplants
the default behavior of using *sys.executable* and *sys.argv*.
close_fds -- Whether all file descriptors other than *stdin*, *stdout*,
and *stderr* should be closed
A simple example:
>>> import lazarus
>>> lazarus.default()
>>> lazarus.stop() |
40 | def next_id(self):
id_str_lst = self._element.xpath('//@id')
used_ids = [int(id_str) for id_str in id_str_lst if id_str.isdigit()]
if not used_ids:
return 1
return max(used_ids) + 1 | Next available positive integer id value in this story XML document.
The value is determined by incrementing the maximum existing id value. Gaps in
the existing id sequence are not filled. The id attribute value is unique in the
document, without regard to the element type it appears on. |
41 | def register_arrays(self, arrays):
if isinstance(arrays, collections.Mapping):
arrays = arrays.itervalues()
for ary in arrays:
self.register_array(**ary) | Register arrays using a list of dictionaries defining the arrays.
The list should itself contain dictionaries. i.e.
.. code-block:: python
D = [{ 'name':'uvw', 'shape':(3,'ntime','nbl'),'dtype':np.float32 },
{ 'name':'lm', 'shape':(2,'nsrc'),'dtype':np.float32 }]
Parameters
----------
arrays : A list or dict.
A list or dictionary of dictionaries describing arrays. |
42 | def indirect_font(font, fonts, text):
if font == "rnd-small" or font == "random-small" or font == "rand-small":
font = random.choice(RND_SIZE_DICT["small_list"])
return font
if font == "rnd-medium" or font == "random-medium" or font == "rand-medium":
font = random.choice(RND_SIZE_DICT["medium_list"])
return font
if font == "rnd-large" or font == "random-large" or font == "rand-large":
font = random.choice(RND_SIZE_DICT["large_list"])
return font
if font == "rnd-xlarge" or font == "random-xlarge" or font == "rand-xlarge":
font = random.choice(RND_SIZE_DICT["xlarge_list"])
return font
if font == "random" or font == "rand" or font == "rnd":
filtered_fonts = list(set(fonts) - set(RANDOM_FILTERED_FONTS))
font = random.choice(filtered_fonts)
return font
if font == "wizard" or font == "wiz" or font == "magic":
font = wizard_font(text)
return font
if font == "rnd-na" or font == "random-na" or font == "rand-na":
font = random.choice(TEST_FILTERED_FONTS)
return font
if font not in FONT_MAP.keys():
distance_list = list(map(lambda x: distance_calc(font, x), fonts))
font = fonts[distance_list.index(min(distance_list))]
return font | Check input font for indirect modes.
:param font: input font
:type font : str
:param fonts: fonts list
:type fonts : list
:param text: input text
:type text:str
:return: font as str |
43 | def _init_metadata(self):
self._choice_ids_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: False,
: False,
: False,
: False,
: [[]],
: ,
}
self._choice_id_metadata = {
: Id(self.my_osid_object_form._authority,
self.my_osid_object_form._namespace,
),
: ,
: ,
: True,
: False,
: False,
: False,
: [],
: ,
: []
} | stub |
44 | def get_covariance(datargs, outargs, vargs, datvar, outvar):
argn = len(vargs)
nobs = 1
for m in xrange(argn):
a = vargs[m]
try:
a = datargs[a]
except (KeyError, TypeError):
a = outargs[a]
avar = outvar[a]
else:
avar = datvar[a]
for n in xrange(argn):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
c = avar.get(b, 0.0)
try:
nobs = max(nobs, len(c))
except (TypeError, ValueError):
LOGGER.debug(, a, b, c)
cov = np.zeros((nobs, argn, argn))
for m in xrange(argn):
a = vargs[m]
try:
a = datargs[a]
except (KeyError, TypeError):
a = outargs[a]
avar = outvar[a]
else:
avar = datvar[a]
for n in xrange(argn):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
cov[:, m, n] = avar.get(b, 0.0)
if nobs == 1:
cov = cov.squeeze()
LOGGER.debug(, cov)
return cov | Get covariance matrix.
:param datargs: data arguments
:param outargs: output arguments
:param vargs: variable arguments
:param datvar: variance of data arguments
:param outvar: variance of output arguments
:return: covariance |
45 | def _has_not_qual(ntd):
for qual in ntd.Qualifier:
if 'not' in qual:
return True
if 'NOT' in qual:
return True
return False | Return True if the qualifiers contain a 'NOT' qualifier. |
46 | def vertical_horizontal_filter(data, period):
catch_errors.check_for_period_error(data, period)
vhf = [abs(np.max(data[idx+1-period:idx+1]) -
np.min(data[idx+1-period:idx+1])) /
sum([abs(data[idx+1-period:idx+1][i] - data[idx+1-period:idx+1][i-1]) for i in range(0, len(data[idx+1-period:idx+1]))]) for idx in range(period - 1, len(data))]
vhf = fill_for_noncomputable_vals(data, vhf)
return vhf | Vertical Horizontal Filter.
Formula:
ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1)) |
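A small worked check of the VHF formula quoted above, using the textbook definition for a single window; note that the row's inner sum starts at i=0, so it also includes a wrap-around |p_0 - p_last| term that this sketch deliberately omits:

```python
import numpy as np

def vhf_single_window(window):
    # VHF for one window: ABS(high - low) / SUM(ABS(p_i - p_{i-1})).
    window = np.asarray(window, dtype=float)
    numerator = abs(window.max() - window.min())
    denominator = np.abs(np.diff(window)).sum()
    return numerator / denominator

print(vhf_single_window([1, 3, 2, 5]))  # 4 / (2 + 1 + 3) = 0.666...
```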
47 | def query(self,
where="1=1",
out_fields="*",
timeFilter=None,
geometryFilter=None,
returnGeometry=True,
returnIDsOnly=False,
returnCountOnly=False,
returnFeatureClass=False,
returnDistinctValues=False,
returnExtentOnly=False,
maxAllowableOffset=None,
geometryPrecision=None,
outSR=None,
groupByFieldsForStatistics=None,
statisticFilter=None,
out_fc=None,
**kwargs):
params = {"f": "json",
"where": where,
"outFields": out_fields,
"returnGeometry" : returnGeometry,
"returnIdsOnly" : returnIDsOnly,
"returnCountOnly" : returnCountOnly,
"returnDistinctValues" : returnDistinctValues,
"returnExtentOnly" : returnExtentOnly
}
if outSR is not None:
params[] = outSR
if not maxAllowableOffset is None:
params[] = maxAllowableOffset
if not geometryPrecision is None:
params[] = geometryPrecision
for k,v in kwargs.items():
params[k] = v
if returnDistinctValues:
params["returnGeometry"] = False
if not timeFilter is None and \
isinstance(timeFilter, filters.TimeFilter):
params[] = timeFilter.filter
if not geometryFilter is None and \
isinstance(geometryFilter, filters.GeometryFilter):
gf = geometryFilter.filter
params[] = gf[]
params[] = gf[]
params[] = gf[]
params[] = gf[]
if "buffer" in gf:
params[] = gf[]
if "units" in gf:
params[] = gf[]
if not groupByFieldsForStatistics is None:
params[] = groupByFieldsForStatistics
if not statisticFilter is None and \
isinstance(statisticFilter, filters.StatisticFilter):
params[] = statisticFilter.filter
fURL = self._url + "/query"
results = self._post(fURL, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if in results:
raise ValueError (results)
if not returnCountOnly and not returnIDsOnly and \
not returnDistinctValues and not returnExtentOnly:
if returnFeatureClass:
json_text = json.dumps(results)
temp = scratchFolder() + os.sep + uuid.uuid4().get_hex() + ".json"
with open(temp, ) as writer:
writer.write(json_text)
writer.flush()
del writer
fc = json_to_featureclass(json_file=temp,
out_fc=out_fc)
os.remove(temp)
return fc
else:
return FeatureSet.fromJSON(json.dumps(results))
else:
return results
return | queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be given as UTC timestamps in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
groupByFieldsForStatistics - One or more field names on
which the values need to be grouped for
calculating the statistics.
statisticFilter - object that performs statistic queries
kwargs - optional parameters that can be passed to the Query
function. This will allow users to pass additional
parameters not explicitly implemented on the function. A
complete list of functions available is documented on the
Query REST API.
Output:
A list of Feature Objects (default) or a path to the output featureclass if
returnFeatureClass is set to True. |
48 | def pic_inflow_v2(self):
flu = self.sequences.fluxes.fastaccess
inl = self.sequences.inlets.fastaccess
flu.inflow = inl.q[0]+inl.s[0]+inl.r[0] | Update the inlet link sequences.
Required inlet sequences:
|dam_inlets.Q|
|dam_inlets.S|
|dam_inlets.R|
Calculated flux sequence:
|Inflow|
Basic equation:
:math:`Inflow = Q + S + R` |
49 | def __import_vars(self, env_file):
with open(env_file, "r") as f:
for line in f:
try:
line = line.lstrip()
if line.startswith('export'):
line = line.replace('export', '', 1)
key, val = line.strip().split('=', 1)
except ValueError:
pass
else:
if not callable(val):
if self.verbose_mode:
if key in self.app.config:
print(
" * Overwriting an existing config var:"
" {0}".format(key))
else:
print(
" * Setting an entirely new config var:"
" {0}".format(key))
self.app.config[key] = re.sub(
r"\A[\"']|[\"']\Z", "", val) | Actual importing function. |
50 | def attributive(adjective, gender=MALE, role=SUBJECT, article=None):
w, g, c, a = \
adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None
if w in adjective_attributive:
return adjective_attributive[w]
if a is None \
or a in ("mir", "dir", "ihm") \
or a in ("ein", "etwas", "mehr") \
or a.startswith(("all", "mehrer", "wenig", "viel")):
return w + adjectives_strong.get((g, c), "")
if a.startswith(("ein", "kein")) \
or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
return w + adjectives_mixed.get((g, c), "")
if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
or a.startswith((
"derselb", "derjenig", "jed", "jeglich", "jen", "manch",
"dies", "solch", "welch")):
return w + adjectives_weak.get((g, c), "")
return w + adjectives_strong.get((g, c), "") | For a predicative adjective, returns the attributive form (lowercase).
In German, the attributive is formed with -e, -em, -en, -er or -es,
depending on gender (masculine, feminine, neuter or plural) and role
(nominative, accusative, dative, genitive). |
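For orientation, a hedged usage sketch of the declension behaviour described above. The import path is an assumption (this row appears to come from pattern.de), and the expected outputs follow standard German adjective endings rather than anything verified against the library:

```python
# Assumed import path; MALE/FEMALE and SUBJECT are the gender/role flags used above.
from pattern.de import attributive, MALE, FEMALE, SUBJECT

print(attributive("gut", gender=MALE, role=SUBJECT))                   # 'guter' (strong: no article)
print(attributive("gut", gender=FEMALE, role=SUBJECT, article="die"))  # 'gute'  (weak: definite article)
print(attributive("gut", gender=MALE, role=SUBJECT, article="ein"))    # 'guter' (mixed: indefinite article)
```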
51 | def append_summary_to_module_docstring(module):
pairs = [(name, getattr(module, name)) for name in module.__all__]
kws = dict(key_header="Name", summary_type="module contents")
module.__doc__ = docstring_with_summary(module.__doc__, pairs, **kws) | Change the ``module.__doc__`` docstring to include a summary table based
on its contents as declared on ``module.__all__``. |
52 | def filter_parts(self, predicate=, exclude=True):
source = self._source_data
if source is None:
raise ExpressionError()
def _parse_partition_predicate(p):
if '=' not in p:
raise ExpressionError('Illegal partition predicate: %s' % p)
field_name, field_value = [s.strip() for s in p.split('=', 1)]
if not hasattr(source, 'schema'):
raise ExpressionError('Source collection does not have a schema.')
if field_name not in source.schema:
raise ExpressionError('%s is not a column of the source collection' % field_name)
if field_name not in source.schema._partition_schema:
raise ExpressionError('%s is not a partition column' % field_name)
part_col = self[field_name]
if field_value.startswith('"') or field_value.startswith("'"):
encoding = 'string-escape' if six.PY2 else 'unicode-escape'
field_value = field_value.strip('"\'').encode('utf-8').decode(encoding)
if isinstance(part_col.data_type, types.Integer):
field_value = int(field_value)
elif isinstance(part_col.data_type, types.Float):
field_value = float(field_value)
return part_col == field_value
from ...models.partition import Partition
from ...types import PartitionSpec
if isinstance(predicate, Partition):
predicate = predicate.partition_spec
if isinstance(predicate, PartitionSpec):
predicate = .join("%s=" % (k, v) for k, v in six.iteritems(predicate.kv))
if isinstance(predicate, list):
predicate = ','.join(str(s) for s in predicate)
elif not isinstance(predicate, six.string_types):
raise ExpressionError()
if not predicate:
predicate_obj = None
else:
part_formatter = lambda p: reduce(operator.and_, map(_parse_partition_predicate, p.split(',')))
predicate_obj = reduce(operator.or_, map(part_formatter, predicate.split('/')))
if not source.schema.partitions:
raise ExpressionError()
if exclude:
columns = [c for c in self.schema if c.name not in source.schema._partition_schema]
new_schema = types.Schema.from_lists([c.name for c in columns], [c.type for c in columns])
return FilterPartitionCollectionExpr(self, predicate_obj, _schema=new_schema, _predicate_string=predicate)
else:
return self.filter(predicate_obj) | Filter the data by partition string. A partition string looks like `pt1=1,pt2=2/pt1=2,pt2=1`, where
comma (,) denotes 'and', while (/) denotes 'or'.
:param str|Partition predicate: predicate string of partition filter
:param bool exclude: True if you want to exclude partition fields, otherwise False. True for default.
:return: new collection
:rtype: :class:`odps.df.expr.expressions.CollectionExpr` |
53 | def transform(self, y):
if self.transform_type == 'log':
return np.log(y)
elif self.transform_type == 'exp':
return np.exp(y)
elif self.transform_type == 'sqrt':
return np.sqrt(y)
elif self.transform_type == 'sin':
return np.sin(y)
elif self.transform_type == 'cos':
return np.cos(y)
elif self.transform_type == 'tan':
return np.tan(y)
elif self.transform_type == 'abs':
return np.abs(y) | Transform features per specified math function.
:param y:
:return: |
54 | def long_fname_format(fmt_str, fmt_dict, hashable_keys=[], max_len=64,
hashlen=16, ABS_MAX_LEN=255, hack27=False):
r
from utool import util_hash
fname = fmt_str.format(**fmt_dict)
if max_len is None:
return fname
if len(fname) > max_len:
fmt_dict_ = fmt_dict.copy()
for key in hashable_keys:
if hack27:
fmt_dict_[key] = util_hash.hashstr27(fmt_dict_[key], hashlen=hashlen)
else:
fmt_dict_[key] = util_hash.hashstr(fmt_dict_[key], hashlen=hashlen)
fname = fmt_str.format(**fmt_dict_)
if len(fname) <= max_len:
break
if len(fname) > max_len:
diff = len(fname) - max_len
msg = (
) % (diff,)
print(msg)
print( % len(fname))
print( % fname)
if ABS_MAX_LEN is not None and len(fname) > ABS_MAX_LEN:
raise AssertionError(msg)
return fname | r"""
DEPRECATE
Formats a string and hashes certain parts if the resulting string becomes
too long. Used for making filenames fit onto disk.
Args:
fmt_str (str): format of fname
fmt_dict (str): dict to format fname with
hashable_keys (list): list of dict keys you are willing to have hashed
max_len (int): tries to fit fname into this length
ABS_MAX_LEN (int): throws AssertionError if fname over this length
CommandLine:
python -m utool.util_str --exec-long_fname_format
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> fmt_str = 'qaid={qaid}_res_{cfgstr}_quuid={quuid}'
>>> quuid_str = 'blahblahblahblahblahblah'
>>> cfgstr = 'big_long_string__________________________________'
>>> qaid = 5
>>> fmt_dict = dict(cfgstr=cfgstr, qaid=qaid, quuid=quuid_str)
>>> hashable_keys = ['cfgstr', 'quuid']
>>> max_len = 64
>>> hashlen = 8
>>> fname0 = ut.long_fname_format(fmt_str, fmt_dict, max_len=None)
>>> fname1 = ut.long_fname_format(fmt_str, fmt_dict, hashable_keys,
>>> max_len=64, hashlen=8)
>>> fname2 = ut.long_fname_format(fmt_str, fmt_dict, hashable_keys, max_len=42,
>>> hashlen=8)
>>> result = fname0 + '\n' + fname1 + '\n' + fname2
>>> print(result)
qaid=5_res_big_long_string___________________________________quuid=blahblahblahblahblahblah
qaid=5_res_racfntgq_quuid=blahblahblahblahblahblah
qaid=5_res_racfntgq_quuid=yvuaffrp |
55 | def commercial_domains():
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index) | Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False |
56 | def _get_function_wrapper(
self, func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]
) -> typing.Callable[..., typing.Any]:
raise NotImplementedError() | Here should be constructed and returned real decorator.
:param func: Wrapped function
:type func: typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]
:rtype: typing.Callable |
57 | def render_item(self, all_posts):
index = all_posts.index(self)
if index > 0:
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1:
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context=
) | Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string. |
58 | def removeTab(self, index):
widget = self.widget(index)
try:
self._widgets.remove(widget)
except ValueError:
pass
self.tab_closed.emit(widget)
self._del_code_edit(widget)
QTabWidget.removeTab(self, index)
if widget == self._current:
self._current = None | Removes tab at index ``index``.
This method will emits tab_closed for the removed tab.
:param index: index of the tab to remove. |
59 | def on_exception(wait_gen,
exception,
max_tries=None,
max_time=None,
jitter=full_jitter,
giveup=lambda e: False,
on_success=None,
on_backoff=None,
on_giveup=None,
logger='backoff',
**wait_gen_kwargs):
def decorate(target):
logger_ = logger
if isinstance(logger_, basestring):
logger_ = logging.getLogger(logger_)
on_success_ = _config_handlers(on_success)
on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
retry = None
if sys.version_info[:2] >= (3, 5):
import asyncio
if asyncio.iscoroutinefunction(target):
import backoff._async
retry = backoff._async.retry_exception
elif _is_event_loop() and _is_current_task():
raise TypeError(
"backoff.on_exception applied to a regular function "
"inside coroutine, this will lead to event loop "
"hiccups. Use backoff.on_exception on coroutines in "
"asynchronous code.")
if retry is None:
retry = _sync.retry_exception
return retry(target, wait_gen, exception,
max_tries, max_time, jitter, giveup,
on_success_, on_backoff_, on_giveup_,
wait_gen_kwargs)
return decorate | Returns decorator for backoff and retry triggered by exception.
Args:
wait_gen: A generator yielding successive wait times in
seconds.
exception: An exception type (or tuple of types) which triggers
backoff.
max_tries: The maximum number of attempts to make before giving
up. Once exhausted, the exception will be allowed to escape.
The default value of None means there is no limit to the
number of tries. If a callable is passed, it will be
evaluated at runtime and its return value used.
max_time: The maximum total amount of time to try for before
giving up. Once expired, the exception will be allowed to
escape. If a callable is passed, it will be
evaluated at runtime and its return value used.
jitter: A function of the value yielded by wait_gen returning
the actual time to wait. This distributes wait times
stochastically in order to avoid timing collisions across
concurrent clients. Wait times are jittered by default
using the full_jitter function. Jittering may be disabled
altogether by passing jitter=None.
giveup: Function accepting an exception instance and
returning whether or not to give up. Optional. The default
is to always continue.
on_success: Callable (or iterable of callables) with a unary
signature to be called in the event of success. The
parameter is a dict containing details about the invocation.
on_backoff: Callable (or iterable of callables) with a unary
signature to be called in the event of a backoff. The
parameter is a dict containing details about the invocation.
on_giveup: Callable (or iterable of callables) with a unary
signature to be called in the event that max_tries
is exceeded. The parameter is a dict containing details
about the invocation.
logger: Name or Logger object to log to. Defaults to 'backoff'.
**wait_gen_kwargs: Any additional keyword args specified will be
passed to wait_gen when it is initialized. Any callable
args will first be evaluated and their return values passed.
This is useful for runtime configuration. |
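The decorator above is typically applied like this (a minimal sketch; `backoff.expo` is the library's exponential wait generator, and the requests-based target function is a hypothetical example):

```python
import backoff
import requests

@backoff.on_exception(backoff.expo,                           # wait_gen: exponential waits
                      requests.exceptions.RequestException,   # exception(s) that trigger a retry
                      max_tries=5,                            # give up after 5 attempts...
                      max_time=30)                            # ...or after 30 seconds total
def get_url(url):
    return requests.get(url, timeout=5)
```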
60 | def GetParserFromFilename(self, path):
handler_name = path.split("://")[0]
for parser_cls in itervalues(GRRConfigParser.classes):
if parser_cls.name == handler_name:
return parser_cls
extension = os.path.splitext(path)[1]
if extension in [".yaml", ".yml"]:
return YamlParser
return ConfigFileParser | Returns the appropriate parser class from the filename. |
61 | def iat(x, maxlag=None):
if not maxlag:
maxlag = _find_max_lag(x)
acr = [autocorr(x, lag) for lag in range(1, maxlag + 1)]
gammas = [(acr[2 * i] + acr[2 * i + 1]) for i in range(maxlag // 2)]
cut = _cut_time(gammas)
if cut + 1 == len(gammas):
print_("Not enough lag to calculate IAT")
return np.sum(2 * gammas[:cut + 1]) - 1.0 | Calculate the integrated autocorrelation time (IAT), given the trace from a Stochastic. |
62 | def put(self, destination):
target = get_target_path(destination, self.dirname)
valid_paths = (self.dirname, % self.dirname)
with tarfile.open(self.archive_path, ) as tf:
members = []
for tarinfo in tf:
pathsplit = os.path.normpath(tarinfo.path).split(os.sep)
if pathsplit[0] not in valid_paths:
print( %
(tarinfo.path, self.dirname))
continue
if len(pathsplit) == 1:
continue
tarinfo.name = os.path.join(*pathsplit[1:])
members.append(tarinfo)
if not members:
raise ValueError("No files under path directory in this tarfile")
tf.extractall(target, members) | Copy the referenced directory to this path
Note:
This ignores anything not in the desired directory, given by ``self.dirname``.
Args:
destination (str): path to put this directory (which must NOT already exist)
References:
https://stackoverflow.com/a/8261083/1958900 |
63 | def read(self, stream):
def read_it(stream):
bytes = stream.read()
transportIn = TMemoryBuffer(bytes)
protocolIn = TBinaryProtocol.TBinaryProtocol(transportIn)
topology = StormTopology()
topology.read(protocolIn)
return topology
if isinstance(stream, six.string_types):
with open(stream, 'rb') as f:
return read_it(f)
else:
return read_it(stream) | Reads the topology from a stream or file. |
64 | def get_version(self, diff_to_increase_ratio):
diffs = self.get_diff_amounts()
version = Version()
for diff in diffs:
version.increase_by_changes(diff, diff_to_increase_ratio)
return version | Gets version
:param diff_to_increase_ratio: Ratio to convert number of changes into
:return: Version of this code, based on commits diffs |
65 | def reparentUnions(self):
removals = []
for u in self.unions:
parts = u.name.split("::")
if len(parts) >= 2:
parent_name = "::".join(p for p in parts[:-1])
reparented = False
for node in itertools.chain(self.class_like, self.namespaces):
if node.name == parent_name:
node.children.append(u)
u.parent = node
reparented = True
break
if reparented:
removals.append(u)
else:
utils.verbose_log(
"The union {0} has in its name, but no parent was found!".format(u.name),
utils.AnsiColors.BOLD_RED
)
for rm in removals:
self.unions.remove(rm) | Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Namespaces and
classes should have the unions defined in them to be in the child list of itself
rather than floating around. Union nodes that are reparented (e.g. a union
defined in a class) will be removed from the list ``self.unions`` since the
Breathe directive for its parent (e.g. the class) will include the documentation
for the union. The consequence of this is that a union defined in a class will
**not** appear in the full api listing of Unions. |
66 | def get(self, address):
loopback = super(LoopbackCollection, self).get(address=address)
if loopback:
return loopback
raise InterfaceNotFound() | Get a loopback address by its address. Find all loopback addresses
by iterating at either the node level or the engine::
loopback = engine.loopback_interface.get('127.0.0.10')
:param str address: ip address of loopback
:raises InterfaceNotFound: invalid interface specified
:rtype: LoopbackInterface |
67 | def _ImportHookBySuffix(
name, globals=None, locals=None, fromlist=None, level=None):
_IncrementNestLevel()
if level is None:
level = 0 if six.PY3 else -1
try:
module = _real_import(name, globals, locals, fromlist, level)
finally:
_ProcessImportBySuffix(name, fromlist, globals)
return module | Callback when an import statement is executed by the Python interpreter.
Argument names have to exactly match those of __import__. Otherwise calls
to __import__ that use keyword syntax will fail: __import__('a', fromlist=[]). |
68 | def modify_column_if_table_exists(self,
tablename: str,
fieldname: str,
newdef: str) -> Optional[int]:
if not self.table_exists(tablename):
return None
sql = "ALTER TABLE {t} MODIFY COLUMN {field} {newdef}".format(
t=tablename,
field=fieldname,
newdef=newdef
)
log.info(sql)
return self.db_exec_literal(sql) | Alters a column's definition without renaming it. |
69 | def center(self):
if np.all(np.isfinite(self.xyz)):
return np.mean(self.xyz, axis=0) | The cartesian center of the Compound based on its Particles.
Returns
-------
np.ndarray, shape=(3,), dtype=float
The cartesian center of the Compound based on its Particles |
70 | def setup_config(self, cfg=None):
_opts, _args = optparse.OptionParser.parse_args(self)
configs = self.find_existing_configs(_opts.support_unit)
if configs and cfg not in configs:
cfg = configs[0]
return config.master_config(self.get_config_file_path(cfg)) | Open suitable config file.
:return: |
71 | def find_sanitiser_nodes(
sanitiser,
sanitisers_in_file
):
for sanitiser_tuple in sanitisers_in_file:
if sanitiser == sanitiser_tuple.trigger_word:
yield sanitiser_tuple.cfg_node | Find nodes containing a particular sanitiser.
Args:
sanitiser(string): sanitiser to look for.
sanitisers_in_file(list[Node]): list of CFG nodes with the sanitiser.
Returns:
Iterable of sanitiser nodes. |
72 | def hypercube_edges(dims, use_map=False):
edges = []
nodes = np.arange(np.product(dims)).reshape(dims)
for i,d in enumerate(dims):
for j in range(d-1):
for n1, n2 in zip(np.take(nodes, [j], axis=i).flatten(), np.take(nodes,[j+1], axis=i).flatten()):
edges.append((n1,n2))
if use_map:
return edge_map_from_edge_list(edges)
return edges | Create edge lists for an arbitrary hypercube. TODO: this is probably not the fastest way. |
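A worked example of the construction above for a 2x2 grid: `np.arange(4).reshape((2, 2))` labels the nodes 0..3 row-major, the sweep along axis 0 yields the vertical edges (0,2) and (1,3), and the sweep along axis 1 yields the horizontal edges (0,1) and (2,3). The sketch restates the function with `np.prod` in place of the older `np.product` alias and plain-int casts for readable output:

```python
import numpy as np

def hypercube_edges(dims):
    # Restated from the row above, without the optional edge-map branch.
    edges = []
    nodes = np.arange(np.prod(dims)).reshape(dims)
    for i, d in enumerate(dims):
        for j in range(d - 1):
            for n1, n2 in zip(np.take(nodes, [j], axis=i).flatten(),
                              np.take(nodes, [j + 1], axis=i).flatten()):
                edges.append((int(n1), int(n2)))
    return edges

print(hypercube_edges([2, 2]))  # [(0, 2), (1, 3), (0, 1), (2, 3)]
```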
73 | def get_context_data(self, **kwargs):
context = super(FilterFormMixin, self).get_context_data(**kwargs)
context[self.context_filterform_name] = self.get_filter()
return context | Add filter form to the context.
TODO: Currently we construct the filter form object twice - in
get_queryset and here, in get_context_data. Will need to figure out a
good way to eliminate extra initialization. |
74 | async def volume(self, ctx, volume: int):
if ctx.voice_client is None:
return await ctx.send("Not connected to a voice channel.")
ctx.voice_client.source.volume = volume / 100
await ctx.send("Changed volume to {}%".format(volume)) | Changes the player's volume |
75 | def process_doc(text):
document = docutils.core.publish_doctree(text)
visitor = RefVisitor(document)
document.walk(visitor)
return visitor.kwd, visitor.values | The :ref: role is supported by Sphinx but not by plain docutils |
76 | def discharge(self):
rv = np.zeros(self.aq[0].naq)
Qls = self.parameters[:, 0] * self.dischargeinf()
Qls.shape = (self.nls, self.nlayers, self.order + 1)
Qls = np.sum(Qls, 2)
for i, q in enumerate(Qls):
rv[self.layers[i]] += q
return rv | Discharge of the element in each layer |
77 | async def message_throttled(self, message: types.Message, throttled: Throttled):
handler = current_handler.get()
dispatcher = Dispatcher.get_current()
if handler:
key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
else:
key = f"{self.prefix}_message"
delta = throttled.rate - throttled.delta
if throttled.exceeded_count <= 2:
await message.reply('Too many requests!')
await asyncio.sleep(delta)
thr = await dispatcher.check_key(key)
if thr.exceeded_count == throttled.exceeded_count:
await message.reply('Unlocked.') | Notify user only on first exceed and notify about unlocking only on last exceed
:param message:
:param throttled: |
78 | def generate_private_investment(asset_manager_id=None, asset_id=None, client_id=None):
attributes = generate_common(asset_manager_id=asset_manager_id, asset_id=asset_id)
private_investment = PrivateInvestment(client_id=client_id or random_string(5),
asset_issuer_id=random_string(8),
category=,
sub_category=,
num_shares=1000,
price_share=1000,
share_type=,
maturity_date=random_date(),
lock_up_period=52,
investment_term=52,
**attributes)
return private_investment | currency, display_name |
79 | def append(self, parent, content):
appender = self.default
for matcher, candidate_appender in self.appenders:
if matcher == content.value:
appender = candidate_appender
break
appender.append(parent, content) | Select an appender and append the content to parent.
@param parent: A parent node.
@type parent: L{Element}
@param content: The content to append.
@type content: L{Content} |
80 | def in_simo_and_inner(self):
return len(self.successor) > 1 and self.successor[0] is not None and not self.successor[0].in_or_out and \
len(self.precedence) == 1 and self.precedence[0] is not None and not self.successor[0].in_or_out | Test if a node is simo: single input and multiple output |
81 | def _stream_blob(self, key, fileobj, progress_callback):
file_size = None
start_range = 0
chunk_size = self.conn.MAX_CHUNK_GET_SIZE
end_range = chunk_size - 1
while True:
try:
blob = self.conn._get_blob(self.container_name, key, start_range=start_range, end_range=end_range)
if file_size is None:
file_size = self._parse_length_from_content_range(blob.properties.content_range)
fileobj.write(blob.content)
start_range += blob.properties.content_length
if start_range == file_size:
break
if blob.properties.content_length == 0:
raise StorageError(
"Empty response received for {}, range {}-{}".format(key, start_range, end_range)
)
end_range += blob.properties.content_length
if end_range >= file_size:
end_range = file_size - 1
if progress_callback:
progress_callback(start_range, file_size)
except azure.common.AzureHttpError as ex:
if ex.status_code == 416:
return
raise | Streams contents of given key to given fileobj. Data is read sequentially in chunks
without any seeks. This requires duplicating some functionality of the Azure SDK, which only
allows reading entire blob into memory at once or returning data from random offsets |
82 | def dump_pk(obj, abspath,
pk_protocol=pk_protocol, replace=False, compress=False,
enable_verbose=True):
abspath = str(abspath)
msg = Messenger(enable_verbose=enable_verbose)
if compress:
root, ext = os.path.splitext(abspath)
if ext != ".gz":
if ext != ".tmp":
raise Exception(
"compressed pickle has to use extension !")
else:
_, ext = os.path.splitext(root)
if ext != ".gz":
raise Exception(
"compressed pickle has to use extension !")
else:
root, ext = os.path.splitext(abspath)
if ext != ".pickle":
if ext != ".tmp":
raise Exception("file extension are not !")
else:
_, ext = os.path.splitext(root)
if ext != ".pickle":
raise Exception("file extension are not !")
msg.show("\nDumping to %s..." % abspath)
st = time.clock()
if os.path.exists(abspath):
if replace:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(pickle.dumps(obj, protocol=pk_protocol))
else:
with open(abspath, "wb") as f:
pickle.dump(obj, f, protocol=pk_protocol)
else:
raise Exception("\tCANNOT WRITE to %s, "
"it's already exists" % abspath)
else:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(pickle.dumps(obj, protocol=pk_protocol))
else:
with open(abspath, "wb") as f:
pickle.dump(obj, f, protocol=pk_protocol)
msg.show(" Complete! Elapse %.6f sec" % (time.clock() - st)) | Dump Picklable Python Object to file.
Provides multiple choice to customize the behavior.
:param obj: Picklable Python Object.
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or
``.gz`` (for compressed Pickle).
:type abspath: string
:param pk_protocol: (default: your Python major version) use 2 to make a
py2.x/3.x compatible pickle file; 3 is faster.
:type pk_protocol: int
:param replace: (default False) If ``True``, dumping to an existing path
silently overwrites it. If ``False``, an exception is raised. The default of
``False`` prevents overwriting a file by mistake.
:type replace: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the Pickle file. Disk usage can be greatly reduced. But you
have to use :func:`load_pk(abspath, compress=True)<load_pk>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.pk import dump_pk
>>> pk = {"a": 1, "b": 2}
>>> dump_pk(pk, "test.pickle", replace=True)
Dumping to test.pickle...
Complete! Elapse 0.001763 sec
**Chinese documentation (translated)**
Serialize a Python object with Pickle and save it to a local file. (Some
custom classes cannot be serialized.)
Parameters
:param obj: a picklable Python object
:param abspath: path to write to. The extension must be ``.pickle`` or
``.gz``; ``.gz`` is for a compressed Pickle.
:type abspath: ``string``
:param pk_protocol: (default: your Python major version) use 2 so the saved
file can be read by both py2.x and py3.x; protocol 3 is faster, smaller and
more efficient.
:type pk_protocol: ``int``
:param replace: (default False) if ``True``, an existing file at the target
path is overwritten automatically; if ``False``, an exception is raised,
which protects against overwriting a file by mistake.
:type replace: ``boolean``
:param compress: (default False) if ``True``, compress the Pickle file with
the open gzip standard, typically shrinking it by a factor of 10-20. To read
the file back, use :func:`load_pk(abspath, compress=True)<load_pk>`.
:type compress: ``boolean``
:param enable_verbose: (default True) whether to print progress messages;
recommended off for batch processing.
:type enable_verbose: ``boolean`` |
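The docstring only demonstrates the plain-pickle path; a small companion sketch for the compressed path, with the file name chosen here purely as an example, following the extension rules enforced above:

# Compressed dump: the path must end in ".gz" (or ".gz.tmp"), otherwise the
# function raises. replace=True silently overwrites an existing file.
data = {"a": 1, "b": 2}
dump_pk(data, "test.gz", compress=True, replace=True)
# Reading it back is expected to go through load_pk(abspath, compress=True),
# as referenced in the docstring.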
83 | def publish_predictions_to_core(self):
status = FAILED
msg = "not started"
try:
msg = "generating request"
log.info(msg)
publish_req = generate_ai_request(
predict_rows=self.df.fillna(
ANTINEX_MISSING_VALUE).to_dict("records"),
req_dict=self.request_dict)
if publish_req["status"] != SUCCESS:
log.error(("failed generate_ai_request with err={}")
.format(
publish_req["error"]))
status = ERROR
else:
msg = "publishing as user={} url={} model={}".format(
ANTINEX_USER,
ANTINEX_URL,
ANTINEX_USE_MODEL_NAME)
log.info(msg)
response = self.client.run_job(
body=publish_req["data"])
if response["status"] == SUCCESS:
log.info("predictions sent")
status = SUCCESS
elif response["status"] == FAILED:
log.error(("job failed with error= with response={}")
.format(
response["error"],
response["data"]))
status = ERROR
elif response["status"] == ERROR:
log.error(("job had an error= with response={}")
.format(
response["error"],
response["data"]))
status = ERROR
elif response["status"] == LOGIN_FAILED:
log.error(("job reported user was not able to log in "
"with an error= with response={}")
.format(
response["error"],
response["data"]))
status = ERROR
except Exception as e:
log.error(("failed generating request last_step= ex={}")
.format(
msg,
e))
return status | publish_predictions_to_core |
84 | def get_organizations(self, page=None):
opts = {}
if page:
opts['page'] = page
return self.api_call(ENDPOINTS[][], **opts) | Get organizations |
85 | def create_salt(length: int=128) -> bytes:
return b"".join(bytes([SystemRandom().randint(0, 255)]) for _ in range(length)) | Create a new salt
:param int length: How many bytes should the salt be long?
:return: The salt
:rtype: bytes |
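A quick usage sketch for create_salt; the hex encoding is chosen here just for display:

salt = create_salt(32)                      # 32 random bytes from SystemRandom
assert isinstance(salt, bytes) and len(salt) == 32
print(salt.hex())                           # e.g. store alongside the password hash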
86 | def delete_biggest(self):
logger.info(
"Deleting all mails sharing the biggest size of {} bytes..."
"".format(self.biggest_size))
candidates = [
mail for mail in self.pool
if mail.size == self.biggest_size]
if len(candidates) == self.size:
logger.warning(
"Skip deletion: all {} mails share the same size."
"".format(self.size))
return
logger.info(
"{} candidates found for deletion.".format(len(candidates)))
for mail in candidates:
self.delete(mail) | Delete all the biggest duplicates.
Keeps all mail of the duplicate set but those sharing the biggest
size. |
87 | def check_async(paths, options, rootdir=None):
LOGGER.info('Async code checking is enabled.')
path_queue = Queue.Queue()
result_queue = Queue.Queue()
for num in range(CPU_COUNT):
worker = Worker(path_queue, result_queue)
worker.setDaemon(True)
LOGGER.info('Start worker #%s', (num + 1))
worker.start()
for path in paths:
path_queue.put((path, dict(options=options, rootdir=rootdir)))
path_queue.join()
errors = []
while True:
try:
errors += result_queue.get(False)
except Queue.Empty:
break
return errors | Check given paths asynchronously.
:return list: list of errors |
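A hedged usage sketch: the options object must come from this package's own option parser, shown here as a hypothetical parse_options() helper, and the file paths are placeholders:

# parse_options() is an assumption for illustration, not a call verified
# against the source; check_async fans the paths out over CPU_COUNT workers.
options = parse_options()
errors = check_async(["app.py", "utils.py"], options, rootdir=".")
for error in errors:
    print(error)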
88 | def begin_batch(self):
self.is_batch = True
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
self.batch_requests = [] | Starts the batch operation. Initializes the batch variables
is_batch:
batch operation flag.
batch_table:
the table name of the batch operation
batch_partition_key:
the PartitionKey of the batch requests.
batch_row_keys:
the RowKey list of adding requests.
batch_requests:
the list of the requests. |
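This reads like the legacy Azure table-storage batching API; a hedged sketch of how such a batch is typically driven, where `service` stands for the object exposing begin_batch, the table name and entities are placeholders, and commit_batch is assumed to be the matching end call:

service.begin_batch()
service.insert_entity('tasks', {'PartitionKey': 'jobs',
                                'RowKey': '001',
                                'description': 'first task'})
service.insert_entity('tasks', {'PartitionKey': 'jobs',
                                'RowKey': '002',
                                'description': 'second task'})
service.commit_batch()   # sends everything queued since begin_batch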
89 | def subdivide_to_size(vertices,
faces,
max_edge,
max_iter=10):
done_face = []
done_vert = []
current_faces = np.array(faces,
dtype=np.int64,
copy=True)
current_vertices = np.array(vertices,
dtype=np.float64,
copy=True)
for i in range(max_iter + 1):
triangles = current_vertices[current_faces]
edge_lengths = (np.diff(triangles[:, [0, 1, 2, 0]],
axis=1) ** 2).sum(axis=2) ** .5
too_long = (edge_lengths > max_edge).any(axis=1)
unique, inverse = np.unique(
current_faces[np.logical_not(too_long)],
return_inverse=True)
done_vert.append(current_vertices[unique])
done_face.append(inverse.reshape((-1, 3)))
if not too_long.any():
break
(current_vertices,
current_faces) = subdivide(current_vertices,
current_faces[too_long])
vertices, faces = util.append_faces(done_vert,
done_face)
return vertices, faces | Subdivide a mesh until every edge is shorter than a
specified length.
Will return a triangle soup, not a nicely structured mesh.
Parameters
------------
vertices : (n, 3) float
Vertices in space
faces : (m, 3) int
Indices of vertices which make up triangles
max_edge : float
Maximum length of any edge in the result
max_iter : int
The maximum number of times to run subdivision
Returns
------------
vertices : (j, 3) float
Vertices in space
faces : (q, 3) int
Indices of vertices |
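A small usage sketch: a single large triangle subdivided until every edge is at most 0.25. The arrays below are made up for illustration; trimesh exposes this routine publicly, but the call shape is all that matters here:

import numpy as np

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])

new_vertices, new_faces = subdivide_to_size(vertices, faces, max_edge=0.25)
# The result is a "triangle soup": many small faces, every edge <= 0.25.
print(new_vertices.shape, new_faces.shape)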
90 | def get_context_arguments(self):
cargs = {}
for context in self.__context_stack:
cargs.update(context.context_arguments)
return cargs | Return a dictionary containing the current context arguments. |
91 | def system(cmd, data=None):
import subprocess
s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out, err = s.communicate(data)
return out.decode() | pipes the output of a program |
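Two example calls for the helper above, assuming a POSIX shell so that echo and cat are available; cat simply echoes the bytes passed via `data` back on stdout:

print(system("echo hello"))             # -> "hello\n"
print(system("cat", data=b"piped in"))  # stdin is fed from `data` -> "piped in"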
92 | def Write2000256List(self, arr):
for item in arr:
ba = bytearray(binascii.unhexlify(item))
ba.reverse()
self.WriteBytes(ba) | Write an array of 64 byte items to the stream.
Args:
arr (list): a list of 2000 items of 64 bytes in size. |
93 | def midi2f(params, midi=69):
midi = create_buffer(params, midi)
output = 2**((midi - 69)/12)*440
return output | Convert a midi value to a frequency.
Midi value 69 corresponds to A4 (440Hz). Changing the midi value by 1 corresponds to
one semitone
:param params: buffer parameters, controls length of signal created
:param midi: midi value
:return: array of resulting frequency |
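The conversion itself is the equal-temperament formula f = 440 * 2^((m - 69) / 12); a quick check of the mapping, independent of the buffer machinery:

for m in (69, 70, 81):
    print(m, 440 * 2 ** ((m - 69) / 12))
# 69 -> 440.0 Hz (A4), 70 -> ~466.16 Hz (one semitone up), 81 -> 880.0 Hz (A5)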
94 | def _env_runner(base_env, extra_batch_callback, policies, policy_mapping_fn,
unroll_length, horizon, preprocessors, obs_filters,
clip_rewards, clip_actions, pack, callbacks, tf_sess,
perf_stats, soft_horizon):
try:
if not horizon:
horizon = (base_env.get_unwrapped()[0].spec.max_episode_steps)
except Exception:
logger.debug("no episode horizon specified, assuming inf")
if not horizon:
horizon = float("inf")
batch_builder_pool = []
def get_batch_builder():
if batch_builder_pool:
return batch_builder_pool.pop()
else:
return MultiAgentSampleBatchBuilder(
policies, clip_rewards, callbacks.get("on_postprocess_traj"))
def new_episode():
episode = MultiAgentEpisode(policies, policy_mapping_fn,
get_batch_builder, extra_batch_callback)
if callbacks.get("on_episode_start"):
callbacks["on_episode_start"]({
"env": base_env,
"policy": policies,
"episode": episode,
})
return episode
active_episodes = defaultdict(new_episode)
while True:
perf_stats.iters += 1
t0 = time.time()
unfiltered_obs, rewards, dones, infos, off_policy_actions = \
base_env.poll()
perf_stats.env_wait_time += time.time() - t0
if log_once("env_returns"):
logger.info("Raw obs from env: {}".format(
summarize(unfiltered_obs)))
logger.info("Info return from env: {}".format(summarize(infos)))
t1 = time.time()
active_envs, to_eval, outputs = _process_observations(
base_env, policies, batch_builder_pool, active_episodes,
unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon,
preprocessors, obs_filters, unroll_length, pack, callbacks,
soft_horizon)
perf_stats.processing_time += time.time() - t1
for o in outputs:
yield o
t2 = time.time()
eval_results = _do_policy_eval(tf_sess, to_eval, policies,
active_episodes)
perf_stats.inference_time += time.time() - t2
t3 = time.time()
actions_to_send = _process_policy_eval_results(
to_eval, eval_results, active_episodes, active_envs,
off_policy_actions, policies, clip_actions)
perf_stats.processing_time += time.time() - t3
t4 = time.time()
base_env.send_actions(actions_to_send)
perf_stats.env_wait_time += time.time() - t4 | This implements the common experience collection logic.
Args:
base_env (BaseEnv): env implementing BaseEnv.
extra_batch_callback (fn): function to send extra batch data to.
policies (dict): Map of policy ids to PolicyGraph instances.
policy_mapping_fn (func): Function that maps agent ids to policy ids.
This is called when an agent first enters the environment. The
agent is then "bound" to the returned policy for the episode.
unroll_length (int): Number of episode steps before `SampleBatch` is
yielded. Set to infinity to yield complete episodes.
horizon (int): Horizon of the episode.
preprocessors (dict): Map of policy id to preprocessor for the
observations prior to filtering.
obs_filters (dict): Map of policy id to filter used to process
observations for the policy.
clip_rewards (bool): Whether to clip rewards before postprocessing.
pack (bool): Whether to pack multiple episodes into each batch. This
guarantees batches will be exactly `unroll_length` in size.
clip_actions (bool): Whether to clip actions to the space range.
callbacks (dict): User callbacks to run on episode events.
tf_sess (Session|None): Optional tensorflow session to use for batching
TF policy evaluations.
perf_stats (PerfStats): Record perf stats into this object.
soft_horizon (bool): Calculate rewards but don't reset the
environment when the horizon is hit.
Yields:
rollout (SampleBatch): Object containing state, action, reward,
terminal condition, and other fields as dictated by `policy`. |
95 | def _fetch(self, url, params):
if not self.from_archive:
self.sleep_for_rate_limit()
headers = {: + self.api_key}
r = self.fetch(url, payload=params, headers=headers)
if not self.from_archive:
self.update_rate_limit(r)
return r.text | Fetch a resource.
Method to fetch and to iterate over the contents of a
type of resource. The method returns a generator of
pages for that resource and parameters.
:param url: the endpoint of the API
:param params: parameters to filter
:returns: the text of the response |
96 | def run_flow(flow, storage, flags=None, http=None):
if flags is None:
flags = argparser.parse_args()
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
print(_FAILED_START_MESSAGE)
if not flags.noauth_local_webserver:
oauth_callback = 'http://{host}:{port}/'.format(
host=flags.auth_host_name, port=port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
import webbrowser
webbrowser.open(authorize_url, new=1, autoraise=True)
print(_BROWSER_OPENED_MESSAGE.format(address=authorize_url))
else:
print(_GO_TO_LINK_MESSAGE.format(address=authorize_url))
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print('Failed to find "code" in the query parameters of the redirect.')
sys.exit('Try running with --noauth_local_webserver.')
else:
code = input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError as e:
sys.exit('Authentication has failed: {0}'.format(e))
storage.put(credential)
credential.set_store(storage)
print('Authentication successful.')
return credential | Core code for a command-line application.
The ``run()`` function is called from your application and runs
through all the steps to obtain credentials. It takes a ``Flow``
argument and attempts to open an authorization server page in the
user's default web browser. The server asks the user to grant your
application access to the user's data. If the user grants access,
the ``run()`` function returns new credentials. The new credentials
are also stored in the ``storage`` argument, which updates the file
associated with the ``Storage`` object.
It presumes it is run from a command-line application and supports the
following flags:
``--auth_host_name`` (string, default: ``localhost``)
Host name to use when running a local web server to handle
redirects during OAuth authorization.
``--auth_host_port`` (integer, default: ``[8080, 8090]``)
Port to use when running a local web server to handle redirects
during OAuth authorization. Repeat this option to specify a list
of values.
``--[no]auth_local_webserver`` (boolean, default: ``True``)
Run a local web server to handle redirects during OAuth
authorization.
The tools module defines an ``ArgumentParser`` that already contains the
flag definitions that ``run()`` requires. You can pass that
``ArgumentParser`` to your ``ArgumentParser`` constructor::
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a ``Storage`` to store the credential in.
flags: ``argparse.Namespace``, (Optional) The command-line flags. This
is the object returned from calling ``parse_args()`` on
``argparse.ArgumentParser`` as described above. Defaults
to ``argparser.parse_args()``.
http: An instance of ``httplib2.Http.request`` or something that
acts like it.
Returns:
Credentials, the obtained credential. |
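A typical end-to-end call following the oauth2client pattern the docstring describes; the client id, secret, scope and file name below are placeholders:

from oauth2client import client, file, tools

flow = client.OAuth2WebServerFlow(
    client_id="YOUR_CLIENT_ID",              # placeholder
    client_secret="YOUR_CLIENT_SECRET",      # placeholder
    scope="https://www.googleapis.com/auth/drive.metadata.readonly")
storage = file.Storage("credentials.dat")    # obtained credentials are cached here
flags = tools.argparser.parse_args([])       # or pass real sys.argv[1:]
credentials = run_flow(flow, storage, flags)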
97 | def upload_and_confirm(self, incoming, **kwargs):
response_dict = self.upload(incoming)
if 'error' in response_dict:
log.warning('photo upload failed')
return response_dict
if isinstance(incoming, Info):
kwargs.setdefault('thumb_nail_left', incoming.thumb_nail_left)
kwargs.setdefault('thumb_nail_top', incoming.thumb_nail_top)
kwargs.setdefault('thumb_nail_right', incoming.thumb_nail_right)
kwargs.setdefault('thumb_nail_bottom', incoming.thumb_nail_bottom)
kwargs[] = response_dict.get()
kwargs[] = response_dict.get()
self.confirm(response_dict[], **kwargs)
return response_dict | Upload the file to okcupid and confirm, among other things, its
thumbnail position.
:param incoming: A filepath string, :class:`.Info` object or
a file like object to upload to okcupid.com.
If an info object is provided, its thumbnail
positioning will be used by default.
:param caption: The caption to add to the photo.
:param thumb_nail_left: For thumb nail positioning.
:param thumb_nail_top: For thumb nail positioning.
:param thumb_nail_right: For thumb nail positioning.
:param thumb_nail_bottom: For thumb nail positioning. |
98 | def get_dyndns_records(login, password):
params = dict(action=, sha=get_auth_key(login, password))
response = requests.get(, params=params, timeout=timeout)
raw_records = (line.split() for line in response.content.split())
try:
records = frozenset(DnsRecord(*record) for record in raw_records)
except TypeError:
raise ApiError("Couldns response",
response.content)
return records | Gets the set of dynamic DNS records associated with this account |
99 | def _repr_html_(self):
out="<table class=>\n"
if not(self.name()[:4]=="Col_"):
out+="<tr>"
out+="<th><b>"+self.name()+"</b></th>"
out+="</tr>"
cropped=False
rowcount=0
colkeywords=self.getkeywords()
for row in self:
out +="\n<tr>"
out += "<td>" + _format_cell(row, colkeywords) + "</td>\n"
out += "</tr>\n"
rowcount+=1
out+="\n"
if rowcount>=20:
cropped=True
break
if out[-2:]=="\n\n":
out=out[:-1]
out+="</table>"
if cropped:
out+="<p style=>("+str(self.nrows()-20)+" more rows)</p>\n"
return out | Give a nice representation of columns in notebooks. |