repo (stringlengths 7–54) | path (stringlengths 4–192) | url (stringlengths 87–284) | code (stringlengths 78–104k) | code_tokens (sequence) | docstring (stringlengths 1–46.9k) | docstring_tokens (sequence) | language (stringclasses, 1 value) | partition (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|
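Each row that follows is a single record with the columns named above. A minimal sketch of iterating over such records, assuming they are exported as JSON Lines with these exact field names (the file name `functions.jsonl` is hypothetical, not part of this preview):

```python
import json

def iter_records(path):
    """Yield one dict per line from a JSON Lines export of the table above."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line:
                yield json.loads(line)

# Example: count training-partition records and print each repo/path pair.
# "functions.jsonl" is an assumed export location, not part of the dataset itself.
train_count = 0
for record in iter_records("functions.jsonl"):
    if record["partition"] == "train":
        train_count += 1
        print(record["repo"], record["path"], sep=" | ")
print("train records:", train_count)
```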
kejbaly2/metrique | metrique/sqlalchemy.py | https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/sqlalchemy.py#L877-L888 | def ls(self, startswith=None):
'''
List all cubes available to the calling client.
:param startswith: string to use in a simple "startswith" query filter
:returns list: sorted list of cube names
'''
logger.info('Listing cubes starting with "%s")' % startswith)
startswith = unicode(startswith or '')
tables = sorted(name for name in self.db_tables
if name.startswith(startswith))
return tables | [
"def",
"ls",
"(",
"self",
",",
"startswith",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"'Listing cubes starting with \"%s\")'",
"%",
"startswith",
")",
"startswith",
"=",
"unicode",
"(",
"startswith",
"or",
"''",
")",
"tables",
"=",
"sorted",
"(",
"name",
"for",
"name",
"in",
"self",
".",
"db_tables",
"if",
"name",
".",
"startswith",
"(",
"startswith",
")",
")",
"return",
"tables"
] | List all cubes available to the calling client.
:param startswith: string to use in a simple "startswith" query filter
:returns list: sorted list of cube names | [
"List",
"all",
"cubes",
"available",
"to",
"the",
"calling",
"client",
"."
] | python | train |
gmr/tinman | tinman/handlers/base.py | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/base.py#L145-L170 | def write(self, chunk):
"""Writes the given chunk to the output buffer. Checks for curl in the
user-agent and if set, provides indented output if returning JSON.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
:param mixed chunk: The string or dict to write to the client
"""
if self._finished:
raise RuntimeError("Cannot write() after finish(). May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if isinstance(chunk, dict):
options = {'ensure_ascii': False}
if 'curl' in self.request.headers.get('user-agent'):
options['indent'] = 2
options['sort_keys'] = True
chunk = json.dumps(chunk, **options).replace("</", "<\\/") + '\n'
self.set_header("Content-Type", "application/json; charset=UTF-8")
self._write_buffer.append(web.utf8(chunk)) | [
"def",
"write",
"(",
"self",
",",
"chunk",
")",
":",
"if",
"self",
".",
"_finished",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot write() after finish(). May be caused \"",
"\"by using async operations without the \"",
"\"@asynchronous decorator.\"",
")",
"if",
"isinstance",
"(",
"chunk",
",",
"dict",
")",
":",
"options",
"=",
"{",
"'ensure_ascii'",
":",
"False",
"}",
"if",
"'curl'",
"in",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"'user-agent'",
")",
":",
"options",
"[",
"'indent'",
"]",
"=",
"2",
"options",
"[",
"'sort_keys'",
"]",
"=",
"True",
"chunk",
"=",
"json",
".",
"dumps",
"(",
"chunk",
",",
"*",
"*",
"options",
")",
".",
"replace",
"(",
"\"</\"",
",",
"\"<\\\\/\"",
")",
"+",
"'\\n'",
"self",
".",
"set_header",
"(",
"\"Content-Type\"",
",",
"\"application/json; charset=UTF-8\"",
")",
"self",
".",
"_write_buffer",
".",
"append",
"(",
"web",
".",
"utf8",
"(",
"chunk",
")",
")"
] | Writes the given chunk to the output buffer. Checks for curl in the
user-agent and if set, provides indented output if returning JSON.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be ``application/json``.
(if you want to send JSON as a different ``Content-Type``, call
set_header *after* calling write()).
:param mixed chunk: The string or dict to write to the client | [
"Writes",
"the",
"given",
"chunk",
"to",
"the",
"output",
"buffer",
".",
"Checks",
"for",
"curl",
"in",
"the",
"user",
"-",
"agent",
"and",
"if",
"set",
"provides",
"indented",
"output",
"if",
"returning",
"JSON",
"."
] | python | train |
iotile/coretools | iotileemulate/iotile/emulate/virtual/emulated_tile.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/virtual/emulated_tile.py#L359-L374 | async def reset(self):
"""Synchronously reset a tile.
This method must be called from the emulation loop and will
synchronously shut down all background tasks running this tile, clear
it to reset state and then restart the initialization background task.
"""
await self._device.emulator.stop_tasks(self.address)
self._handle_reset()
self._logger.info("Tile at address %d has reset itself.", self.address)
self._logger.info("Starting main task for tile at address %d", self.address)
self._device.emulator.add_task(self.address, self._reset_vector()) | [
"async",
"def",
"reset",
"(",
"self",
")",
":",
"await",
"self",
".",
"_device",
".",
"emulator",
".",
"stop_tasks",
"(",
"self",
".",
"address",
")",
"self",
".",
"_handle_reset",
"(",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Tile at address %d has reset itself.\"",
",",
"self",
".",
"address",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Starting main task for tile at address %d\"",
",",
"self",
".",
"address",
")",
"self",
".",
"_device",
".",
"emulator",
".",
"add_task",
"(",
"self",
".",
"address",
",",
"self",
".",
"_reset_vector",
"(",
")",
")"
] | Synchronously reset a tile.
This method must be called from the emulation loop and will
synchronously shut down all background tasks running this tile, clear
it to reset state and then restart the initialization background task. | [
"Synchronously",
"reset",
"a",
"tile",
"."
] | python | train |
drongo-framework/drongo | drongo/utils/dict2.py | https://github.com/drongo-framework/drongo/blob/487edb370ae329f370bcf3b433ed3f28ba4c1d8c/drongo/utils/dict2.py#L77-L100 | def to_dict(self, val=UNSET):
"""Creates dict object from dict2 object
Args:
val (:obj:`dict2`): Value to create from
Returns:
Equivalent dict object.
"""
if val is UNSET:
val = self
if isinstance(val, dict2) or isinstance(val, dict):
res = dict()
for k, v in val.items():
res[k] = self.to_dict(v)
return res
elif isinstance(val, list):
res = []
for item in val:
res.append(self.to_dict(item))
return res
else:
return val | [
"def",
"to_dict",
"(",
"self",
",",
"val",
"=",
"UNSET",
")",
":",
"if",
"val",
"is",
"UNSET",
":",
"val",
"=",
"self",
"if",
"isinstance",
"(",
"val",
",",
"dict2",
")",
"or",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"res",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"val",
".",
"items",
"(",
")",
":",
"res",
"[",
"k",
"]",
"=",
"self",
".",
"to_dict",
"(",
"v",
")",
"return",
"res",
"elif",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"res",
"=",
"[",
"]",
"for",
"item",
"in",
"val",
":",
"res",
".",
"append",
"(",
"self",
".",
"to_dict",
"(",
"item",
")",
")",
"return",
"res",
"else",
":",
"return",
"val"
] | Creates dict object from dict2 object
Args:
val (:obj:`dict2`): Value to create from
Returns:
Equivalent dict object. | [
"Creates",
"dict",
"object",
"from",
"dict2",
"object"
] | python | train |
RiotGames/cloud-inquisitor | backend/cloud_inquisitor/plugins/views/config.py | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/views/config.py#L77-L95 | def post(self):
"""Create a new config item"""
self.reqparse.add_argument('namespacePrefix', type=str, required=True)
self.reqparse.add_argument('description', type=str, required=True)
self.reqparse.add_argument('key', type=str, required=True)
self.reqparse.add_argument('value', required=True)
self.reqparse.add_argument('type', type=str, required=True)
args = self.reqparse.parse_args()
if not self.dbconfig.namespace_exists(args['namespacePrefix']):
return self.make_response('The namespace doesnt exist', HTTP.NOT_FOUND)
if self.dbconfig.key_exists(args['namespacePrefix'], args['key']):
return self.make_response('This config item already exists', HTTP.CONFLICT)
self.dbconfig.set(args['namespacePrefix'], args['key'], _to_dbc_class(args), description=args['description'])
auditlog(event='configItem.create', actor=session['user'].username, data=args)
return self.make_response('Config item added', HTTP.CREATED) | [
"def",
"post",
"(",
"self",
")",
":",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'namespacePrefix'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'description'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'key'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'value'",
",",
"required",
"=",
"True",
")",
"self",
".",
"reqparse",
".",
"add_argument",
"(",
"'type'",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
")",
"args",
"=",
"self",
".",
"reqparse",
".",
"parse_args",
"(",
")",
"if",
"not",
"self",
".",
"dbconfig",
".",
"namespace_exists",
"(",
"args",
"[",
"'namespacePrefix'",
"]",
")",
":",
"return",
"self",
".",
"make_response",
"(",
"'The namespace doesnt exist'",
",",
"HTTP",
".",
"NOT_FOUND",
")",
"if",
"self",
".",
"dbconfig",
".",
"key_exists",
"(",
"args",
"[",
"'namespacePrefix'",
"]",
",",
"args",
"[",
"'key'",
"]",
")",
":",
"return",
"self",
".",
"make_response",
"(",
"'This config item already exists'",
",",
"HTTP",
".",
"CONFLICT",
")",
"self",
".",
"dbconfig",
".",
"set",
"(",
"args",
"[",
"'namespacePrefix'",
"]",
",",
"args",
"[",
"'key'",
"]",
",",
"_to_dbc_class",
"(",
"args",
")",
",",
"description",
"=",
"args",
"[",
"'description'",
"]",
")",
"auditlog",
"(",
"event",
"=",
"'configItem.create'",
",",
"actor",
"=",
"session",
"[",
"'user'",
"]",
".",
"username",
",",
"data",
"=",
"args",
")",
"return",
"self",
".",
"make_response",
"(",
"'Config item added'",
",",
"HTTP",
".",
"CREATED",
")"
] | Create a new config item | [
"Create",
"a",
"new",
"config",
"item"
] | python | train |
secdev/scapy | scapy/arch/unix.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/unix.py#L149-L181 | def _in6_getifaddr(ifname):
"""
Returns a list of IPv6 addresses configured on the interface ifname.
"""
# Get the output of ifconfig
try:
f = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
except OSError:
log_interactive.warning("Failed to execute ifconfig.")
return []
# Iterate over lines and extract IPv6 addresses
ret = []
for line in f:
if "inet6" in line:
addr = line.rstrip().split(None, 2)[1] # The second element is the IPv6 address # noqa: E501
else:
continue
if '%' in line: # Remove the interface identifier if present
addr = addr.split("%", 1)[0]
# Check if it is a valid IPv6 address
try:
inet_pton(socket.AF_INET6, addr)
except (socket.error, ValueError):
continue
# Get the scope and keep the address
scope = in6_getscope(addr)
ret.append((addr, scope, ifname))
return ret | [
"def",
"_in6_getifaddr",
"(",
"ifname",
")",
":",
"# Get the output of ifconfig",
"try",
":",
"f",
"=",
"os",
".",
"popen",
"(",
"\"%s %s\"",
"%",
"(",
"conf",
".",
"prog",
".",
"ifconfig",
",",
"ifname",
")",
")",
"except",
"OSError",
":",
"log_interactive",
".",
"warning",
"(",
"\"Failed to execute ifconfig.\"",
")",
"return",
"[",
"]",
"# Iterate over lines and extract IPv6 addresses",
"ret",
"=",
"[",
"]",
"for",
"line",
"in",
"f",
":",
"if",
"\"inet6\"",
"in",
"line",
":",
"addr",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"None",
",",
"2",
")",
"[",
"1",
"]",
"# The second element is the IPv6 address # noqa: E501",
"else",
":",
"continue",
"if",
"'%'",
"in",
"line",
":",
"# Remove the interface identifier if present",
"addr",
"=",
"addr",
".",
"split",
"(",
"\"%\"",
",",
"1",
")",
"[",
"0",
"]",
"# Check if it is a valid IPv6 address",
"try",
":",
"inet_pton",
"(",
"socket",
".",
"AF_INET6",
",",
"addr",
")",
"except",
"(",
"socket",
".",
"error",
",",
"ValueError",
")",
":",
"continue",
"# Get the scope and keep the address",
"scope",
"=",
"in6_getscope",
"(",
"addr",
")",
"ret",
".",
"append",
"(",
"(",
"addr",
",",
"scope",
",",
"ifname",
")",
")",
"return",
"ret"
] | Returns a list of IPv6 addresses configured on the interface ifname. | [
"Returns",
"a",
"list",
"of",
"IPv6",
"addresses",
"configured",
"on",
"the",
"interface",
"ifname",
"."
] | python | train |
PmagPy/PmagPy | programs/conversion_scripts2/iodp_dscr_magic2.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/iodp_dscr_magic2.py#L9-L197 | def main(command_line=True, **kwargs):
"""
NAME
iodp_dscr_magic.py
DESCRIPTION
converts ODP LIMS discrete sample format files to magic_measurements format files
SYNTAX
iodp_descr_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-A : don't average replicate measurements
INPUTS
IODP discrete sample .csv file format exported from LIMS database
"""
#
# initialize defaults
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
csv_file=''
MagRecs,Specs=[],[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0
# get command line args
if command_line:
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if '-ID' in args:
ind = args.index('-ID')
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if "-A" in args: noave=1
if '-f' in args:
ind=args.index("-f")
csv_file=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path # rename dir_path after input_dir_path is set
noave = kwargs.get('noave', 0) # default (0) is DO average
csv_file = kwargs.get('csv_file', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
# format variables
meas_file= os.path.join(output_dir_path, meas_file)
if csv_file=="":
filelist=os.listdir(input_dir_path) # read in list of files to import
else:
csv_file = os.path.join(input_dir_path, csv_file)
filelist=[csv_file]
# parsing the data
file_found = False
for fname in filelist: # parse each file
if fname[-3:].lower()=='csv':
file_found = True
print('processing: ',fname)
with open(fname, 'r') as finput:
data = list(finput.readlines())
keys = data[0].replace('\n','').split(',') # splits on underscores
interval_key="Offset (cm)"
demag_key="Demag level (mT)"
offline_demag_key="Treatment Value (mT or °C)"
offline_treatment_type="Treatment type"
run_key="Test No."
if "Inclination background + tray corrected (deg)" in keys: inc_key="Inclination background + tray corrected (deg)"
if "Inclination background & tray corrected (deg)" in keys: inc_key="Inclination background & tray corrected (deg)"
if "Declination background + tray corrected (deg)" in keys: dec_key="Declination background + tray corrected (deg)"
if "Declination background & tray corrected (deg)" in keys: dec_key="Declination background & tray corrected (deg)"
if "Intensity background + tray corrected (A/m)" in keys: int_key="Intensity background + tray corrected (A/m)"
if "Intensity background & tray corrected (A/m)" in keys: int_key="Intensity background & tray corrected (A/m)"
type="Type"
sect_key="Sect"
half_key="A/W"
# need to add volume_key to LORE format!
if "Sample volume (cm^3)" in keys:volume_key="Sample volume (cm^3)"
if "Sample volume (cc)" in keys:volume_key="Sample volume (cc)"
if "Sample volume (cm³)" in keys:volume_key="Sample volume (cm³)"
for line in data[1:]:
InRec={}
for k in range(len(keys)):InRec[keys[k]]=line.split(',')[k]
inst="IODP-SRM"
MagRec={}
expedition=InRec['Exp']
location=InRec['Site']+InRec['Hole']
offsets=InRec[interval_key].split('.') # maintain consistency with er_samples convention of using top interval
if len(offsets)==1:
offset=int(offsets[0])
else:
offset=int(offsets[0])-1
#interval=str(offset+1)# maintain consistency with er_samples convention of using top interval
interval=str(offset)# maintain consistency with er_samples convention of using top interval
specimen=expedition+'-'+location+'-'+InRec['Core']+InRec[type]+"-"+InRec[sect_key]+'_'+InRec[half_key]+'_'+interval
if specimen not in Specs:Specs.append(specimen)
MagRec['er_expedition_name']=expedition
MagRec['er_location_name']=location
MagRec['er_site_name']=specimen
MagRec['er_citation_names']=citation
MagRec['er_specimen_name']=specimen
MagRec['er_sample_name']=specimen
MagRec['er_site_name']=specimen
# set up measurement record - default is NRM
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
MagRec["measurement_csd"]='0' # assume all data are "good"
volume=InRec[volume_key]
MagRec["magic_method_codes"]='LT-NO'
sort_by='treatment_ac_field' # set default to AF demag
if InRec[demag_key]!="0":
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':IODP-SRM-AF' # measured on shipboard in-line 2G AF
treatment_value=float(InRec[demag_key].strip('"'))*1e-3 # convert mT => T
if sort_by =="treatment_ac_field":
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
else:
MagRec["treatment_ac_field"]=str(treatment_value)# AF demag in treat mT => T
elif offline_treatment_type in list(InRec.keys()) and InRec[offline_treatment_type]!="":
if "Lowrie" in InRec['Comments']:
MagRec['magic_method_codes'] = 'LP-IRM-3D'
treatment_value=float(InRec[offline_demag_key].strip('"'))+273. # convert C => K
MagRec["treatment_temp"]=treatment_value
MagRec["treatment_ac_field"]="0"
sort_by='treatment_temp'
elif 'Isothermal' in InRec[offline_treatment_type]:
MagRec['magic_method_codes'] = 'LT-IRM'
treatment_value=float(InRec[offline_demag_key].strip('"'))*1e-3 # convert mT => T
MagRec["treatment_dc_field"]=treatment_value
MagRec["treatment_ac_field"]="0"
sort_by='treatment_dc_field'
MagRec["measurement_standard"]='u' # assume all data are "good"
vol=float(volume)*1e-6 # convert from cc to m^3
if run_key in list(InRec.keys()):
run_number=InRec[run_key]
MagRec['external_database_ids']=run_number
MagRec['external_database_names']='LIMS'
else:
MagRec['external_database_ids']=""
MagRec['external_database_names']=''
MagRec['measurement_description']='sample orientation: '+InRec['Sample orientation']
MagRec['measurement_inc']=InRec[inc_key].strip('"')
MagRec['measurement_dec']=InRec[dec_key].strip('"')
intens= InRec[int_key].strip('"')
MagRec['measurement_magn_moment']='%8.3e'%(float(intens)*vol) # convert intensity from A/m to Am^2 using vol
MagRec['magic_instrument_codes']=inst
MagRec['measurement_number']='1'
MagRec['measurement_positions']=''
MagRecs.append(MagRec)
if not file_found:
print("No .csv files were found")
return False, "No .csv files were found"
MagOuts=[]
for spec in Specs:
Speclist=pmag.get_dictitem(MagRecs,'er_specimen_name',spec,'T')
Meassorted=sorted(Speclist, key=lambda x,y=None: int(round(float(x[sort_by])-float(y[sort_by]))) if y!=None else 0)
for rec in Meassorted:
for key in list(rec.keys()): rec[key]=str(rec[key])
MagOuts.append(rec)
Fixed=pmag.measurements_methods(MagOuts,noave)
Out,keys=pmag.fillkeys(Fixed)
if pmag.magic_write(meas_file,Out,'magic_measurements'):
print('data stored in ',meas_file)
return True, meas_file
else:
print('no data found. bad magfile?')
return False, 'no data found. bad magfile?' | [
"def",
"main",
"(",
"command_line",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"#",
"# initialize defaults",
"version_num",
"=",
"pmag",
".",
"get_version",
"(",
")",
"meas_file",
"=",
"'magic_measurements.txt'",
"csv_file",
"=",
"''",
"MagRecs",
",",
"Specs",
"=",
"[",
"]",
",",
"[",
"]",
"citation",
"=",
"\"This study\"",
"dir_path",
",",
"demag",
"=",
"'.'",
",",
"'NRM'",
"args",
"=",
"sys",
".",
"argv",
"noave",
"=",
"0",
"# get command line args",
"if",
"command_line",
":",
"if",
"'-WD'",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-WD\"",
")",
"dir_path",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-ID'",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"'-ID'",
")",
"input_dir_path",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"else",
":",
"input_dir_path",
"=",
"dir_path",
"output_dir_path",
"=",
"dir_path",
"if",
"\"-h\"",
"in",
"args",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"return",
"False",
"if",
"\"-A\"",
"in",
"args",
":",
"noave",
"=",
"1",
"if",
"'-f'",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-f\"",
")",
"csv_file",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"'-F'",
"in",
"args",
":",
"ind",
"=",
"args",
".",
"index",
"(",
"\"-F\"",
")",
"meas_file",
"=",
"args",
"[",
"ind",
"+",
"1",
"]",
"if",
"not",
"command_line",
":",
"dir_path",
"=",
"kwargs",
".",
"get",
"(",
"'dir_path'",
",",
"'.'",
")",
"input_dir_path",
"=",
"kwargs",
".",
"get",
"(",
"'input_dir_path'",
",",
"dir_path",
")",
"output_dir_path",
"=",
"dir_path",
"# rename dir_path after input_dir_path is set",
"noave",
"=",
"kwargs",
".",
"get",
"(",
"'noave'",
",",
"0",
")",
"# default (0) is DO average",
"csv_file",
"=",
"kwargs",
".",
"get",
"(",
"'csv_file'",
",",
"''",
")",
"meas_file",
"=",
"kwargs",
".",
"get",
"(",
"'meas_file'",
",",
"'magic_measurements.txt'",
")",
"# format variables",
"meas_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir_path",
",",
"meas_file",
")",
"if",
"csv_file",
"==",
"\"\"",
":",
"filelist",
"=",
"os",
".",
"listdir",
"(",
"input_dir_path",
")",
"# read in list of files to import",
"else",
":",
"csv_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_dir_path",
",",
"csv_file",
")",
"filelist",
"=",
"[",
"csv_file",
"]",
"# parsing the data",
"file_found",
"=",
"False",
"for",
"fname",
"in",
"filelist",
":",
"# parse each file",
"if",
"fname",
"[",
"-",
"3",
":",
"]",
".",
"lower",
"(",
")",
"==",
"'csv'",
":",
"file_found",
"=",
"True",
"print",
"(",
"'processing: '",
",",
"fname",
")",
"with",
"open",
"(",
"fname",
",",
"'r'",
")",
"as",
"finput",
":",
"data",
"=",
"list",
"(",
"finput",
".",
"readlines",
"(",
")",
")",
"keys",
"=",
"data",
"[",
"0",
"]",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"split",
"(",
"','",
")",
"# splits on underscores",
"interval_key",
"=",
"\"Offset (cm)\"",
"demag_key",
"=",
"\"Demag level (mT)\"",
"offline_demag_key",
"=",
"\"Treatment Value (mT or °C)\"",
"offline_treatment_type",
"=",
"\"Treatment type\"",
"run_key",
"=",
"\"Test No.\"",
"if",
"\"Inclination background + tray corrected (deg)\"",
"in",
"keys",
":",
"inc_key",
"=",
"\"Inclination background + tray corrected (deg)\"",
"if",
"\"Inclination background & tray corrected (deg)\"",
"in",
"keys",
":",
"inc_key",
"=",
"\"Inclination background & tray corrected (deg)\"",
"if",
"\"Declination background + tray corrected (deg)\"",
"in",
"keys",
":",
"dec_key",
"=",
"\"Declination background + tray corrected (deg)\"",
"if",
"\"Declination background & tray corrected (deg)\"",
"in",
"keys",
":",
"dec_key",
"=",
"\"Declination background & tray corrected (deg)\"",
"if",
"\"Intensity background + tray corrected (A/m)\"",
"in",
"keys",
":",
"int_key",
"=",
"\"Intensity background + tray corrected (A/m)\"",
"if",
"\"Intensity background & tray corrected (A/m)\"",
"in",
"keys",
":",
"int_key",
"=",
"\"Intensity background & tray corrected (A/m)\"",
"type",
"=",
"\"Type\"",
"sect_key",
"=",
"\"Sect\"",
"half_key",
"=",
"\"A/W\"",
"# need to add volume_key to LORE format!",
"if",
"\"Sample volume (cm^3)\"",
"in",
"keys",
":",
"volume_key",
"=",
"\"Sample volume (cm^3)\"",
"if",
"\"Sample volume (cc)\"",
"in",
"keys",
":",
"volume_key",
"=",
"\"Sample volume (cc)\"",
"if",
"\"Sample volume (cm³)\"",
"in",
"keys",
":",
"volume_key",
"=",
"\"Sample volume (cm³)\"",
"for",
"line",
"in",
"data",
"[",
"1",
":",
"]",
":",
"InRec",
"=",
"{",
"}",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"keys",
")",
")",
":",
"InRec",
"[",
"keys",
"[",
"k",
"]",
"]",
"=",
"line",
".",
"split",
"(",
"','",
")",
"[",
"k",
"]",
"inst",
"=",
"\"IODP-SRM\"",
"MagRec",
"=",
"{",
"}",
"expedition",
"=",
"InRec",
"[",
"'Exp'",
"]",
"location",
"=",
"InRec",
"[",
"'Site'",
"]",
"+",
"InRec",
"[",
"'Hole'",
"]",
"offsets",
"=",
"InRec",
"[",
"interval_key",
"]",
".",
"split",
"(",
"'.'",
")",
"# maintain consistency with er_samples convention of using top interval",
"if",
"len",
"(",
"offsets",
")",
"==",
"1",
":",
"offset",
"=",
"int",
"(",
"offsets",
"[",
"0",
"]",
")",
"else",
":",
"offset",
"=",
"int",
"(",
"offsets",
"[",
"0",
"]",
")",
"-",
"1",
"#interval=str(offset+1)# maintain consistency with er_samples convention of using top interval",
"interval",
"=",
"str",
"(",
"offset",
")",
"# maintain consistency with er_samples convention of using top interval",
"specimen",
"=",
"expedition",
"+",
"'-'",
"+",
"location",
"+",
"'-'",
"+",
"InRec",
"[",
"'Core'",
"]",
"+",
"InRec",
"[",
"type",
"]",
"+",
"\"-\"",
"+",
"InRec",
"[",
"sect_key",
"]",
"+",
"'_'",
"+",
"InRec",
"[",
"half_key",
"]",
"+",
"'_'",
"+",
"interval",
"if",
"specimen",
"not",
"in",
"Specs",
":",
"Specs",
".",
"append",
"(",
"specimen",
")",
"MagRec",
"[",
"'er_expedition_name'",
"]",
"=",
"expedition",
"MagRec",
"[",
"'er_location_name'",
"]",
"=",
"location",
"MagRec",
"[",
"'er_site_name'",
"]",
"=",
"specimen",
"MagRec",
"[",
"'er_citation_names'",
"]",
"=",
"citation",
"MagRec",
"[",
"'er_specimen_name'",
"]",
"=",
"specimen",
"MagRec",
"[",
"'er_sample_name'",
"]",
"=",
"specimen",
"MagRec",
"[",
"'er_site_name'",
"]",
"=",
"specimen",
"# set up measurement record - default is NRM",
"MagRec",
"[",
"'magic_software_packages'",
"]",
"=",
"version_num",
"MagRec",
"[",
"\"treatment_temp\"",
"]",
"=",
"'%8.3e'",
"%",
"(",
"273",
")",
"# room temp in kelvin",
"MagRec",
"[",
"\"measurement_temp\"",
"]",
"=",
"'%8.3e'",
"%",
"(",
"273",
")",
"# room temp in kelvin",
"MagRec",
"[",
"\"treatment_ac_field\"",
"]",
"=",
"'0'",
"MagRec",
"[",
"\"treatment_dc_field\"",
"]",
"=",
"'0'",
"MagRec",
"[",
"\"treatment_dc_field_phi\"",
"]",
"=",
"'0'",
"MagRec",
"[",
"\"treatment_dc_field_theta\"",
"]",
"=",
"'0'",
"MagRec",
"[",
"\"measurement_flag\"",
"]",
"=",
"'g'",
"# assume all data are \"good\"",
"MagRec",
"[",
"\"measurement_standard\"",
"]",
"=",
"'u'",
"# assume all data are \"good\"",
"MagRec",
"[",
"\"measurement_csd\"",
"]",
"=",
"'0'",
"# assume all data are \"good\"",
"volume",
"=",
"InRec",
"[",
"volume_key",
"]",
"MagRec",
"[",
"\"magic_method_codes\"",
"]",
"=",
"'LT-NO'",
"sort_by",
"=",
"'treatment_ac_field'",
"# set default to AF demag",
"if",
"InRec",
"[",
"demag_key",
"]",
"!=",
"\"0\"",
":",
"MagRec",
"[",
"'magic_method_codes'",
"]",
"=",
"'LT-AF-Z'",
"inst",
"=",
"inst",
"+",
"':IODP-SRM-AF'",
"# measured on shipboard in-line 2G AF",
"treatment_value",
"=",
"float",
"(",
"InRec",
"[",
"demag_key",
"]",
".",
"strip",
"(",
"'\"'",
")",
")",
"*",
"1e-3",
"# convert mT => T",
"if",
"sort_by",
"==",
"\"treatment_ac_field\"",
":",
"MagRec",
"[",
"\"treatment_ac_field\"",
"]",
"=",
"treatment_value",
"# AF demag in treat mT => T",
"else",
":",
"MagRec",
"[",
"\"treatment_ac_field\"",
"]",
"=",
"str",
"(",
"treatment_value",
")",
"# AF demag in treat mT => T",
"elif",
"offline_treatment_type",
"in",
"list",
"(",
"InRec",
".",
"keys",
"(",
")",
")",
"and",
"InRec",
"[",
"offline_treatment_type",
"]",
"!=",
"\"\"",
":",
"if",
"\"Lowrie\"",
"in",
"InRec",
"[",
"'Comments'",
"]",
":",
"MagRec",
"[",
"'magic_method_codes'",
"]",
"=",
"'LP-IRM-3D'",
"treatment_value",
"=",
"float",
"(",
"InRec",
"[",
"offline_demag_key",
"]",
".",
"strip",
"(",
"'\"'",
")",
")",
"+",
"273.",
"# convert C => K",
"MagRec",
"[",
"\"treatment_temp\"",
"]",
"=",
"treatment_value",
"MagRec",
"[",
"\"treatment_ac_field\"",
"]",
"=",
"\"0\"",
"sort_by",
"=",
"'treatment_temp'",
"elif",
"'Isothermal'",
"in",
"InRec",
"[",
"offline_treatment_type",
"]",
":",
"MagRec",
"[",
"'magic_method_codes'",
"]",
"=",
"'LT-IRM'",
"treatment_value",
"=",
"float",
"(",
"InRec",
"[",
"offline_demag_key",
"]",
".",
"strip",
"(",
"'\"'",
")",
")",
"*",
"1e-3",
"# convert mT => T",
"MagRec",
"[",
"\"treatment_dc_field\"",
"]",
"=",
"treatment_value",
"MagRec",
"[",
"\"treatment_ac_field\"",
"]",
"=",
"\"0\"",
"sort_by",
"=",
"'treatment_dc_field'",
"MagRec",
"[",
"\"measurement_standard\"",
"]",
"=",
"'u'",
"# assume all data are \"good\"",
"vol",
"=",
"float",
"(",
"volume",
")",
"*",
"1e-6",
"# convert from cc to m^3",
"if",
"run_key",
"in",
"list",
"(",
"InRec",
".",
"keys",
"(",
")",
")",
":",
"run_number",
"=",
"InRec",
"[",
"run_key",
"]",
"MagRec",
"[",
"'external_database_ids'",
"]",
"=",
"run_number",
"MagRec",
"[",
"'external_database_names'",
"]",
"=",
"'LIMS'",
"else",
":",
"MagRec",
"[",
"'external_database_ids'",
"]",
"=",
"\"\"",
"MagRec",
"[",
"'external_database_names'",
"]",
"=",
"''",
"MagRec",
"[",
"'measurement_description'",
"]",
"=",
"'sample orientation: '",
"+",
"InRec",
"[",
"'Sample orientation'",
"]",
"MagRec",
"[",
"'measurement_inc'",
"]",
"=",
"InRec",
"[",
"inc_key",
"]",
".",
"strip",
"(",
"'\"'",
")",
"MagRec",
"[",
"'measurement_dec'",
"]",
"=",
"InRec",
"[",
"dec_key",
"]",
".",
"strip",
"(",
"'\"'",
")",
"intens",
"=",
"InRec",
"[",
"int_key",
"]",
".",
"strip",
"(",
"'\"'",
")",
"MagRec",
"[",
"'measurement_magn_moment'",
"]",
"=",
"'%8.3e'",
"%",
"(",
"float",
"(",
"intens",
")",
"*",
"vol",
")",
"# convert intensity from A/m to Am^2 using vol",
"MagRec",
"[",
"'magic_instrument_codes'",
"]",
"=",
"inst",
"MagRec",
"[",
"'measurement_number'",
"]",
"=",
"'1'",
"MagRec",
"[",
"'measurement_positions'",
"]",
"=",
"''",
"MagRecs",
".",
"append",
"(",
"MagRec",
")",
"if",
"not",
"file_found",
":",
"print",
"(",
"\"No .csv files were found\"",
")",
"return",
"False",
",",
"\"No .csv files were found\"",
"MagOuts",
"=",
"[",
"]",
"for",
"spec",
"in",
"Specs",
":",
"Speclist",
"=",
"pmag",
".",
"get_dictitem",
"(",
"MagRecs",
",",
"'er_specimen_name'",
",",
"spec",
",",
"'T'",
")",
"Meassorted",
"=",
"sorted",
"(",
"Speclist",
",",
"key",
"=",
"lambda",
"x",
",",
"y",
"=",
"None",
":",
"int",
"(",
"round",
"(",
"float",
"(",
"x",
"[",
"sort_by",
"]",
")",
"-",
"float",
"(",
"y",
"[",
"sort_by",
"]",
")",
")",
")",
"if",
"y",
"!=",
"None",
"else",
"0",
")",
"for",
"rec",
"in",
"Meassorted",
":",
"for",
"key",
"in",
"list",
"(",
"rec",
".",
"keys",
"(",
")",
")",
":",
"rec",
"[",
"key",
"]",
"=",
"str",
"(",
"rec",
"[",
"key",
"]",
")",
"MagOuts",
".",
"append",
"(",
"rec",
")",
"Fixed",
"=",
"pmag",
".",
"measurements_methods",
"(",
"MagOuts",
",",
"noave",
")",
"Out",
",",
"keys",
"=",
"pmag",
".",
"fillkeys",
"(",
"Fixed",
")",
"if",
"pmag",
".",
"magic_write",
"(",
"meas_file",
",",
"Out",
",",
"'magic_measurements'",
")",
":",
"print",
"(",
"'data stored in '",
",",
"meas_file",
")",
"return",
"True",
",",
"meas_file",
"else",
":",
"print",
"(",
"'no data found. bad magfile?'",
")",
"return",
"False",
",",
"'no data found. bad magfile?'"
] | NAME
iodp_dscr_magic.py
DESCRIPTION
converts ODP LIMS discrete sample format files to magic_measurements format files
SYNTAX
iodp_descr_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-A : don't average replicate measurements
INPUTS
IODP discrete sample .csv file format exported from LIMS database | [
"NAME",
"iodp_dscr_magic",
".",
"py"
] | python | train |
AlecAivazis/graphql-over-kafka | nautilus/api/util/graph_entity.py | https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/api/util/graph_entity.py#L102-L122 | async def _has_id(self, *args, **kwds):
"""
Equality checks are overwitten to perform the actual check in a
semantic way.
"""
# if there is only one positional argument
if len(args) == 1:
# parse the appropriate query
result = await parse_string(
self._query,
self.service.object_resolver,
self.service.connection_resolver,
self.service.mutation_resolver,
obey_auth=False
)
# go to the bottom of the result for the list of matching ids
return self._find_id(result['data'], args[0])
# otherwise
else:
# treat the attribute like a normal filter
return self._has_id(**kwds) | [
"async",
"def",
"_has_id",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"# if there is only one positional argument",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"# parse the appropriate query",
"result",
"=",
"await",
"parse_string",
"(",
"self",
".",
"_query",
",",
"self",
".",
"service",
".",
"object_resolver",
",",
"self",
".",
"service",
".",
"connection_resolver",
",",
"self",
".",
"service",
".",
"mutation_resolver",
",",
"obey_auth",
"=",
"False",
")",
"# go to the bottom of the result for the list of matching ids",
"return",
"self",
".",
"_find_id",
"(",
"result",
"[",
"'data'",
"]",
",",
"args",
"[",
"0",
"]",
")",
"# otherwise",
"else",
":",
"# treat the attribute like a normal filter",
"return",
"self",
".",
"_has_id",
"(",
"*",
"*",
"kwds",
")"
] | Equality checks are overwitten to perform the actual check in a
semantic way. | [
"Equality",
"checks",
"are",
"overwitten",
"to",
"perform",
"the",
"actual",
"check",
"in",
"a",
"semantic",
"way",
"."
] | python | train |
rcbops/flake8-filename | flake8_filename/rules.py | https://github.com/rcbops/flake8-filename/blob/5718d4af394c318d376de7434193543e0da45651/flake8_filename/rules.py#L7-L18 | def _generate_mark_code(rule_name):
"""Generates a two digit string based on a provided string
Args:
rule_name (str): A configured rule name 'pytest_mark3'.
Returns:
str: A two digit code based on the provided string '03'
"""
code = ''.join([i for i in str(rule_name) if i.isdigit()])
code = code.zfill(2)
return code | [
"def",
"_generate_mark_code",
"(",
"rule_name",
")",
":",
"code",
"=",
"''",
".",
"join",
"(",
"[",
"i",
"for",
"i",
"in",
"str",
"(",
"rule_name",
")",
"if",
"i",
".",
"isdigit",
"(",
")",
"]",
")",
"code",
"=",
"code",
".",
"zfill",
"(",
"2",
")",
"return",
"code"
] | Generates a two digit string based on a provided string
Args:
rule_name (str): A configured rule name 'pytest_mark3'.
Returns:
str: A two digit code based on the provided string '03' | [
"Generates",
"a",
"two",
"digit",
"string",
"based",
"on",
"a",
"provided",
"string"
] | python | train |
odlgroup/odl | odl/operator/default_ops.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/default_ops.py#L856-L872 | def derivative(self, point):
"""Derivative of this operator, always zero.
Returns
-------
derivative : `ZeroOperator`
Examples
--------
>>> r3 = odl.rn(3)
>>> x = r3.element([1, 2, 3])
>>> op = ConstantOperator(x)
>>> deriv = op.derivative([1, 1, 1])
>>> deriv([2, 2, 2])
rn(3).element([ 0., 0., 0.])
"""
return ZeroOperator(domain=self.domain, range=self.range) | [
"def",
"derivative",
"(",
"self",
",",
"point",
")",
":",
"return",
"ZeroOperator",
"(",
"domain",
"=",
"self",
".",
"domain",
",",
"range",
"=",
"self",
".",
"range",
")"
] | Derivative of this operator, always zero.
Returns
-------
derivative : `ZeroOperator`
Examples
--------
>>> r3 = odl.rn(3)
>>> x = r3.element([1, 2, 3])
>>> op = ConstantOperator(x)
>>> deriv = op.derivative([1, 1, 1])
>>> deriv([2, 2, 2])
rn(3).element([ 0., 0., 0.]) | [
"Derivative",
"of",
"this",
"operator",
"always",
"zero",
"."
] | python | train |
jd/tenacity | tenacity/compat.py | https://github.com/jd/tenacity/blob/354c40b7dc8e728c438668100dd020b65c84dfc6/tenacity/compat.py#L120-L136 | def stop_func_accept_retry_state(stop_func):
"""Wrap "stop" function to accept "retry_state" parameter."""
if not six.callable(stop_func):
return stop_func
if func_takes_retry_state(stop_func):
return stop_func
@_utils.wraps(stop_func)
def wrapped_stop_func(retry_state):
warn_about_non_retry_state_deprecation(
'stop', stop_func, stacklevel=4)
return stop_func(
retry_state.attempt_number,
retry_state.seconds_since_start,
)
return wrapped_stop_func | [
"def",
"stop_func_accept_retry_state",
"(",
"stop_func",
")",
":",
"if",
"not",
"six",
".",
"callable",
"(",
"stop_func",
")",
":",
"return",
"stop_func",
"if",
"func_takes_retry_state",
"(",
"stop_func",
")",
":",
"return",
"stop_func",
"@",
"_utils",
".",
"wraps",
"(",
"stop_func",
")",
"def",
"wrapped_stop_func",
"(",
"retry_state",
")",
":",
"warn_about_non_retry_state_deprecation",
"(",
"'stop'",
",",
"stop_func",
",",
"stacklevel",
"=",
"4",
")",
"return",
"stop_func",
"(",
"retry_state",
".",
"attempt_number",
",",
"retry_state",
".",
"seconds_since_start",
",",
")",
"return",
"wrapped_stop_func"
] | Wrap "stop" function to accept "retry_state" parameter. | [
"Wrap",
"stop",
"function",
"to",
"accept",
"retry_state",
"parameter",
"."
] | python | train |
PyHDI/Pyverilog | pyverilog/vparser/parser.py | https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1865-L1868 | def p_instance_port_arg(self, p):
'instance_port_arg : DOT ID LPAREN identifier RPAREN'
p[0] = PortArg(p[2], p[4], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | [
"def",
"p_instance_port_arg",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"PortArg",
"(",
"p",
"[",
"2",
"]",
",",
"p",
"[",
"4",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")",
"p",
".",
"set_lineno",
"(",
"0",
",",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] | instance_port_arg : DOT ID LPAREN identifier RPAREN | [
"instance_port_arg",
":",
"DOT",
"ID",
"LPAREN",
"identifier",
"RPAREN"
] | python | train |
blockstack/blockstack-core | api/search/substring_search.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L79-L105 | def substring_search(query, list_of_strings, limit_results=DEFAULT_LIMIT):
""" main function to call for searching
"""
matching = []
query_words = query.split(' ')
# sort by longest word (higest probability of not finding a match)
query_words.sort(key=len, reverse=True)
counter = 0
for s in list_of_strings:
target_words = s.split(' ')
# the anyword searching function is separate
if(anyword_substring_search(target_words, query_words)):
matching.append(s)
# limit results
counter += 1
if(counter == limit_results):
break
return matching | [
"def",
"substring_search",
"(",
"query",
",",
"list_of_strings",
",",
"limit_results",
"=",
"DEFAULT_LIMIT",
")",
":",
"matching",
"=",
"[",
"]",
"query_words",
"=",
"query",
".",
"split",
"(",
"' '",
")",
"# sort by longest word (higest probability of not finding a match)",
"query_words",
".",
"sort",
"(",
"key",
"=",
"len",
",",
"reverse",
"=",
"True",
")",
"counter",
"=",
"0",
"for",
"s",
"in",
"list_of_strings",
":",
"target_words",
"=",
"s",
".",
"split",
"(",
"' '",
")",
"# the anyword searching function is separate",
"if",
"(",
"anyword_substring_search",
"(",
"target_words",
",",
"query_words",
")",
")",
":",
"matching",
".",
"append",
"(",
"s",
")",
"# limit results",
"counter",
"+=",
"1",
"if",
"(",
"counter",
"==",
"limit_results",
")",
":",
"break",
"return",
"matching"
] | main function to call for searching | [
"main",
"function",
"to",
"call",
"for",
"searching"
] | python | train |
pandas-dev/pandas | pandas/util/_validators.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L325-L358 | def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method | [
"def",
"validate_fillna_kwargs",
"(",
"value",
",",
"method",
",",
"validate_scalar_dict_value",
"=",
"True",
")",
":",
"from",
"pandas",
".",
"core",
".",
"missing",
"import",
"clean_fill_method",
"if",
"value",
"is",
"None",
"and",
"method",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must specify a fill 'value' or 'method'.\"",
")",
"elif",
"value",
"is",
"None",
"and",
"method",
"is",
"not",
"None",
":",
"method",
"=",
"clean_fill_method",
"(",
"method",
")",
"elif",
"value",
"is",
"not",
"None",
"and",
"method",
"is",
"None",
":",
"if",
"validate_scalar_dict_value",
"and",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"'\"value\" parameter must be a scalar or dict, but '",
"'you passed a \"{0}\"'",
".",
"format",
"(",
"type",
"(",
"value",
")",
".",
"__name__",
")",
")",
"elif",
"value",
"is",
"not",
"None",
"and",
"method",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot specify both 'value' and 'method'.\"",
")",
"return",
"value",
",",
"method"
] | Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object | [
"Validate",
"the",
"keyword",
"arguments",
"to",
"fillna",
"."
] | python | train |
scanny/python-pptx | spec/gen_spec/gen_spec.py | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/spec/gen_spec/gen_spec.py#L205-L209 | def print_mso_auto_shape_type_constants():
"""print symbolic constant definitions for msoAutoShapeType"""
auto_shape_types = MsoAutoShapeTypeCollection.load(sort='const_name')
out = render_mso_auto_shape_type_constants(auto_shape_types)
print out | [
"def",
"print_mso_auto_shape_type_constants",
"(",
")",
":",
"auto_shape_types",
"=",
"MsoAutoShapeTypeCollection",
".",
"load",
"(",
"sort",
"=",
"'const_name'",
")",
"out",
"=",
"render_mso_auto_shape_type_constants",
"(",
"auto_shape_types",
")",
"print",
"out"
] | print symbolic constant definitions for msoAutoShapeType | [
"print",
"symbolic",
"constant",
"definitions",
"for",
"msoAutoShapeType"
] | python | train |
alephdata/memorious | memorious/logic/check.py | https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/check.py#L68-L73 | def must_contain(self, value, q, strict=False):
"""if value must contain q"""
if value is not None:
if value.find(q) != -1:
return
self.shout('Value %r does not contain %r', strict, value, q) | [
"def",
"must_contain",
"(",
"self",
",",
"value",
",",
"q",
",",
"strict",
"=",
"False",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"value",
".",
"find",
"(",
"q",
")",
"!=",
"-",
"1",
":",
"return",
"self",
".",
"shout",
"(",
"'Value %r does not contain %r'",
",",
"strict",
",",
"value",
",",
"q",
")"
] | if value must contain q | [
"if",
"value",
"must",
"contain",
"q"
] | python | train |
jobovy/galpy | galpy/potential/CosmphiDiskPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/CosmphiDiskPotential.py#L151-L170 | def _phiforce(self,R,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2011-10-19 - Written - Bovy (IAS)
"""
if R < self._rb:
return self._mphio*math.sin(self._m*phi-self._mphib)\
*self._rbp*(2.*self._r1p-self._rbp/R**self._p)
else:
return self._mphio*R**self._p*math.sin(self._m*phi-self._mphib) | [
"def",
"_phiforce",
"(",
"self",
",",
"R",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"if",
"R",
"<",
"self",
".",
"_rb",
":",
"return",
"self",
".",
"_mphio",
"*",
"math",
".",
"sin",
"(",
"self",
".",
"_m",
"*",
"phi",
"-",
"self",
".",
"_mphib",
")",
"*",
"self",
".",
"_rbp",
"*",
"(",
"2.",
"*",
"self",
".",
"_r1p",
"-",
"self",
".",
"_rbp",
"/",
"R",
"**",
"self",
".",
"_p",
")",
"else",
":",
"return",
"self",
".",
"_mphio",
"*",
"R",
"**",
"self",
".",
"_p",
"*",
"math",
".",
"sin",
"(",
"self",
".",
"_m",
"*",
"phi",
"-",
"self",
".",
"_mphib",
")"
] | NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2011-10-19 - Written - Bovy (IAS) | [
"NAME",
":",
"_phiforce",
"PURPOSE",
":",
"evaluate",
"the",
"azimuthal",
"force",
"for",
"this",
"potential",
"INPUT",
":",
"R",
"-",
"Galactocentric",
"cylindrical",
"radius",
"phi",
"-",
"azimuth",
"t",
"-",
"time",
"OUTPUT",
":",
"the",
"azimuthal",
"force",
"HISTORY",
":",
"2011",
"-",
"10",
"-",
"19",
"-",
"Written",
"-",
"Bovy",
"(",
"IAS",
")"
] | python | train |
PetrochukM/PyTorch-NLP | torchnlp/metrics/accuracy.py | https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/metrics/accuracy.py#L53-L102 | def get_token_accuracy(targets, outputs, ignore_index=None):
""" Get the accuracy token accuracy between two tensors.
Args:
targets (1 - 2D :class:`torch.Tensor`): Target or true vector against which to measure
saccuracy
outputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector
ignore_index (int, optional): Specifies a target index that is ignored
Returns:
:class:`tuple` consisting of accuracy (:class:`float`), number correct (:class:`int`) and
total (:class:`int`)
Example:
>>> import torch
>>> from torchnlp.metrics import get_token_accuracy
>>> targets = torch.LongTensor([[1, 1], [2, 2], [3, 3]])
>>> outputs = torch.LongTensor([[1, 1], [2, 3], [4, 4]])
>>> accuracy, n_correct, n_total = get_token_accuracy(targets, outputs, ignore_index=3)
>>> accuracy
0.75
>>> n_correct
3.0
>>> n_total
4.0
"""
n_correct = 0.0
n_total = 0.0
for target, output in zip(targets, outputs):
if not torch.is_tensor(target) or is_scalar(target):
target = torch.LongTensor([target])
if not torch.is_tensor(output) or is_scalar(output):
output = torch.LongTensor([[output]])
if len(target.size()) != len(output.size()):
prediction = output.max(dim=0)[0].view(-1)
else:
prediction = output
if ignore_index is not None:
mask = target.ne(ignore_index)
n_correct += prediction.eq(target).masked_select(mask).sum().item()
n_total += mask.sum().item()
else:
n_total += len(target)
n_correct += prediction.eq(target).sum().item()
return n_correct / n_total, n_correct, n_total | [
"def",
"get_token_accuracy",
"(",
"targets",
",",
"outputs",
",",
"ignore_index",
"=",
"None",
")",
":",
"n_correct",
"=",
"0.0",
"n_total",
"=",
"0.0",
"for",
"target",
",",
"output",
"in",
"zip",
"(",
"targets",
",",
"outputs",
")",
":",
"if",
"not",
"torch",
".",
"is_tensor",
"(",
"target",
")",
"or",
"is_scalar",
"(",
"target",
")",
":",
"target",
"=",
"torch",
".",
"LongTensor",
"(",
"[",
"target",
"]",
")",
"if",
"not",
"torch",
".",
"is_tensor",
"(",
"output",
")",
"or",
"is_scalar",
"(",
"output",
")",
":",
"output",
"=",
"torch",
".",
"LongTensor",
"(",
"[",
"[",
"output",
"]",
"]",
")",
"if",
"len",
"(",
"target",
".",
"size",
"(",
")",
")",
"!=",
"len",
"(",
"output",
".",
"size",
"(",
")",
")",
":",
"prediction",
"=",
"output",
".",
"max",
"(",
"dim",
"=",
"0",
")",
"[",
"0",
"]",
".",
"view",
"(",
"-",
"1",
")",
"else",
":",
"prediction",
"=",
"output",
"if",
"ignore_index",
"is",
"not",
"None",
":",
"mask",
"=",
"target",
".",
"ne",
"(",
"ignore_index",
")",
"n_correct",
"+=",
"prediction",
".",
"eq",
"(",
"target",
")",
".",
"masked_select",
"(",
"mask",
")",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
"n_total",
"+=",
"mask",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
"else",
":",
"n_total",
"+=",
"len",
"(",
"target",
")",
"n_correct",
"+=",
"prediction",
".",
"eq",
"(",
"target",
")",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
"return",
"n_correct",
"/",
"n_total",
",",
"n_correct",
",",
"n_total"
] | Get the accuracy token accuracy between two tensors.
Args:
targets (1 - 2D :class:`torch.Tensor`): Target or true vector against which to measure
saccuracy
outputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector
ignore_index (int, optional): Specifies a target index that is ignored
Returns:
:class:`tuple` consisting of accuracy (:class:`float`), number correct (:class:`int`) and
total (:class:`int`)
Example:
>>> import torch
>>> from torchnlp.metrics import get_token_accuracy
>>> targets = torch.LongTensor([[1, 1], [2, 2], [3, 3]])
>>> outputs = torch.LongTensor([[1, 1], [2, 3], [4, 4]])
>>> accuracy, n_correct, n_total = get_token_accuracy(targets, outputs, ignore_index=3)
>>> accuracy
0.75
>>> n_correct
3.0
>>> n_total
4.0 | [
"Get",
"the",
"accuracy",
"token",
"accuracy",
"between",
"two",
"tensors",
"."
] | python | train |
cocoakekeyu/cancan | cancan/rule.py | https://github.com/cocoakekeyu/cancan/blob/f198d560e6e008e6c5580ba55581a939a5d544ed/cancan/rule.py#L42-L46 | def is_relevant(self, action, subject):
"""
Matches both the subject and action, not necessarily the conditions.
"""
return self.matches_action(action) and self.matches_subject(subject) | [
"def",
"is_relevant",
"(",
"self",
",",
"action",
",",
"subject",
")",
":",
"return",
"self",
".",
"matches_action",
"(",
"action",
")",
"and",
"self",
".",
"matches_subject",
"(",
"subject",
")"
] | Matches both the subject and action, not necessarily the conditions. | [
"Matches",
"both",
"the",
"subject",
"and",
"action",
"not",
"necessarily",
"the",
"conditions",
"."
] | python | train |
consbio/ncdjango | ncdjango/interfaces/arcgis/views.py | https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/arcgis/views.py#L303-L315 | def _get_form_defaults(self):
"""Returns default values for the identify form"""
return {
'response_format': 'html',
'geometry_type': 'esriGeometryPoint',
'projection': pyproj.Proj(str(self.service.projection)),
'return_geometry': True,
'maximum_allowable_offset': 2,
'geometry_precision': 3,
'return_z': False,
'return_m': False
} | [
"def",
"_get_form_defaults",
"(",
"self",
")",
":",
"return",
"{",
"'response_format'",
":",
"'html'",
",",
"'geometry_type'",
":",
"'esriGeometryPoint'",
",",
"'projection'",
":",
"pyproj",
".",
"Proj",
"(",
"str",
"(",
"self",
".",
"service",
".",
"projection",
")",
")",
",",
"'return_geometry'",
":",
"True",
",",
"'maximum_allowable_offset'",
":",
"2",
",",
"'geometry_precision'",
":",
"3",
",",
"'return_z'",
":",
"False",
",",
"'return_m'",
":",
"False",
"}"
] | Returns default values for the identify form | [
"Returns",
"default",
"values",
"for",
"the",
"identify",
"form"
] | python | train |
Jaymon/prom | prom/cli/dump.py | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/cli/dump.py#L81-L96 | def get_orm_classes(path):
"""this will return prom.Orm classes found in the given path (classpath or modulepath)"""
ret = set()
try:
m = importlib.import_module(path)
except ImportError:
# we have a classpath
m, klass = get_objects(path)
if issubclass(klass, Orm):
ret.add(klass)
else:
ret.update(get_subclasses(m, Orm))
return ret | [
"def",
"get_orm_classes",
"(",
"path",
")",
":",
"ret",
"=",
"set",
"(",
")",
"try",
":",
"m",
"=",
"importlib",
".",
"import_module",
"(",
"path",
")",
"except",
"ImportError",
":",
"# we have a classpath",
"m",
",",
"klass",
"=",
"get_objects",
"(",
"path",
")",
"if",
"issubclass",
"(",
"klass",
",",
"Orm",
")",
":",
"ret",
".",
"add",
"(",
"klass",
")",
"else",
":",
"ret",
".",
"update",
"(",
"get_subclasses",
"(",
"m",
",",
"Orm",
")",
")",
"return",
"ret"
] | this will return prom.Orm classes found in the given path (classpath or modulepath) | [
"this",
"will",
"return",
"prom",
".",
"Orm",
"classes",
"found",
"in",
"the",
"given",
"path",
"(",
"classpath",
"or",
"modulepath",
")"
] | python | train |
amcat/amcatclient | demo_wikinews_scraper.py | https://github.com/amcat/amcatclient/blob/bda525f7ace0c26a09fa56d2baf7550f639e62ee/demo_wikinews_scraper.py#L118-L133 | def scrape_wikinews(conn, project, articleset, query):
"""
Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name
"""
url = "http://en.wikinews.org/w/index.php?search={}&limit=50".format(query)
logging.info(url)
for page in get_pages(url):
urls = get_article_urls(page)
arts = list(get_articles(urls))
logging.info("Adding {} articles to set {}:{}"
.format(len(arts), project, articleset))
conn.create_articles(project=project, articleset=articleset,
json_data=arts) | [
"def",
"scrape_wikinews",
"(",
"conn",
",",
"project",
",",
"articleset",
",",
"query",
")",
":",
"url",
"=",
"\"http://en.wikinews.org/w/index.php?search={}&limit=50\"",
".",
"format",
"(",
"query",
")",
"logging",
".",
"info",
"(",
"url",
")",
"for",
"page",
"in",
"get_pages",
"(",
"url",
")",
":",
"urls",
"=",
"get_article_urls",
"(",
"page",
")",
"arts",
"=",
"list",
"(",
"get_articles",
"(",
"urls",
")",
")",
"logging",
".",
"info",
"(",
"\"Adding {} articles to set {}:{}\"",
".",
"format",
"(",
"len",
"(",
"arts",
")",
",",
"project",
",",
"articleset",
")",
")",
"conn",
".",
"create_articles",
"(",
"project",
"=",
"project",
",",
"articleset",
"=",
"articleset",
",",
"json_data",
"=",
"arts",
")"
] | Scrape wikinews articles from the given query
@param conn: The AmcatAPI object
@param articleset: The target articleset ID
@param category: The wikinews category name | [
"Scrape",
"wikinews",
"articles",
"from",
"the",
"given",
"query"
] | python | train |
wummel/linkchecker | linkcheck/containers.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/containers.py#L106-L113 | def get_true (self, key, default):
"""Return default element if key is not in the dict, or if self[key]
evaluates to False. Useful for example if value is None, but
default value should be an empty string.
"""
if key not in self or not self[key]:
return default
return self[key] | [
"def",
"get_true",
"(",
"self",
",",
"key",
",",
"default",
")",
":",
"if",
"key",
"not",
"in",
"self",
"or",
"not",
"self",
"[",
"key",
"]",
":",
"return",
"default",
"return",
"self",
"[",
"key",
"]"
] | Return default element if key is not in the dict, or if self[key]
evaluates to False. Useful for example if value is None, but
default value should be an empty string. | [
"Return",
"default",
"element",
"if",
"key",
"is",
"not",
"in",
"the",
"dict",
"or",
"if",
"self",
"[",
"key",
"]",
"evaluates",
"to",
"False",
".",
"Useful",
"for",
"example",
"if",
"value",
"is",
"None",
"but",
"default",
"value",
"should",
"be",
"an",
"empty",
"string",
"."
] | python | train |
pyparsing/pyparsing | examples/pymicko.py | https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/pymicko.py#L992-L999 | def function_body_action(self, text, loc, fun):
"""Code executed after recognising the beginning of function's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("FUN_BODY:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.codegen.function_body() | [
"def",
"function_body_action",
"(",
"self",
",",
"text",
",",
"loc",
",",
"fun",
")",
":",
"exshared",
".",
"setpos",
"(",
"loc",
",",
"text",
")",
"if",
"DEBUG",
">",
"0",
":",
"print",
"(",
"\"FUN_BODY:\"",
",",
"fun",
")",
"if",
"DEBUG",
"==",
"2",
":",
"self",
".",
"symtab",
".",
"display",
"(",
")",
"if",
"DEBUG",
">",
"2",
":",
"return",
"self",
".",
"codegen",
".",
"function_body",
"(",
")"
] | Code executed after recognising the beginning of function's body | [
"Code",
"executed",
"after",
"recognising",
"the",
"beginning",
"of",
"function",
"s",
"body"
] | python | train |
hayd/ctox | ctox/subst.py | https://github.com/hayd/ctox/blob/6f032488ad67170d57d025a830d7b967075b0d7f/ctox/subst.py#L33-L44 | def expand_curlys(s):
"""Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"]
"""
from functools import reduce
curleys = list(re.finditer(r"{[^{}]*}", s))
return reduce(_replace_curly, reversed(curleys), [s]) | [
"def",
"expand_curlys",
"(",
"s",
")",
":",
"from",
"functools",
"import",
"reduce",
"curleys",
"=",
"list",
"(",
"re",
".",
"finditer",
"(",
"r\"{[^{}]*}\"",
",",
"s",
")",
")",
"return",
"reduce",
"(",
"_replace_curly",
",",
"reversed",
"(",
"curleys",
")",
",",
"[",
"s",
"]",
")"
] | Takes string and returns list of options:
Example
-------
>>> expand_curlys("py{26, 27}")
["py26", "py27"] | [
"Takes",
"string",
"and",
"returns",
"list",
"of",
"options",
":"
] | python | train |
spyder-ide/spyder | spyder/plugins/projects/plugin.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/plugin.py#L405-L410 | def get_active_project_path(self):
"""Get path of the active project"""
active_project_path = None
if self.current_active_project:
active_project_path = self.current_active_project.root_path
return active_project_path | [
"def",
"get_active_project_path",
"(",
"self",
")",
":",
"active_project_path",
"=",
"None",
"if",
"self",
".",
"current_active_project",
":",
"active_project_path",
"=",
"self",
".",
"current_active_project",
".",
"root_path",
"return",
"active_project_path"
] | Get path of the active project | [
"Get",
"path",
"of",
"the",
"active",
"project"
] | python | train |
nerdvegas/rez | src/rez/solver.py | https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L2275-L2284 | def _short_req_str(package_request):
"""print shortened version of '==X|==Y|==Z' ranged requests."""
if not package_request.conflict:
versions = package_request.range.to_versions()
if versions and len(versions) == len(package_request.range) \
and len(versions) > 1:
return "%s-%s(%d)" % (package_request.name,
str(package_request.range.span()),
len(versions))
return str(package_request) | [
"def",
"_short_req_str",
"(",
"package_request",
")",
":",
"if",
"not",
"package_request",
".",
"conflict",
":",
"versions",
"=",
"package_request",
".",
"range",
".",
"to_versions",
"(",
")",
"if",
"versions",
"and",
"len",
"(",
"versions",
")",
"==",
"len",
"(",
"package_request",
".",
"range",
")",
"and",
"len",
"(",
"versions",
")",
">",
"1",
":",
"return",
"\"%s-%s(%d)\"",
"%",
"(",
"package_request",
".",
"name",
",",
"str",
"(",
"package_request",
".",
"range",
".",
"span",
"(",
")",
")",
",",
"len",
"(",
"versions",
")",
")",
"return",
"str",
"(",
"package_request",
")"
] | print shortened version of '==X|==Y|==Z' ranged requests. | [
"print",
"shortened",
"version",
"of",
"==",
"X|",
"==",
"Y|",
"==",
"Z",
"ranged",
"requests",
"."
] | python | train |
wickman/pystachio | pystachio/typing.py | https://github.com/wickman/pystachio/blob/601a2c36d7d67efa8f917e7cbf0ab8dc66c7827f/pystachio/typing.py#L77-L86 | def new(type_dict, type_factory, *type_parameters):
"""
Create a fully reified type from a type schema.
"""
type_tuple = (type_factory,) + type_parameters
if type_tuple not in type_dict:
factory = TypeFactory.get_factory(type_factory)
reified_type = factory.create(type_dict, *type_parameters)
type_dict[type_tuple] = reified_type
return type_dict[type_tuple] | [
"def",
"new",
"(",
"type_dict",
",",
"type_factory",
",",
"*",
"type_parameters",
")",
":",
"type_tuple",
"=",
"(",
"type_factory",
",",
")",
"+",
"type_parameters",
"if",
"type_tuple",
"not",
"in",
"type_dict",
":",
"factory",
"=",
"TypeFactory",
".",
"get_factory",
"(",
"type_factory",
")",
"reified_type",
"=",
"factory",
".",
"create",
"(",
"type_dict",
",",
"*",
"type_parameters",
")",
"type_dict",
"[",
"type_tuple",
"]",
"=",
"reified_type",
"return",
"type_dict",
"[",
"type_tuple",
"]"
] | Create a fully reified type from a type schema. | [
"Create",
"a",
"fully",
"reified",
"type",
"from",
"a",
"type",
"schema",
"."
] | python | train |
django-ses/django-ses | django_ses/views.py | https://github.com/django-ses/django-ses/blob/2f0fd8e3fdc76d3512982c0bb8e2f6e93e09fa3c/django_ses/views.py#L73-L82 | def emails_parse(emails_dict):
"""
Parse the output of ``SESConnection.list_verified_emails()`` and get
a list of emails.
"""
result = emails_dict['ListVerifiedEmailAddressesResponse'][
'ListVerifiedEmailAddressesResult']
emails = [email for email in result['VerifiedEmailAddresses']]
return sorted(emails) | [
"def",
"emails_parse",
"(",
"emails_dict",
")",
":",
"result",
"=",
"emails_dict",
"[",
"'ListVerifiedEmailAddressesResponse'",
"]",
"[",
"'ListVerifiedEmailAddressesResult'",
"]",
"emails",
"=",
"[",
"email",
"for",
"email",
"in",
"result",
"[",
"'VerifiedEmailAddresses'",
"]",
"]",
"return",
"sorted",
"(",
"emails",
")"
] | Parse the output of ``SESConnection.list_verified_emails()`` and get
a list of emails. | [
"Parse",
"the",
"output",
"of",
"SESConnection",
".",
"list_verified_emails",
"()",
"and",
"get",
"a",
"list",
"of",
"emails",
"."
] | python | train |
cathalgarvey/deadlock | deadlock/passwords/zxcvbn/scoring.py | https://github.com/cathalgarvey/deadlock/blob/30099b476ff767611ce617150a0c574fc03fdf79/deadlock/passwords/zxcvbn/scoring.py#L7-L21 | def binom(n, k):
"""
Returns binomial coefficient (n choose k).
"""
# http://blog.plover.com/math/choose.html
if k > n:
return 0
if k == 0:
return 1
result = 1
for denom in range(1, k + 1):
result *= n
result /= denom
n -= 1
return result | [
"def",
"binom",
"(",
"n",
",",
"k",
")",
":",
"# http://blog.plover.com/math/choose.html",
"if",
"k",
">",
"n",
":",
"return",
"0",
"if",
"k",
"==",
"0",
":",
"return",
"1",
"result",
"=",
"1",
"for",
"denom",
"in",
"range",
"(",
"1",
",",
"k",
"+",
"1",
")",
":",
"result",
"*=",
"n",
"result",
"/=",
"denom",
"n",
"-=",
"1",
"return",
"result"
] | Returns binomial coefficient (n choose k). | [
"Returns",
"binomial",
"coefficient",
"(",
"n",
"choose",
"k",
")",
"."
] | python | train |
GaryLee/cmdlet | cmdlet/cmds.py | https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L686-L688 | def join(prev, sep, *args, **kw):
'''alias of str.join'''
yield sep.join(prev, *args, **kw) | [
"def",
"join",
"(",
"prev",
",",
"sep",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"yield",
"sep",
".",
"join",
"(",
"prev",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | alias of str.join | [
"alias",
"of",
"str",
".",
"join"
] | python | valid |
matrix-org/matrix-python-sdk | matrix_client/api.py | https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/api.py#L287-L307 | def send_message_event(self, room_id, event_type, content, txn_id=None,
timestamp=None):
"""Perform PUT /rooms/$room_id/send/$event_type
Args:
room_id (str): The room ID to send the message event in.
event_type (str): The event type to send.
content (dict): The JSON content to send.
txn_id (int): Optional. The transaction ID to use.
timestamp (int): Set origin_server_ts (For application services only)
"""
if not txn_id:
txn_id = self._make_txn_id()
path = "/rooms/%s/send/%s/%s" % (
quote(room_id), quote(event_type), quote(str(txn_id)),
)
params = {}
if timestamp:
params["ts"] = timestamp
return self._send("PUT", path, content, query_params=params) | [
"def",
"send_message_event",
"(",
"self",
",",
"room_id",
",",
"event_type",
",",
"content",
",",
"txn_id",
"=",
"None",
",",
"timestamp",
"=",
"None",
")",
":",
"if",
"not",
"txn_id",
":",
"txn_id",
"=",
"self",
".",
"_make_txn_id",
"(",
")",
"path",
"=",
"\"/rooms/%s/send/%s/%s\"",
"%",
"(",
"quote",
"(",
"room_id",
")",
",",
"quote",
"(",
"event_type",
")",
",",
"quote",
"(",
"str",
"(",
"txn_id",
")",
")",
",",
")",
"params",
"=",
"{",
"}",
"if",
"timestamp",
":",
"params",
"[",
"\"ts\"",
"]",
"=",
"timestamp",
"return",
"self",
".",
"_send",
"(",
"\"PUT\"",
",",
"path",
",",
"content",
",",
"query_params",
"=",
"params",
")"
] | Perform PUT /rooms/$room_id/send/$event_type
Args:
room_id (str): The room ID to send the message event in.
event_type (str): The event type to send.
content (dict): The JSON content to send.
txn_id (int): Optional. The transaction ID to use.
timestamp (int): Set origin_server_ts (For application services only) | [
"Perform",
"PUT",
"/",
"rooms",
"/",
"$room_id",
"/",
"send",
"/",
"$event_type"
] | python | train |
PmagPy/PmagPy | programs/chartmaker.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/chartmaker.py#L9-L33 | def main():
"""
Welcome to the thellier-thellier experiment automatic chart maker.
Please select desired step interval and upper bound for which it is valid.
e.g.,
50
500
10
600
a blank entry signals the end of data entry.
which would generate steps with 50 degree intervals up to 500, followed by 10 degree intervals up to 600.
chart is stored in: chart.txt
"""
print(main.__doc__)
if '-h' in sys.argv:sys.exit()
cont,Int,Top=1,[],[]
while cont==1:
try:
Int.append(int(input(" Enter desired treatment step interval: <return> to quit ")))
Top.append(int(input(" Enter upper bound for this interval: ")))
except:
cont=0
pmag.chart_maker(Int,Top) | [
"def",
"main",
"(",
")",
":",
"print",
"(",
"main",
".",
"__doc__",
")",
"if",
"'-h'",
"in",
"sys",
".",
"argv",
":",
"sys",
".",
"exit",
"(",
")",
"cont",
",",
"Int",
",",
"Top",
"=",
"1",
",",
"[",
"]",
",",
"[",
"]",
"while",
"cont",
"==",
"1",
":",
"try",
":",
"Int",
".",
"append",
"(",
"int",
"(",
"input",
"(",
"\" Enter desired treatment step interval: <return> to quit \"",
")",
")",
")",
"Top",
".",
"append",
"(",
"int",
"(",
"input",
"(",
"\" Enter upper bound for this interval: \"",
")",
")",
")",
"except",
":",
"cont",
"=",
"0",
"pmag",
".",
"chart_maker",
"(",
"Int",
",",
"Top",
")"
] | Welcome to the thellier-thellier experiment automatic chart maker.
Please select desired step interval and upper bound for which it is valid.
e.g.,
50
500
10
600
a blank entry signals the end of data entry.
which would generate steps with 50 degree intervals up to 500, followed by 10 degree intervals up to 600.
chart is stored in: chart.txt | [
"Welcome",
"to",
"the",
"thellier",
"-",
"thellier",
"experiment",
"automatic",
"chart",
"maker",
".",
"Please",
"select",
"desired",
"step",
"interval",
"and",
"upper",
"bound",
"for",
"which",
"it",
"is",
"valid",
".",
"e",
".",
"g",
".",
"50",
"500",
"10",
"600",
"a",
"blank",
"entry",
"signals",
"the",
"end",
"of",
"data",
"entry",
".",
"which",
"would",
"generate",
"steps",
"with",
"50",
"degree",
"intervals",
"up",
"to",
"500",
"followed",
"by",
"10",
"degree",
"intervals",
"up",
"to",
"600",
".",
"chart",
"is",
"stored",
"in",
":",
"chart",
".",
"txt"
] | python | train |
zsethna/OLGA | olga/generation_probability.py | https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/generation_probability.py#L472-L601 | def list_seqs_from_regex(self, regex_seq, print_warnings = True, raise_overload_warning = True):
"""List sequences that match regular expression template.
This function parses a limited regular expression vocabulary, and
lists all the sequences consistent with the regular expression. Supported
regex syntax: [] and {}. Cannot have two {} in a row. Note we can't use
        Kleene star (*) as this is the symbol for a stop codon --- use {}.
Parameters
----------
regex_seq : str
The regular expression string that represents the sequences to be
listed.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
raise_overload_warning : bool
A flag to warn of more than 10000 seqs corresponding to the regex_seq
Returns
-------
CDR3_seqs : list
A list of CDR3 sequences that correspond to the regex_seq
Examples
--------
>>> generation_probability.list_seqs_from_regex('CASS[AGR]SARPEQFF')
['CASSGSARPEQFF', 'CASSRSARPEQFF', 'CASSASARPEQFF']
>>> generation_probability.list_seqs_from_regex('CASSAX{0,5}SARPEQFF')
['CASSASARPEQFF',
'CASSAXXXXSARPEQFF',
'CASSAXXSARPEQFF',
'CASSAXXXXXSARPEQFF',
'CASSAXXXSARPEQFF',
'CASSAXSARPEQFF']
"""
aa_symbols = ''.join(self.codons_dict)
default_max_reps = 40
#Check to make sure that expression is of the right form/symbols
#Identify bracket expressions
bracket_ex = [x for x in re.findall('\[[' + aa_symbols + ']*?\]|\{\d+,{0,1}\d*\}', regex_seq)]
split_seq = re.split('\[[' + aa_symbols + ']*?\]|\{\d+,{0,1}\d*\}', regex_seq)
#Check that all remaining characters are in the codon dict
for aa in ''.join(split_seq):
if aa not in aa_symbols:
if print_warnings:
print 'Unfamiliar symbol representing a codon:' + aa + ' --- check codon dictionary or the regex syntax'
return []
regex_list = [split_seq[i/2] if i%2 == 0 else bracket_ex[i/2] for i in range(len(bracket_ex) + len(split_seq)) if not (i%2 == 0 and len(split_seq[i/2]) == 0)]
max_num_seqs = 1
for l, ex in enumerate(regex_list[::-1]):
i = len(regex_list) - l - 1
if ex[0] == '[': #bracket expression
#check characters
for aa in ex.strip('[]'):
if aa not in aa_symbols:
if print_warnings:
print 'Unfamiliar symbol representing a codon:' + aa + ' --- check codon dictionary'
return []
max_num_seqs *= len(ex) - 2
elif ex[0] == '{': #curly bracket
if i == 0:
if print_warnings:
print "Can't have {} expression at start of sequence"
return []
elif isinstance(regex_list[i-1], list):
if print_warnings:
print "Two {} expressions in a row is not supported"
return []
elif regex_list[i-1][0] == '[':
syms = regex_list[i-1].strip('[]')
regex_list[i-1] = ''
else:
syms = regex_list[i-1][-1]
regex_list[i-1] = regex_list[i-1][:-1]
if ',' not in ex:
new_expression = [int(ex.strip('{}')), int(ex.strip('{}')), syms]
max_num_seqs *= len(syms)**new_expression[0]
else:
try:
new_expression = [int(ex.strip('{}').split(',')[0]), int(ex.strip('{}').split(',')[1]), syms]
except ValueError: #No max limit --- use default
new_expression = [int(ex.strip('{}').split(',')[0]), default_max_reps, syms]
if new_expression[0] > new_expression[1]:
if print_warnings:
print 'Check regex syntax --- should be {min,max}'
return []
max_num_seqs *= sum([len(syms)**n for n in range(new_expression[0], new_expression[1]+1)])/len(syms)
#print new_expression
regex_list[i] = new_expression
if max_num_seqs > 10000 and raise_overload_warning:
if print_warnings:
answer = raw_input('Warning large number of sequences (estimated ' + str(max_num_seqs) + ' seqs) match the regular expression. Possible memory and time issues. Continue? (y/n)')
if not answer == 'y':
print 'Canceling...'
return []
else:
return []
#print regex_list
CDR3_seqs = ['']
for l, ex in enumerate(regex_list[::-1]):
i = len(regex_list) - l - 1
if isinstance(ex, list): #curly bracket case
c_seqs = ['']
f_seqs = []
for j in range(ex[1] + 1):
if j in range(ex[0], ex[1]+1):
f_seqs += c_seqs
c_seqs = [aa + c_seq for aa in ex[2] for c_seq in c_seqs]
CDR3_seqs = [f_seq + CDR3_seq for f_seq in f_seqs for CDR3_seq in CDR3_seqs]
elif len(ex) == 0:
pass
elif ex[0] == '[': #square bracket case
CDR3_seqs = [aa + CDR3_seq for aa in ex.strip('[]') for CDR3_seq in CDR3_seqs]
else:
CDR3_seqs = [ex + CDR3_seq for CDR3_seq in CDR3_seqs]
return list(set(CDR3_seqs)) | [
"def",
"list_seqs_from_regex",
"(",
"self",
",",
"regex_seq",
",",
"print_warnings",
"=",
"True",
",",
"raise_overload_warning",
"=",
"True",
")",
":",
"aa_symbols",
"=",
"''",
".",
"join",
"(",
"self",
".",
"codons_dict",
")",
"default_max_reps",
"=",
"40",
"#Check to make sure that expression is of the right form/symbols",
"#Identify bracket expressions",
"bracket_ex",
"=",
"[",
"x",
"for",
"x",
"in",
"re",
".",
"findall",
"(",
"'\\[['",
"+",
"aa_symbols",
"+",
"']*?\\]|\\{\\d+,{0,1}\\d*\\}'",
",",
"regex_seq",
")",
"]",
"split_seq",
"=",
"re",
".",
"split",
"(",
"'\\[['",
"+",
"aa_symbols",
"+",
"']*?\\]|\\{\\d+,{0,1}\\d*\\}'",
",",
"regex_seq",
")",
"#Check that all remaining characters are in the codon dict",
"for",
"aa",
"in",
"''",
".",
"join",
"(",
"split_seq",
")",
":",
"if",
"aa",
"not",
"in",
"aa_symbols",
":",
"if",
"print_warnings",
":",
"print",
"'Unfamiliar symbol representing a codon:'",
"+",
"aa",
"+",
"' --- check codon dictionary or the regex syntax'",
"return",
"[",
"]",
"regex_list",
"=",
"[",
"split_seq",
"[",
"i",
"/",
"2",
"]",
"if",
"i",
"%",
"2",
"==",
"0",
"else",
"bracket_ex",
"[",
"i",
"/",
"2",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"bracket_ex",
")",
"+",
"len",
"(",
"split_seq",
")",
")",
"if",
"not",
"(",
"i",
"%",
"2",
"==",
"0",
"and",
"len",
"(",
"split_seq",
"[",
"i",
"/",
"2",
"]",
")",
"==",
"0",
")",
"]",
"max_num_seqs",
"=",
"1",
"for",
"l",
",",
"ex",
"in",
"enumerate",
"(",
"regex_list",
"[",
":",
":",
"-",
"1",
"]",
")",
":",
"i",
"=",
"len",
"(",
"regex_list",
")",
"-",
"l",
"-",
"1",
"if",
"ex",
"[",
"0",
"]",
"==",
"'['",
":",
"#bracket expression",
"#check characters",
"for",
"aa",
"in",
"ex",
".",
"strip",
"(",
"'[]'",
")",
":",
"if",
"aa",
"not",
"in",
"aa_symbols",
":",
"if",
"print_warnings",
":",
"print",
"'Unfamiliar symbol representing a codon:'",
"+",
"aa",
"+",
"' --- check codon dictionary'",
"return",
"[",
"]",
"max_num_seqs",
"*=",
"len",
"(",
"ex",
")",
"-",
"2",
"elif",
"ex",
"[",
"0",
"]",
"==",
"'{'",
":",
"#curly bracket",
"if",
"i",
"==",
"0",
":",
"if",
"print_warnings",
":",
"print",
"\"Can't have {} expression at start of sequence\"",
"return",
"[",
"]",
"elif",
"isinstance",
"(",
"regex_list",
"[",
"i",
"-",
"1",
"]",
",",
"list",
")",
":",
"if",
"print_warnings",
":",
"print",
"\"Two {} expressions in a row is not supported\"",
"return",
"[",
"]",
"elif",
"regex_list",
"[",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"'['",
":",
"syms",
"=",
"regex_list",
"[",
"i",
"-",
"1",
"]",
".",
"strip",
"(",
"'[]'",
")",
"regex_list",
"[",
"i",
"-",
"1",
"]",
"=",
"''",
"else",
":",
"syms",
"=",
"regex_list",
"[",
"i",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"regex_list",
"[",
"i",
"-",
"1",
"]",
"=",
"regex_list",
"[",
"i",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"if",
"','",
"not",
"in",
"ex",
":",
"new_expression",
"=",
"[",
"int",
"(",
"ex",
".",
"strip",
"(",
"'{}'",
")",
")",
",",
"int",
"(",
"ex",
".",
"strip",
"(",
"'{}'",
")",
")",
",",
"syms",
"]",
"max_num_seqs",
"*=",
"len",
"(",
"syms",
")",
"**",
"new_expression",
"[",
"0",
"]",
"else",
":",
"try",
":",
"new_expression",
"=",
"[",
"int",
"(",
"ex",
".",
"strip",
"(",
"'{}'",
")",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
")",
",",
"int",
"(",
"ex",
".",
"strip",
"(",
"'{}'",
")",
".",
"split",
"(",
"','",
")",
"[",
"1",
"]",
")",
",",
"syms",
"]",
"except",
"ValueError",
":",
"#No max limit --- use default",
"new_expression",
"=",
"[",
"int",
"(",
"ex",
".",
"strip",
"(",
"'{}'",
")",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
")",
",",
"default_max_reps",
",",
"syms",
"]",
"if",
"new_expression",
"[",
"0",
"]",
">",
"new_expression",
"[",
"1",
"]",
":",
"if",
"print_warnings",
":",
"print",
"'Check regex syntax --- should be {min,max}'",
"return",
"[",
"]",
"max_num_seqs",
"*=",
"sum",
"(",
"[",
"len",
"(",
"syms",
")",
"**",
"n",
"for",
"n",
"in",
"range",
"(",
"new_expression",
"[",
"0",
"]",
",",
"new_expression",
"[",
"1",
"]",
"+",
"1",
")",
"]",
")",
"/",
"len",
"(",
"syms",
")",
"#print new_expression",
"regex_list",
"[",
"i",
"]",
"=",
"new_expression",
"if",
"max_num_seqs",
">",
"10000",
"and",
"raise_overload_warning",
":",
"if",
"print_warnings",
":",
"answer",
"=",
"raw_input",
"(",
"'Warning large number of sequences (estimated '",
"+",
"str",
"(",
"max_num_seqs",
")",
"+",
"' seqs) match the regular expression. Possible memory and time issues. Continue? (y/n)'",
")",
"if",
"not",
"answer",
"==",
"'y'",
":",
"print",
"'Canceling...'",
"return",
"[",
"]",
"else",
":",
"return",
"[",
"]",
"#print regex_list",
"CDR3_seqs",
"=",
"[",
"''",
"]",
"for",
"l",
",",
"ex",
"in",
"enumerate",
"(",
"regex_list",
"[",
":",
":",
"-",
"1",
"]",
")",
":",
"i",
"=",
"len",
"(",
"regex_list",
")",
"-",
"l",
"-",
"1",
"if",
"isinstance",
"(",
"ex",
",",
"list",
")",
":",
"#curly bracket case",
"c_seqs",
"=",
"[",
"''",
"]",
"f_seqs",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"ex",
"[",
"1",
"]",
"+",
"1",
")",
":",
"if",
"j",
"in",
"range",
"(",
"ex",
"[",
"0",
"]",
",",
"ex",
"[",
"1",
"]",
"+",
"1",
")",
":",
"f_seqs",
"+=",
"c_seqs",
"c_seqs",
"=",
"[",
"aa",
"+",
"c_seq",
"for",
"aa",
"in",
"ex",
"[",
"2",
"]",
"for",
"c_seq",
"in",
"c_seqs",
"]",
"CDR3_seqs",
"=",
"[",
"f_seq",
"+",
"CDR3_seq",
"for",
"f_seq",
"in",
"f_seqs",
"for",
"CDR3_seq",
"in",
"CDR3_seqs",
"]",
"elif",
"len",
"(",
"ex",
")",
"==",
"0",
":",
"pass",
"elif",
"ex",
"[",
"0",
"]",
"==",
"'['",
":",
"#square bracket case",
"CDR3_seqs",
"=",
"[",
"aa",
"+",
"CDR3_seq",
"for",
"aa",
"in",
"ex",
".",
"strip",
"(",
"'[]'",
")",
"for",
"CDR3_seq",
"in",
"CDR3_seqs",
"]",
"else",
":",
"CDR3_seqs",
"=",
"[",
"ex",
"+",
"CDR3_seq",
"for",
"CDR3_seq",
"in",
"CDR3_seqs",
"]",
"return",
"list",
"(",
"set",
"(",
"CDR3_seqs",
")",
")"
] | List sequences that match regular expression template.
This function parses a limited regular expression vocabulary, and
lists all the sequences consistent with the regular expression. Supported
regex syntax: [] and {}. Cannot have two {} in a row. Note we can't use
        Kleene star (*) as this is the symbol for a stop codon --- use {}.
Parameters
----------
regex_seq : str
The regular expression string that represents the sequences to be
listed.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
raise_overload_warning : bool
A flag to warn of more than 10000 seqs corresponding to the regex_seq
Returns
-------
CDR3_seqs : list
A list of CDR3 sequences that correspond to the regex_seq
Examples
--------
>>> generation_probability.list_seqs_from_regex('CASS[AGR]SARPEQFF')
['CASSGSARPEQFF', 'CASSRSARPEQFF', 'CASSASARPEQFF']
>>> generation_probability.list_seqs_from_regex('CASSAX{0,5}SARPEQFF')
['CASSASARPEQFF',
'CASSAXXXXSARPEQFF',
'CASSAXXSARPEQFF',
'CASSAXXXXXSARPEQFF',
'CASSAXXXSARPEQFF',
'CASSAXSARPEQFF'] | [
"List",
"sequences",
"that",
"match",
"regular",
"expression",
"template",
".",
"This",
"function",
"parses",
"a",
"limited",
"regular",
"expression",
"vocabulary",
"and",
"lists",
"all",
"the",
"sequences",
"consistent",
"with",
"the",
"regular",
"expression",
".",
"Supported",
"regex",
"syntax",
":",
"[]",
"and",
"{}",
".",
"Cannot",
"have",
"two",
"{}",
"in",
"a",
"row",
".",
"Note",
"we",
"can",
"t",
"use",
"Kline",
"star",
"(",
"*",
")",
"as",
"this",
"is",
"the",
"symbol",
"for",
"a",
"stop",
"codon",
"---",
"use",
"{}",
".",
"Parameters",
"----------",
"regex_seq",
":",
"str",
"The",
"regular",
"expression",
"string",
"that",
"represents",
"the",
"sequences",
"to",
"be",
"listed",
".",
"print_warnings",
":",
"bool",
"Determines",
"whether",
"warnings",
"are",
"printed",
"or",
"not",
".",
"Default",
"ON",
".",
"raise_overload_warning",
":",
"bool",
"A",
"flag",
"to",
"warn",
"of",
"more",
"than",
"10000",
"seqs",
"corresponding",
"to",
"the",
"regex_seq",
"Returns",
"-------",
"CDR3_seqs",
":",
"list",
"A",
"list",
"of",
"CDR3",
"sequences",
"that",
"correspond",
"to",
"the",
"regex_seq",
"Examples",
"--------",
">>>",
"generation_probability",
".",
"list_seqs_from_regex",
"(",
"CASS",
"[",
"AGR",
"]",
"SARPEQFF",
")",
"[",
"CASSGSARPEQFF",
"CASSRSARPEQFF",
"CASSASARPEQFF",
"]",
">>>",
"generation_probability",
".",
"list_seqs_from_regex",
"(",
"CASSAX",
"{",
"0",
"5",
"}",
"SARPEQFF",
")",
"[",
"CASSASARPEQFF",
"CASSAXXXXSARPEQFF",
"CASSAXXSARPEQFF",
"CASSAXXXXXSARPEQFF",
"CASSAXXXSARPEQFF",
"CASSAXSARPEQFF",
"]"
] | python | train |
jtwhite79/pyemu | pyemu/sc.py | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/sc.py#L501-L532 | def get_par_group_contribution(self, include_prior_results=False):
"""get the forecast uncertainty contribution from each parameter
group. Just some sugar for get_contribution_dataframe() - this method
automatically constructs the parlist_dict argument where the keys are the
group names and the values are the adjustable parameters in the groups
Parameters
----------
include_prior_results : bool
flag to return a multi-indexed dataframe with both conditional
prior and posterior forecast uncertainty estimates. Default is False
Returns
-------
pandas.DataFrame : pandas.DataFrame
a dataframe that summarizes the parameter contribution analysis.
The dataframe has index (row labels) that are the parameter groups
and a column labels of forecast names. The values in the dataframe
are the posterior variance of the forecast conditional on perfect
knowledge of the adjustable parameters in each parameter groups
Varies depending on `include_prior_results`.
"""
pargrp_dict = {}
par = self.pst.parameter_data
groups = par.groupby("pargp").groups
for grp,idxs in groups.items():
#pargrp_dict[grp] = list(par.loc[idxs,"parnme"])
pargrp_dict[grp] = [pname for pname in list(par.loc[idxs,"parnme"])
if pname in self.jco.col_names and pname in self.parcov.row_names]
return self.get_par_contribution(pargrp_dict,include_prior_results=include_prior_results) | [
"def",
"get_par_group_contribution",
"(",
"self",
",",
"include_prior_results",
"=",
"False",
")",
":",
"pargrp_dict",
"=",
"{",
"}",
"par",
"=",
"self",
".",
"pst",
".",
"parameter_data",
"groups",
"=",
"par",
".",
"groupby",
"(",
"\"pargp\"",
")",
".",
"groups",
"for",
"grp",
",",
"idxs",
"in",
"groups",
".",
"items",
"(",
")",
":",
"#pargrp_dict[grp] = list(par.loc[idxs,\"parnme\"])",
"pargrp_dict",
"[",
"grp",
"]",
"=",
"[",
"pname",
"for",
"pname",
"in",
"list",
"(",
"par",
".",
"loc",
"[",
"idxs",
",",
"\"parnme\"",
"]",
")",
"if",
"pname",
"in",
"self",
".",
"jco",
".",
"col_names",
"and",
"pname",
"in",
"self",
".",
"parcov",
".",
"row_names",
"]",
"return",
"self",
".",
"get_par_contribution",
"(",
"pargrp_dict",
",",
"include_prior_results",
"=",
"include_prior_results",
")"
] | get the forecast uncertainty contribution from each parameter
group. Just some sugar for get_contribution_dataframe() - this method
automatically constructs the parlist_dict argument where the keys are the
group names and the values are the adjustable parameters in the groups
Parameters
----------
include_prior_results : bool
flag to return a multi-indexed dataframe with both conditional
prior and posterior forecast uncertainty estimates. Default is False
Returns
-------
pandas.DataFrame : pandas.DataFrame
a dataframe that summarizes the parameter contribution analysis.
The dataframe has index (row labels) that are the parameter groups
and a column labels of forecast names. The values in the dataframe
are the posterior variance of the forecast conditional on perfect
knowledge of the adjustable parameters in each parameter groups
Varies depending on `include_prior_results`. | [
"get",
"the",
"forecast",
"uncertainty",
"contribution",
"from",
"each",
"parameter",
"group",
".",
"Just",
"some",
"sugar",
"for",
"get_contribution_dataframe",
"()",
"-",
"this",
"method",
"automatically",
"constructs",
"the",
"parlist_dict",
"argument",
"where",
"the",
"keys",
"are",
"the",
"group",
"names",
"and",
"the",
"values",
"are",
"the",
"adjustable",
"parameters",
"in",
"the",
"groups"
] | python | train |
DAI-Lab/Copulas | copulas/multivariate/vine.py | https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/vine.py#L139-L219 | def _sample_row(self):
"""Generate a single sampled row from vine model.
Returns:
numpy.ndarray
"""
unis = np.random.uniform(0, 1, self.n_var)
# randomly select a node to start with
first_ind = np.random.randint(0, self.n_var)
adj = self.trees[0].get_adjacent_matrix()
visited = []
explore = [first_ind]
sampled = np.zeros(self.n_var)
itr = 0
while explore:
current = explore.pop(0)
neighbors = np.where(adj[current, :] == 1)[0].tolist()
if itr == 0:
new_x = self.ppfs[current](unis[current])
else:
for i in range(itr - 1, -1, -1):
current_ind = -1
if i >= self.truncated:
continue
current_tree = self.trees[i].edges
# get index of edge to retrieve
for edge in current_tree:
if i == 0:
if (edge.L == current and edge.R == visited[0]) or\
(edge.R == current and edge.L == visited[0]):
current_ind = edge.index
break
else:
if edge.L == current or edge.R == current:
condition = set(edge.D)
condition.add(edge.L)
condition.add(edge.R)
visit_set = set(visited)
visit_set.add(current)
if condition.issubset(visit_set):
current_ind = edge.index
break
if current_ind != -1:
                    # the node is not independent conditional on visited node
copula_type = current_tree[current_ind].name
copula = Bivariate(CopulaTypes(copula_type))
copula.theta = current_tree[current_ind].theta
derivative = copula.partial_derivative_scalar
if i == itr - 1:
tmp = optimize.fminbound(
derivative, EPSILON, 1.0,
args=(unis[visited[0]], unis[current])
)
else:
tmp = optimize.fminbound(
derivative, EPSILON, 1.0,
args=(unis[visited[0]], tmp)
)
tmp = min(max(tmp, EPSILON), 0.99)
new_x = self.ppfs[current](tmp)
sampled[current] = new_x
for s in neighbors:
if s not in visited:
explore.insert(0, s)
itr += 1
visited.insert(0, current)
return sampled | [
"def",
"_sample_row",
"(",
"self",
")",
":",
"unis",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
",",
"self",
".",
"n_var",
")",
"# randomly select a node to start with",
"first_ind",
"=",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"self",
".",
"n_var",
")",
"adj",
"=",
"self",
".",
"trees",
"[",
"0",
"]",
".",
"get_adjacent_matrix",
"(",
")",
"visited",
"=",
"[",
"]",
"explore",
"=",
"[",
"first_ind",
"]",
"sampled",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"n_var",
")",
"itr",
"=",
"0",
"while",
"explore",
":",
"current",
"=",
"explore",
".",
"pop",
"(",
"0",
")",
"neighbors",
"=",
"np",
".",
"where",
"(",
"adj",
"[",
"current",
",",
":",
"]",
"==",
"1",
")",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"if",
"itr",
"==",
"0",
":",
"new_x",
"=",
"self",
".",
"ppfs",
"[",
"current",
"]",
"(",
"unis",
"[",
"current",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"itr",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"current_ind",
"=",
"-",
"1",
"if",
"i",
">=",
"self",
".",
"truncated",
":",
"continue",
"current_tree",
"=",
"self",
".",
"trees",
"[",
"i",
"]",
".",
"edges",
"# get index of edge to retrieve",
"for",
"edge",
"in",
"current_tree",
":",
"if",
"i",
"==",
"0",
":",
"if",
"(",
"edge",
".",
"L",
"==",
"current",
"and",
"edge",
".",
"R",
"==",
"visited",
"[",
"0",
"]",
")",
"or",
"(",
"edge",
".",
"R",
"==",
"current",
"and",
"edge",
".",
"L",
"==",
"visited",
"[",
"0",
"]",
")",
":",
"current_ind",
"=",
"edge",
".",
"index",
"break",
"else",
":",
"if",
"edge",
".",
"L",
"==",
"current",
"or",
"edge",
".",
"R",
"==",
"current",
":",
"condition",
"=",
"set",
"(",
"edge",
".",
"D",
")",
"condition",
".",
"add",
"(",
"edge",
".",
"L",
")",
"condition",
".",
"add",
"(",
"edge",
".",
"R",
")",
"visit_set",
"=",
"set",
"(",
"visited",
")",
"visit_set",
".",
"add",
"(",
"current",
")",
"if",
"condition",
".",
"issubset",
"(",
"visit_set",
")",
":",
"current_ind",
"=",
"edge",
".",
"index",
"break",
"if",
"current_ind",
"!=",
"-",
"1",
":",
"# the node is not indepedent contional on visited node",
"copula_type",
"=",
"current_tree",
"[",
"current_ind",
"]",
".",
"name",
"copula",
"=",
"Bivariate",
"(",
"CopulaTypes",
"(",
"copula_type",
")",
")",
"copula",
".",
"theta",
"=",
"current_tree",
"[",
"current_ind",
"]",
".",
"theta",
"derivative",
"=",
"copula",
".",
"partial_derivative_scalar",
"if",
"i",
"==",
"itr",
"-",
"1",
":",
"tmp",
"=",
"optimize",
".",
"fminbound",
"(",
"derivative",
",",
"EPSILON",
",",
"1.0",
",",
"args",
"=",
"(",
"unis",
"[",
"visited",
"[",
"0",
"]",
"]",
",",
"unis",
"[",
"current",
"]",
")",
")",
"else",
":",
"tmp",
"=",
"optimize",
".",
"fminbound",
"(",
"derivative",
",",
"EPSILON",
",",
"1.0",
",",
"args",
"=",
"(",
"unis",
"[",
"visited",
"[",
"0",
"]",
"]",
",",
"tmp",
")",
")",
"tmp",
"=",
"min",
"(",
"max",
"(",
"tmp",
",",
"EPSILON",
")",
",",
"0.99",
")",
"new_x",
"=",
"self",
".",
"ppfs",
"[",
"current",
"]",
"(",
"tmp",
")",
"sampled",
"[",
"current",
"]",
"=",
"new_x",
"for",
"s",
"in",
"neighbors",
":",
"if",
"s",
"not",
"in",
"visited",
":",
"explore",
".",
"insert",
"(",
"0",
",",
"s",
")",
"itr",
"+=",
"1",
"visited",
".",
"insert",
"(",
"0",
",",
"current",
")",
"return",
"sampled"
] | Generate a single sampled row from vine model.
Returns:
numpy.ndarray | [
"Generate",
"a",
"single",
"sampled",
"row",
"from",
"vine",
"model",
"."
] | python | train |
ns1/ns1-python | ns1/__init__.py | https://github.com/ns1/ns1-python/blob/f3e1d90a3b76a1bd18f855f2c622a8a49d4b585e/ns1/__init__.py#L62-L69 | def addresses(self):
"""
Return a new raw REST interface to address resources
        :rtype: :py:class:`ns1.rest.ipam.Addresses`
"""
import ns1.rest.ipam
return ns1.rest.ipam.Addresses(self.config) | [
"def",
"addresses",
"(",
"self",
")",
":",
"import",
"ns1",
".",
"rest",
".",
"ipam",
"return",
"ns1",
".",
"rest",
".",
"ipam",
".",
"Addresses",
"(",
"self",
".",
"config",
")"
] | Return a new raw REST interface to address resources
        :rtype: :py:class:`ns1.rest.ipam.Addresses` | [
"Return",
"a",
"new",
"raw",
"REST",
"interface",
"to",
"address",
"resources"
] | python | train |
cbrand/vpnchooser | src/vpnchooser/connection/client.py | https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L137-L151 | def _write_to_server(self, rules: list):
"""
Writes the given ruleset to the
server configuration file.
:type rules: collections.Iterable[Rule]
"""
self._create_directory_structure()
config_data = '\n'.join(rule.config_string for rule in rules)
sftp = self.client.open_sftp()
try:
with sftp.open(self.rule_location, 'w') as file_handle:
file_handle.write(config_data)
file_handle.write('\n')
finally:
sftp.close() | [
"def",
"_write_to_server",
"(",
"self",
",",
"rules",
":",
"list",
")",
":",
"self",
".",
"_create_directory_structure",
"(",
")",
"config_data",
"=",
"'\\n'",
".",
"join",
"(",
"rule",
".",
"config_string",
"for",
"rule",
"in",
"rules",
")",
"sftp",
"=",
"self",
".",
"client",
".",
"open_sftp",
"(",
")",
"try",
":",
"with",
"sftp",
".",
"open",
"(",
"self",
".",
"rule_location",
",",
"'w'",
")",
"as",
"file_handle",
":",
"file_handle",
".",
"write",
"(",
"config_data",
")",
"file_handle",
".",
"write",
"(",
"'\\n'",
")",
"finally",
":",
"sftp",
".",
"close",
"(",
")"
] | Writes the given ruleset to the
server configuration file.
:type rules: collections.Iterable[Rule] | [
"Writes",
"the",
"given",
"ruleset",
"to",
"the",
"server",
"configuration",
"file",
".",
":",
"type",
"rules",
":",
"collections",
".",
"Iterable",
"[",
"Rule",
"]"
] | python | train |
ton/stash | stash/stash.py | https://github.com/ton/stash/blob/31cd8269aa8e051f094eccb094946eda6f6d428e/stash/stash.py#L39-L47 | def remove_patch(cls, patch_name):
"""Removes patch *patch_name* from the stash (in case it exists).
:raises: :py:exc:`~stash.exception.StashException` in case *patch_name* does not exist.
"""
try:
os.unlink(cls._get_patch_path(patch_name))
except:
raise StashException("patch '%s' does not exist" % patch_name) | [
"def",
"remove_patch",
"(",
"cls",
",",
"patch_name",
")",
":",
"try",
":",
"os",
".",
"unlink",
"(",
"cls",
".",
"_get_patch_path",
"(",
"patch_name",
")",
")",
"except",
":",
"raise",
"StashException",
"(",
"\"patch '%s' does not exist\"",
"%",
"patch_name",
")"
] | Removes patch *patch_name* from the stash (in case it exists).
:raises: :py:exc:`~stash.exception.StashException` in case *patch_name* does not exist. | [
"Removes",
"patch",
"*",
"patch_name",
"*",
"from",
"the",
"stash",
"(",
"in",
"case",
"it",
"exists",
")",
"."
] | python | train |
Alignak-monitoring/alignak | alignak/objects/timeperiod.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/timeperiod.py#L899-L914 | def check_exclude_rec(self):
# pylint: disable=access-member-before-definition
"""
Check if this timeperiod is tagged
        :return: False if tagged, True otherwise
:rtype: bool
"""
if self.rec_tag:
msg = "[timeentry::%s] is in a loop in exclude parameter" % (self.get_name())
self.add_error(msg)
return False
self.rec_tag = True
for timeperiod in self.exclude:
timeperiod.check_exclude_rec()
return True | [
"def",
"check_exclude_rec",
"(",
"self",
")",
":",
"# pylint: disable=access-member-before-definition",
"if",
"self",
".",
"rec_tag",
":",
"msg",
"=",
"\"[timeentry::%s] is in a loop in exclude parameter\"",
"%",
"(",
"self",
".",
"get_name",
"(",
")",
")",
"self",
".",
"add_error",
"(",
"msg",
")",
"return",
"False",
"self",
".",
"rec_tag",
"=",
"True",
"for",
"timeperiod",
"in",
"self",
".",
"exclude",
":",
"timeperiod",
".",
"check_exclude_rec",
"(",
")",
"return",
"True"
] | Check if this timeperiod is tagged
        :return: False if tagged, True otherwise
:rtype: bool | [
"Check",
"if",
"this",
"timeperiod",
"is",
"tagged"
] | python | train |
miku/gluish | gluish/common.py | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/common.py#L51-L60 | def getfirstline(file, default):
"""
Returns the first line of a file.
"""
with open(file, 'rb') as fh:
content = fh.readlines()
if len(content) == 1:
return content[0].decode('utf-8').strip('\n')
return default | [
"def",
"getfirstline",
"(",
"file",
",",
"default",
")",
":",
"with",
"open",
"(",
"file",
",",
"'rb'",
")",
"as",
"fh",
":",
"content",
"=",
"fh",
".",
"readlines",
"(",
")",
"if",
"len",
"(",
"content",
")",
"==",
"1",
":",
"return",
"content",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
"'\\n'",
")",
"return",
"default"
] | Returns the first line of a file. | [
"Returns",
"the",
"first",
"line",
"of",
"a",
"file",
"."
] | python | train |
TestInABox/stackInABox | stackinabox/services/service.py | https://github.com/TestInABox/stackInABox/blob/63ee457401e9a88d987f85f513eb512dcb12d984/stackinabox/services/service.py#L425-L438 | def register(self, method, uri, call_back):
"""Register a class instance function to handle a request.
:param method: string - HTTP Verb
:param uri: string - URI for the request
:param call_back: class instance function that handles the request
:returns: n/a
"""
found = False
self.create_route(uri, False)
self.routes[uri]['handlers'].register_method(method,
call_back) | [
"def",
"register",
"(",
"self",
",",
"method",
",",
"uri",
",",
"call_back",
")",
":",
"found",
"=",
"False",
"self",
".",
"create_route",
"(",
"uri",
",",
"False",
")",
"self",
".",
"routes",
"[",
"uri",
"]",
"[",
"'handlers'",
"]",
".",
"register_method",
"(",
"method",
",",
"call_back",
")"
] | Register a class instance function to handle a request.
:param method: string - HTTP Verb
:param uri: string - URI for the request
:param call_back: class instance function that handles the request
:returns: n/a | [
"Register",
"a",
"class",
"instance",
"function",
"to",
"handle",
"a",
"request",
"."
] | python | train |
python-diamond/Diamond | src/collectors/nfsd/nfsd.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/nfsd/nfsd.py#L36-L199 | def collect(self):
"""
Collect stats
"""
if os.access(self.PROC, os.R_OK):
results = {}
# Open file
file = open(self.PROC)
for line in file:
line = line.split()
if line[0] == 'rc':
results['reply_cache.hits'] = line[1]
results['reply_cache.misses'] = line[2]
results['reply_cache.nocache'] = line[3]
elif line[0] == 'fh':
results['filehandle.stale'] = line[1]
results['filehandle.total-lookups'] = line[2]
results['filehandle.anonlookups'] = line[3]
results['filehandle.dir-not-in-cache'] = line[4]
results['filehandle.nodir-not-in-cache'] = line[5]
elif line[0] == 'io':
results['input_output.bytes-read'] = line[1]
results['input_output.bytes-written'] = line[2]
elif line[0] == 'th':
results['threads.threads'] = line[1]
results['threads.fullcnt'] = line[2]
results['threads.10-20-pct'] = line[3]
results['threads.20-30-pct'] = line[4]
results['threads.30-40-pct'] = line[5]
results['threads.40-50-pct'] = line[6]
results['threads.50-60-pct'] = line[7]
results['threads.60-70-pct'] = line[8]
results['threads.70-80-pct'] = line[9]
results['threads.80-90-pct'] = line[10]
results['threads.90-100-pct'] = line[11]
results['threads.100-pct'] = line[12]
elif line[0] == 'ra':
results['read-ahead.cache-size'] = line[1]
results['read-ahead.10-pct'] = line[2]
results['read-ahead.20-pct'] = line[3]
results['read-ahead.30-pct'] = line[4]
results['read-ahead.40-pct'] = line[5]
results['read-ahead.50-pct'] = line[6]
results['read-ahead.60-pct'] = line[7]
results['read-ahead.70-pct'] = line[8]
results['read-ahead.80-pct'] = line[9]
results['read-ahead.90-pct'] = line[10]
results['read-ahead.100-pct'] = line[11]
results['read-ahead.not-found'] = line[12]
elif line[0] == 'net':
results['net.cnt'] = line[1]
results['net.udpcnt'] = line[2]
results['net.tcpcnt'] = line[3]
results['net.tcpconn'] = line[4]
elif line[0] == 'rpc':
results['rpc.cnt'] = line[1]
results['rpc.badfmt'] = line[2]
results['rpc.badauth'] = line[3]
results['rpc.badclnt'] = line[4]
elif line[0] == 'proc2':
results['v2.unknown'] = line[1]
results['v2.null'] = line[2]
results['v2.getattr'] = line[3]
results['v2.setattr'] = line[4]
results['v2.root'] = line[5]
results['v2.lookup'] = line[6]
results['v2.readlink'] = line[7]
results['v2.read'] = line[8]
results['v2.wrcache'] = line[9]
results['v2.write'] = line[10]
results['v2.create'] = line[11]
results['v2.remove'] = line[12]
results['v2.rename'] = line[13]
results['v2.link'] = line[14]
results['v2.symlink'] = line[15]
results['v2.mkdir'] = line[16]
results['v2.rmdir'] = line[17]
results['v2.readdir'] = line[18]
results['v2.fsstat'] = line[19]
elif line[0] == 'proc3':
results['v3.unknown'] = line[1]
results['v3.null'] = line[2]
results['v3.getattr'] = line[3]
results['v3.setattr'] = line[4]
results['v3.lookup'] = line[5]
results['v3.access'] = line[6]
results['v3.readlink'] = line[7]
results['v3.read'] = line[8]
results['v3.write'] = line[9]
results['v3.create'] = line[10]
results['v3.mkdir'] = line[11]
results['v3.symlink'] = line[12]
results['v3.mknod'] = line[13]
results['v3.remove'] = line[14]
results['v3.rmdir'] = line[15]
results['v3.rename'] = line[16]
results['v3.link'] = line[17]
results['v3.readdir'] = line[18]
results['v3.readdirplus'] = line[19]
results['v3.fsstat'] = line[20]
results['v3.fsinfo'] = line[21]
results['v3.pathconf'] = line[22]
results['v3.commit'] = line[23]
elif line[0] == 'proc4':
results['v4.unknown'] = line[1]
results['v4.null'] = line[2]
results['v4.compound'] = line[3]
elif line[0] == 'proc4ops':
results['v4.ops.unknown'] = line[1]
results['v4.ops.op0-unused'] = line[2]
results['v4.ops.op1-unused'] = line[3]
results['v4.ops.op2-future'] = line[4]
results['v4.ops.access'] = line[5]
results['v4.ops.close'] = line[6]
results['v4.ops.commit'] = line[7]
results['v4.ops.create'] = line[8]
results['v4.ops.delegpurge'] = line[9]
results['v4.ops.delegreturn'] = line[10]
results['v4.ops.getattr'] = line[11]
results['v4.ops.getfh'] = line[12]
results['v4.ops.link'] = line[13]
results['v4.ops.lock'] = line[14]
results['v4.ops.lockt'] = line[15]
results['v4.ops.locku'] = line[16]
results['v4.ops.lookup'] = line[17]
results['v4.ops.lookup_root'] = line[18]
results['v4.ops.nverify'] = line[19]
results['v4.ops.open'] = line[20]
results['v4.ops.openattr'] = line[21]
results['v4.ops.open_conf'] = line[22]
results['v4.ops.open_dgrd'] = line[23]
results['v4.ops.putfh'] = line[24]
results['v4.ops.putpubfh'] = line[25]
results['v4.ops.putrootfh'] = line[26]
results['v4.ops.read'] = line[27]
results['v4.ops.readdir'] = line[28]
results['v4.ops.readlink'] = line[29]
results['v4.ops.remove'] = line[30]
results['v4.ops.rename'] = line[31]
results['v4.ops.renew'] = line[32]
results['v4.ops.restorefh'] = line[33]
results['v4.ops.savefh'] = line[34]
results['v4.ops.secinfo'] = line[35]
results['v4.ops.setattr'] = line[36]
results['v4.ops.setcltid'] = line[37]
results['v4.ops.setcltidconf'] = line[38]
results['v4.ops.verify'] = line[39]
results['v4.ops.write'] = line[40]
results['v4.ops.rellockowner'] = line[41]
# Close File
file.close()
for stat in results.keys():
metric_name = '.' + stat
metric_value = long(float(results[stat]))
metric_value = self.derivative(metric_name, metric_value)
self.publish(metric_name, metric_value, precision=3)
return True
return False | [
"def",
"collect",
"(",
"self",
")",
":",
"if",
"os",
".",
"access",
"(",
"self",
".",
"PROC",
",",
"os",
".",
"R_OK",
")",
":",
"results",
"=",
"{",
"}",
"# Open file",
"file",
"=",
"open",
"(",
"self",
".",
"PROC",
")",
"for",
"line",
"in",
"file",
":",
"line",
"=",
"line",
".",
"split",
"(",
")",
"if",
"line",
"[",
"0",
"]",
"==",
"'rc'",
":",
"results",
"[",
"'reply_cache.hits'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'reply_cache.misses'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'reply_cache.nocache'",
"]",
"=",
"line",
"[",
"3",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'fh'",
":",
"results",
"[",
"'filehandle.stale'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'filehandle.total-lookups'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'filehandle.anonlookups'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'filehandle.dir-not-in-cache'",
"]",
"=",
"line",
"[",
"4",
"]",
"results",
"[",
"'filehandle.nodir-not-in-cache'",
"]",
"=",
"line",
"[",
"5",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'io'",
":",
"results",
"[",
"'input_output.bytes-read'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'input_output.bytes-written'",
"]",
"=",
"line",
"[",
"2",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'th'",
":",
"results",
"[",
"'threads.threads'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'threads.fullcnt'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'threads.10-20-pct'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'threads.20-30-pct'",
"]",
"=",
"line",
"[",
"4",
"]",
"results",
"[",
"'threads.30-40-pct'",
"]",
"=",
"line",
"[",
"5",
"]",
"results",
"[",
"'threads.40-50-pct'",
"]",
"=",
"line",
"[",
"6",
"]",
"results",
"[",
"'threads.50-60-pct'",
"]",
"=",
"line",
"[",
"7",
"]",
"results",
"[",
"'threads.60-70-pct'",
"]",
"=",
"line",
"[",
"8",
"]",
"results",
"[",
"'threads.70-80-pct'",
"]",
"=",
"line",
"[",
"9",
"]",
"results",
"[",
"'threads.80-90-pct'",
"]",
"=",
"line",
"[",
"10",
"]",
"results",
"[",
"'threads.90-100-pct'",
"]",
"=",
"line",
"[",
"11",
"]",
"results",
"[",
"'threads.100-pct'",
"]",
"=",
"line",
"[",
"12",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'ra'",
":",
"results",
"[",
"'read-ahead.cache-size'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'read-ahead.10-pct'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'read-ahead.20-pct'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'read-ahead.30-pct'",
"]",
"=",
"line",
"[",
"4",
"]",
"results",
"[",
"'read-ahead.40-pct'",
"]",
"=",
"line",
"[",
"5",
"]",
"results",
"[",
"'read-ahead.50-pct'",
"]",
"=",
"line",
"[",
"6",
"]",
"results",
"[",
"'read-ahead.60-pct'",
"]",
"=",
"line",
"[",
"7",
"]",
"results",
"[",
"'read-ahead.70-pct'",
"]",
"=",
"line",
"[",
"8",
"]",
"results",
"[",
"'read-ahead.80-pct'",
"]",
"=",
"line",
"[",
"9",
"]",
"results",
"[",
"'read-ahead.90-pct'",
"]",
"=",
"line",
"[",
"10",
"]",
"results",
"[",
"'read-ahead.100-pct'",
"]",
"=",
"line",
"[",
"11",
"]",
"results",
"[",
"'read-ahead.not-found'",
"]",
"=",
"line",
"[",
"12",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'net'",
":",
"results",
"[",
"'net.cnt'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'net.udpcnt'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'net.tcpcnt'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'net.tcpconn'",
"]",
"=",
"line",
"[",
"4",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'rpc'",
":",
"results",
"[",
"'rpc.cnt'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'rpc.badfmt'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'rpc.badauth'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'rpc.badclnt'",
"]",
"=",
"line",
"[",
"4",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'proc2'",
":",
"results",
"[",
"'v2.unknown'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'v2.null'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'v2.getattr'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'v2.setattr'",
"]",
"=",
"line",
"[",
"4",
"]",
"results",
"[",
"'v2.root'",
"]",
"=",
"line",
"[",
"5",
"]",
"results",
"[",
"'v2.lookup'",
"]",
"=",
"line",
"[",
"6",
"]",
"results",
"[",
"'v2.readlink'",
"]",
"=",
"line",
"[",
"7",
"]",
"results",
"[",
"'v2.read'",
"]",
"=",
"line",
"[",
"8",
"]",
"results",
"[",
"'v2.wrcache'",
"]",
"=",
"line",
"[",
"9",
"]",
"results",
"[",
"'v2.write'",
"]",
"=",
"line",
"[",
"10",
"]",
"results",
"[",
"'v2.create'",
"]",
"=",
"line",
"[",
"11",
"]",
"results",
"[",
"'v2.remove'",
"]",
"=",
"line",
"[",
"12",
"]",
"results",
"[",
"'v2.rename'",
"]",
"=",
"line",
"[",
"13",
"]",
"results",
"[",
"'v2.link'",
"]",
"=",
"line",
"[",
"14",
"]",
"results",
"[",
"'v2.symlink'",
"]",
"=",
"line",
"[",
"15",
"]",
"results",
"[",
"'v2.mkdir'",
"]",
"=",
"line",
"[",
"16",
"]",
"results",
"[",
"'v2.rmdir'",
"]",
"=",
"line",
"[",
"17",
"]",
"results",
"[",
"'v2.readdir'",
"]",
"=",
"line",
"[",
"18",
"]",
"results",
"[",
"'v2.fsstat'",
"]",
"=",
"line",
"[",
"19",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'proc3'",
":",
"results",
"[",
"'v3.unknown'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'v3.null'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'v3.getattr'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'v3.setattr'",
"]",
"=",
"line",
"[",
"4",
"]",
"results",
"[",
"'v3.lookup'",
"]",
"=",
"line",
"[",
"5",
"]",
"results",
"[",
"'v3.access'",
"]",
"=",
"line",
"[",
"6",
"]",
"results",
"[",
"'v3.readlink'",
"]",
"=",
"line",
"[",
"7",
"]",
"results",
"[",
"'v3.read'",
"]",
"=",
"line",
"[",
"8",
"]",
"results",
"[",
"'v3.write'",
"]",
"=",
"line",
"[",
"9",
"]",
"results",
"[",
"'v3.create'",
"]",
"=",
"line",
"[",
"10",
"]",
"results",
"[",
"'v3.mkdir'",
"]",
"=",
"line",
"[",
"11",
"]",
"results",
"[",
"'v3.symlink'",
"]",
"=",
"line",
"[",
"12",
"]",
"results",
"[",
"'v3.mknod'",
"]",
"=",
"line",
"[",
"13",
"]",
"results",
"[",
"'v3.remove'",
"]",
"=",
"line",
"[",
"14",
"]",
"results",
"[",
"'v3.rmdir'",
"]",
"=",
"line",
"[",
"15",
"]",
"results",
"[",
"'v3.rename'",
"]",
"=",
"line",
"[",
"16",
"]",
"results",
"[",
"'v3.link'",
"]",
"=",
"line",
"[",
"17",
"]",
"results",
"[",
"'v3.readdir'",
"]",
"=",
"line",
"[",
"18",
"]",
"results",
"[",
"'v3.readdirplus'",
"]",
"=",
"line",
"[",
"19",
"]",
"results",
"[",
"'v3.fsstat'",
"]",
"=",
"line",
"[",
"20",
"]",
"results",
"[",
"'v3.fsinfo'",
"]",
"=",
"line",
"[",
"21",
"]",
"results",
"[",
"'v3.pathconf'",
"]",
"=",
"line",
"[",
"22",
"]",
"results",
"[",
"'v3.commit'",
"]",
"=",
"line",
"[",
"23",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'proc4'",
":",
"results",
"[",
"'v4.unknown'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'v4.null'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'v4.compound'",
"]",
"=",
"line",
"[",
"3",
"]",
"elif",
"line",
"[",
"0",
"]",
"==",
"'proc4ops'",
":",
"results",
"[",
"'v4.ops.unknown'",
"]",
"=",
"line",
"[",
"1",
"]",
"results",
"[",
"'v4.ops.op0-unused'",
"]",
"=",
"line",
"[",
"2",
"]",
"results",
"[",
"'v4.ops.op1-unused'",
"]",
"=",
"line",
"[",
"3",
"]",
"results",
"[",
"'v4.ops.op2-future'",
"]",
"=",
"line",
"[",
"4",
"]",
"results",
"[",
"'v4.ops.access'",
"]",
"=",
"line",
"[",
"5",
"]",
"results",
"[",
"'v4.ops.close'",
"]",
"=",
"line",
"[",
"6",
"]",
"results",
"[",
"'v4.ops.commit'",
"]",
"=",
"line",
"[",
"7",
"]",
"results",
"[",
"'v4.ops.create'",
"]",
"=",
"line",
"[",
"8",
"]",
"results",
"[",
"'v4.ops.delegpurge'",
"]",
"=",
"line",
"[",
"9",
"]",
"results",
"[",
"'v4.ops.delegreturn'",
"]",
"=",
"line",
"[",
"10",
"]",
"results",
"[",
"'v4.ops.getattr'",
"]",
"=",
"line",
"[",
"11",
"]",
"results",
"[",
"'v4.ops.getfh'",
"]",
"=",
"line",
"[",
"12",
"]",
"results",
"[",
"'v4.ops.link'",
"]",
"=",
"line",
"[",
"13",
"]",
"results",
"[",
"'v4.ops.lock'",
"]",
"=",
"line",
"[",
"14",
"]",
"results",
"[",
"'v4.ops.lockt'",
"]",
"=",
"line",
"[",
"15",
"]",
"results",
"[",
"'v4.ops.locku'",
"]",
"=",
"line",
"[",
"16",
"]",
"results",
"[",
"'v4.ops.lookup'",
"]",
"=",
"line",
"[",
"17",
"]",
"results",
"[",
"'v4.ops.lookup_root'",
"]",
"=",
"line",
"[",
"18",
"]",
"results",
"[",
"'v4.ops.nverify'",
"]",
"=",
"line",
"[",
"19",
"]",
"results",
"[",
"'v4.ops.open'",
"]",
"=",
"line",
"[",
"20",
"]",
"results",
"[",
"'v4.ops.openattr'",
"]",
"=",
"line",
"[",
"21",
"]",
"results",
"[",
"'v4.ops.open_conf'",
"]",
"=",
"line",
"[",
"22",
"]",
"results",
"[",
"'v4.ops.open_dgrd'",
"]",
"=",
"line",
"[",
"23",
"]",
"results",
"[",
"'v4.ops.putfh'",
"]",
"=",
"line",
"[",
"24",
"]",
"results",
"[",
"'v4.ops.putpubfh'",
"]",
"=",
"line",
"[",
"25",
"]",
"results",
"[",
"'v4.ops.putrootfh'",
"]",
"=",
"line",
"[",
"26",
"]",
"results",
"[",
"'v4.ops.read'",
"]",
"=",
"line",
"[",
"27",
"]",
"results",
"[",
"'v4.ops.readdir'",
"]",
"=",
"line",
"[",
"28",
"]",
"results",
"[",
"'v4.ops.readlink'",
"]",
"=",
"line",
"[",
"29",
"]",
"results",
"[",
"'v4.ops.remove'",
"]",
"=",
"line",
"[",
"30",
"]",
"results",
"[",
"'v4.ops.rename'",
"]",
"=",
"line",
"[",
"31",
"]",
"results",
"[",
"'v4.ops.renew'",
"]",
"=",
"line",
"[",
"32",
"]",
"results",
"[",
"'v4.ops.restorefh'",
"]",
"=",
"line",
"[",
"33",
"]",
"results",
"[",
"'v4.ops.savefh'",
"]",
"=",
"line",
"[",
"34",
"]",
"results",
"[",
"'v4.ops.secinfo'",
"]",
"=",
"line",
"[",
"35",
"]",
"results",
"[",
"'v4.ops.setattr'",
"]",
"=",
"line",
"[",
"36",
"]",
"results",
"[",
"'v4.ops.setcltid'",
"]",
"=",
"line",
"[",
"37",
"]",
"results",
"[",
"'v4.ops.setcltidconf'",
"]",
"=",
"line",
"[",
"38",
"]",
"results",
"[",
"'v4.ops.verify'",
"]",
"=",
"line",
"[",
"39",
"]",
"results",
"[",
"'v4.ops.write'",
"]",
"=",
"line",
"[",
"40",
"]",
"results",
"[",
"'v4.ops.rellockowner'",
"]",
"=",
"line",
"[",
"41",
"]",
"# Close File",
"file",
".",
"close",
"(",
")",
"for",
"stat",
"in",
"results",
".",
"keys",
"(",
")",
":",
"metric_name",
"=",
"'.'",
"+",
"stat",
"metric_value",
"=",
"long",
"(",
"float",
"(",
"results",
"[",
"stat",
"]",
")",
")",
"metric_value",
"=",
"self",
".",
"derivative",
"(",
"metric_name",
",",
"metric_value",
")",
"self",
".",
"publish",
"(",
"metric_name",
",",
"metric_value",
",",
"precision",
"=",
"3",
")",
"return",
"True",
"return",
"False"
] | Collect stats | [
"Collect",
"stats"
] | python | train |
log2timeline/plaso | plaso/engine/zeromq_queue.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/zeromq_queue.py#L751-L783 | def PushItem(self, item, block=True):
"""Push an item on to the queue.
If no ZeroMQ socket has been created, one will be created the first time
this method is called.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
QueueAlreadyClosed: if the queue is closed.
QueueFull: if the internal buffer was full and it was not possible to
push the item to the buffer within the timeout.
RuntimeError: if closed event is missing.
"""
if not self._closed_event:
raise RuntimeError('Missing closed event.')
if self._closed_event.is_set():
raise errors.QueueAlreadyClosed()
if not self._zmq_socket:
self._CreateZMQSocket()
try:
if block:
self._queue.put(item, timeout=self.timeout_seconds)
else:
self._queue.put(item, block=False)
except Queue.Full as exception:
raise errors.QueueFull(exception) | [
"def",
"PushItem",
"(",
"self",
",",
"item",
",",
"block",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"_closed_event",
":",
"raise",
"RuntimeError",
"(",
"'Missing closed event.'",
")",
"if",
"self",
".",
"_closed_event",
".",
"is_set",
"(",
")",
":",
"raise",
"errors",
".",
"QueueAlreadyClosed",
"(",
")",
"if",
"not",
"self",
".",
"_zmq_socket",
":",
"self",
".",
"_CreateZMQSocket",
"(",
")",
"try",
":",
"if",
"block",
":",
"self",
".",
"_queue",
".",
"put",
"(",
"item",
",",
"timeout",
"=",
"self",
".",
"timeout_seconds",
")",
"else",
":",
"self",
".",
"_queue",
".",
"put",
"(",
"item",
",",
"block",
"=",
"False",
")",
"except",
"Queue",
".",
"Full",
"as",
"exception",
":",
"raise",
"errors",
".",
"QueueFull",
"(",
"exception",
")"
] | Push an item on to the queue.
If no ZeroMQ socket has been created, one will be created the first time
this method is called.
Args:
item (object): item to push on the queue.
block (Optional[bool]): whether the push should be performed in blocking
or non-blocking mode.
Raises:
QueueAlreadyClosed: if the queue is closed.
QueueFull: if the internal buffer was full and it was not possible to
push the item to the buffer within the timeout.
RuntimeError: if closed event is missing. | [
"Push",
"an",
"item",
"on",
"to",
"the",
"queue",
"."
] | python | train |
mdiener/grace | grace/py27/slimit/parser.py | https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L375-L382 | def p_new_expr_nobf(self, p):
"""new_expr_nobf : member_expr_nobf
| NEW new_expr
"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ast.NewExpr(p[2]) | [
"def",
"p_new_expr_nobf",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"NewExpr",
"(",
"p",
"[",
"2",
"]",
")"
] | new_expr_nobf : member_expr_nobf
| NEW new_expr | [
"new_expr_nobf",
":",
"member_expr_nobf",
"|",
"NEW",
"new_expr"
] | python | train |
MacHu-GWU/single_file_module-project | sfm/matplot_mate.py | https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L207-L226 | def preprocess_x_y(x, y):
"""Preprocess x, y input data. Returns list of list style.
**中文文档**
预处理输入的x, y数据。
"""
def is_iterable_slicable(a):
if hasattr(a, "__iter__") and hasattr(a, "__getitem__"):
return True
else:
return False
if is_iterable_slicable(x):
if is_iterable_slicable(x[0]):
return x, y
else:
return (x,), (y,)
else:
raise ValueError("invalid input!") | [
"def",
"preprocess_x_y",
"(",
"x",
",",
"y",
")",
":",
"def",
"is_iterable_slicable",
"(",
"a",
")",
":",
"if",
"hasattr",
"(",
"a",
",",
"\"__iter__\"",
")",
"and",
"hasattr",
"(",
"a",
",",
"\"__getitem__\"",
")",
":",
"return",
"True",
"else",
":",
"return",
"False",
"if",
"is_iterable_slicable",
"(",
"x",
")",
":",
"if",
"is_iterable_slicable",
"(",
"x",
"[",
"0",
"]",
")",
":",
"return",
"x",
",",
"y",
"else",
":",
"return",
"(",
"x",
",",
")",
",",
"(",
"y",
",",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid input!\"",
")"
] | Preprocess x, y input data. Returns list of list style.
**中文文档**
预处理输入的x, y数据。 | [
"Preprocess",
"x",
"y",
"input",
"data",
".",
"Returns",
"list",
"of",
"list",
"style",
"."
] | python | train |
dmlc/gluon-nlp | src/gluonnlp/data/transforms.py | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L1144-L1157 | def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop() | [
"def",
"_truncate_seq_pair",
"(",
"self",
",",
"tokens_a",
",",
"tokens_b",
",",
"max_length",
")",
":",
"# This is a simple heuristic which will always truncate the longer sequence",
"# one token at a time. This makes more sense than truncating an equal percent",
"# of tokens from each, since if one sequence is very short then each token",
"# that's truncated likely contains more information than a longer sequence.",
"while",
"True",
":",
"total_length",
"=",
"len",
"(",
"tokens_a",
")",
"+",
"len",
"(",
"tokens_b",
")",
"if",
"total_length",
"<=",
"max_length",
":",
"break",
"if",
"len",
"(",
"tokens_a",
")",
">",
"len",
"(",
"tokens_b",
")",
":",
"tokens_a",
".",
"pop",
"(",
")",
"else",
":",
"tokens_b",
".",
"pop",
"(",
")"
] | Truncates a sequence pair in place to the maximum length. | [
"Truncates",
"a",
"sequence",
"pair",
"in",
"place",
"to",
"the",
"maximum",
"length",
"."
] | python | train |
brian-rose/climlab | climlab/radiation/cam3/cam3.py | https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/radiation/cam3/cam3.py#L115-L128 | def _cam3_to_climlab(self, field):
''' Output is either (KM, JM, 1) or (JM, 1).
Transform this to...
- (KM,) or (1,) if JM==1
- (KM, JM) or (JM, 1) if JM>1
(longitude dimension IM not yet implemented).'''
if self.JM > 1:
if len(field.shape)==2:
return field
elif len(field.shape)==3:
return np.squeeze(np.transpose(field))
else:
return np.squeeze(field) | [
"def",
"_cam3_to_climlab",
"(",
"self",
",",
"field",
")",
":",
"if",
"self",
".",
"JM",
">",
"1",
":",
"if",
"len",
"(",
"field",
".",
"shape",
")",
"==",
"2",
":",
"return",
"field",
"elif",
"len",
"(",
"field",
".",
"shape",
")",
"==",
"3",
":",
"return",
"np",
".",
"squeeze",
"(",
"np",
".",
"transpose",
"(",
"field",
")",
")",
"else",
":",
"return",
"np",
".",
"squeeze",
"(",
"field",
")"
] | Output is either (KM, JM, 1) or (JM, 1).
Transform this to...
- (KM,) or (1,) if JM==1
- (KM, JM) or (JM, 1) if JM>1
(longitude dimension IM not yet implemented). | [
"Output",
"is",
"either",
"(",
"KM",
"JM",
"1",
")",
"or",
"(",
"JM",
"1",
")",
".",
"Transform",
"this",
"to",
"...",
"-",
"(",
"KM",
")",
"or",
"(",
"1",
")",
"if",
"JM",
"==",
"1",
"-",
"(",
"KM",
"JM",
")",
"or",
"(",
"JM",
"1",
")",
"if",
"JM",
">",
"1"
] | python | train |
AirSage/Petrel | petrel/petrel/rdebug.py | https://github.com/AirSage/Petrel/blob/c4be9b7da5916dcc028ddb88850e7703203eeb79/petrel/petrel/rdebug.py#L125-L135 | def debug_process(pid):
"""Interrupt a running process and debug it."""
os.kill(pid, signal.SIGUSR1) # Signal process.
pipe = NamedPipe(pipename(pid), 1)
try:
while pipe.is_open():
txt=raw_input(pipe.get()) + '\n'
pipe.put(txt)
except EOFError:
pass # Exit.
pipe.close() | [
"def",
"debug_process",
"(",
"pid",
")",
":",
"os",
".",
"kill",
"(",
"pid",
",",
"signal",
".",
"SIGUSR1",
")",
"# Signal process.",
"pipe",
"=",
"NamedPipe",
"(",
"pipename",
"(",
"pid",
")",
",",
"1",
")",
"try",
":",
"while",
"pipe",
".",
"is_open",
"(",
")",
":",
"txt",
"=",
"raw_input",
"(",
"pipe",
".",
"get",
"(",
")",
")",
"+",
"'\\n'",
"pipe",
".",
"put",
"(",
"txt",
")",
"except",
"EOFError",
":",
"pass",
"# Exit.",
"pipe",
".",
"close",
"(",
")"
] | Interrupt a running process and debug it. | [
"Interrupt",
"a",
"running",
"process",
"and",
"debug",
"it",
"."
] | python | train |
rauenzi/discordbot.py | discordbot/cogs/botadmin.py | https://github.com/rauenzi/discordbot.py/blob/39bb98dae4e49487e6c6c597f85fc41c74b62bb8/discordbot/cogs/botadmin.py#L297-L304 | async def unignore_all(self, ctx):
"""Unignores all channels in this server from being processed.
To use this command you must have the Manage Channels permission or have the
Bot Admin role.
"""
channels = [c for c in ctx.message.server.channels if c.type is discord.ChannelType.text]
await ctx.invoke(self.unignore, *channels) | [
"async",
"def",
"unignore_all",
"(",
"self",
",",
"ctx",
")",
":",
"channels",
"=",
"[",
"c",
"for",
"c",
"in",
"ctx",
".",
"message",
".",
"server",
".",
"channels",
"if",
"c",
".",
"type",
"is",
"discord",
".",
"ChannelType",
".",
"text",
"]",
"await",
"ctx",
".",
"invoke",
"(",
"self",
".",
"unignore",
",",
"*",
"channels",
")"
] | Unignores all channels in this server from being processed.
To use this command you must have the Manage Channels permission or have the
Bot Admin role. | [
"Unignores",
"all",
"channels",
"in",
"this",
"server",
"from",
"being",
"processed",
"."
] | python | train |
jonathf/chaospy | chaospy/quad/collection/leja.py | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/quad/collection/leja.py#L29-L76 | def quad_leja(order, dist):
"""
Generate Leja quadrature node.
Example:
>>> abscisas, weights = quad_leja(3, chaospy.Normal(0, 1))
>>> print(numpy.around(abscisas, 4))
[[-2.7173 -1.4142 0. 1.7635]]
>>> print(numpy.around(weights, 4))
[0.022 0.1629 0.6506 0.1645]
"""
from chaospy.distributions import evaluation
if len(dist) > 1 and evaluation.get_dependencies(*list(dist)):
raise evaluation.DependencyError(
"Leja quadrature do not supper distribution with dependencies.")
if len(dist) > 1:
if isinstance(order, int):
out = [quad_leja(order, _) for _ in dist]
else:
out = [quad_leja(order[_], dist[_]) for _ in range(len(dist))]
abscissas = [_[0][0] for _ in out]
weights = [_[1] for _ in out]
abscissas = chaospy.quad.combine(abscissas).T
weights = chaospy.quad.combine(weights)
weights = numpy.prod(weights, -1)
return abscissas, weights
lower, upper = dist.range()
abscissas = [lower, dist.mom(1), upper]
for _ in range(int(order)):
obj = create_objective(dist, abscissas)
opts, vals = zip(
*[fminbound(
obj, abscissas[idx], abscissas[idx+1], full_output=1)[:2]
for idx in range(len(abscissas)-1)]
)
index = numpy.argmin(vals)
abscissas.insert(index+1, opts[index])
abscissas = numpy.asfarray(abscissas).flatten()[1:-1]
weights = create_weights(abscissas, dist)
abscissas = abscissas.reshape(1, abscissas.size)
return numpy.array(abscissas), numpy.array(weights) | [
"def",
"quad_leja",
"(",
"order",
",",
"dist",
")",
":",
"from",
"chaospy",
".",
"distributions",
"import",
"evaluation",
"if",
"len",
"(",
"dist",
")",
">",
"1",
"and",
"evaluation",
".",
"get_dependencies",
"(",
"*",
"list",
"(",
"dist",
")",
")",
":",
"raise",
"evaluation",
".",
"DependencyError",
"(",
"\"Leja quadrature do not supper distribution with dependencies.\"",
")",
"if",
"len",
"(",
"dist",
")",
">",
"1",
":",
"if",
"isinstance",
"(",
"order",
",",
"int",
")",
":",
"out",
"=",
"[",
"quad_leja",
"(",
"order",
",",
"_",
")",
"for",
"_",
"in",
"dist",
"]",
"else",
":",
"out",
"=",
"[",
"quad_leja",
"(",
"order",
"[",
"_",
"]",
",",
"dist",
"[",
"_",
"]",
")",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"dist",
")",
")",
"]",
"abscissas",
"=",
"[",
"_",
"[",
"0",
"]",
"[",
"0",
"]",
"for",
"_",
"in",
"out",
"]",
"weights",
"=",
"[",
"_",
"[",
"1",
"]",
"for",
"_",
"in",
"out",
"]",
"abscissas",
"=",
"chaospy",
".",
"quad",
".",
"combine",
"(",
"abscissas",
")",
".",
"T",
"weights",
"=",
"chaospy",
".",
"quad",
".",
"combine",
"(",
"weights",
")",
"weights",
"=",
"numpy",
".",
"prod",
"(",
"weights",
",",
"-",
"1",
")",
"return",
"abscissas",
",",
"weights",
"lower",
",",
"upper",
"=",
"dist",
".",
"range",
"(",
")",
"abscissas",
"=",
"[",
"lower",
",",
"dist",
".",
"mom",
"(",
"1",
")",
",",
"upper",
"]",
"for",
"_",
"in",
"range",
"(",
"int",
"(",
"order",
")",
")",
":",
"obj",
"=",
"create_objective",
"(",
"dist",
",",
"abscissas",
")",
"opts",
",",
"vals",
"=",
"zip",
"(",
"*",
"[",
"fminbound",
"(",
"obj",
",",
"abscissas",
"[",
"idx",
"]",
",",
"abscissas",
"[",
"idx",
"+",
"1",
"]",
",",
"full_output",
"=",
"1",
")",
"[",
":",
"2",
"]",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"abscissas",
")",
"-",
"1",
")",
"]",
")",
"index",
"=",
"numpy",
".",
"argmin",
"(",
"vals",
")",
"abscissas",
".",
"insert",
"(",
"index",
"+",
"1",
",",
"opts",
"[",
"index",
"]",
")",
"abscissas",
"=",
"numpy",
".",
"asfarray",
"(",
"abscissas",
")",
".",
"flatten",
"(",
")",
"[",
"1",
":",
"-",
"1",
"]",
"weights",
"=",
"create_weights",
"(",
"abscissas",
",",
"dist",
")",
"abscissas",
"=",
"abscissas",
".",
"reshape",
"(",
"1",
",",
"abscissas",
".",
"size",
")",
"return",
"numpy",
".",
"array",
"(",
"abscissas",
")",
",",
"numpy",
".",
"array",
"(",
"weights",
")"
] | Generate Leja quadrature node.
Example:
>>> abscisas, weights = quad_leja(3, chaospy.Normal(0, 1))
>>> print(numpy.around(abscisas, 4))
[[-2.7173 -1.4142 0. 1.7635]]
>>> print(numpy.around(weights, 4))
[0.022 0.1629 0.6506 0.1645] | [
"Generate",
"Leja",
"quadrature",
"node",
"."
] | python | train |
Kortemme-Lab/klab | klab/rosetta/input_files.py | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/rosetta/input_files.py#L123-L126 | def add(self, start, end, cut_point = None, skip_rate = None, extend_loop = None):
'''Add a new loop definition.'''
self.data.append(self.parse_loop_line(['LOOP', start, end, cut_point, skip_rate, extend_loop]))
assert(start <= end) | [
"def",
"add",
"(",
"self",
",",
"start",
",",
"end",
",",
"cut_point",
"=",
"None",
",",
"skip_rate",
"=",
"None",
",",
"extend_loop",
"=",
"None",
")",
":",
"self",
".",
"data",
".",
"append",
"(",
"self",
".",
"parse_loop_line",
"(",
"[",
"'LOOP'",
",",
"start",
",",
"end",
",",
"cut_point",
",",
"skip_rate",
",",
"extend_loop",
"]",
")",
")",
"assert",
"(",
"start",
"<=",
"end",
")"
] | Add a new loop definition. | [
"Add",
"a",
"new",
"loop",
"definition",
"."
] | python | train |
mila/pyoo | pyoo.py | https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1865-L1883 | def open_spreadsheet(self, path, as_template=False, read_only=False):
"""
Opens an existing spreadsheet document on the local file system.
"""
extra = ()
if as_template:
pv = uno.createUnoStruct('com.sun.star.beans.PropertyValue')
pv.Name = 'AsTemplate'
pv.Value = True
extra += (pv,)
if read_only:
pv = uno.createUnoStruct('com.sun.star.beans.PropertyValue')
pv.Name = 'ReadOnly'
pv.Value = True
extra += (pv,)
# UNO requires absolute paths
url = uno.systemPathToFileUrl(os.path.abspath(path))
document = self._open_url(url, extra)
return SpreadsheetDocument(document) | [
"def",
"open_spreadsheet",
"(",
"self",
",",
"path",
",",
"as_template",
"=",
"False",
",",
"read_only",
"=",
"False",
")",
":",
"extra",
"=",
"(",
")",
"if",
"as_template",
":",
"pv",
"=",
"uno",
".",
"createUnoStruct",
"(",
"'com.sun.star.beans.PropertyValue'",
")",
"pv",
".",
"Name",
"=",
"'AsTemplate'",
"pv",
".",
"Value",
"=",
"True",
"extra",
"+=",
"(",
"pv",
",",
")",
"if",
"read_only",
":",
"pv",
"=",
"uno",
".",
"createUnoStruct",
"(",
"'com.sun.star.beans.PropertyValue'",
")",
"pv",
".",
"Name",
"=",
"'ReadOnly'",
"pv",
".",
"Value",
"=",
"True",
"extra",
"+=",
"(",
"pv",
",",
")",
"# UNO requires absolute paths",
"url",
"=",
"uno",
".",
"systemPathToFileUrl",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"document",
"=",
"self",
".",
"_open_url",
"(",
"url",
",",
"extra",
")",
"return",
"SpreadsheetDocument",
"(",
"document",
")"
] | Opens an existing spreadsheet document on the local file system. | [
"Opens",
"an",
"exiting",
"spreadsheet",
"document",
"on",
"the",
"local",
"file",
"system",
"."
] | python | train |
mkoura/dump2polarion | dump2polarion/results/csvtools.py | https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/results/csvtools.py#L83-L107 | def _get_results(csv_reader, fieldnames):
"""Maps data to fieldnames.
The reader needs to be at position after fieldnames, before the results data.
"""
fieldnames_count = len(fieldnames)
results = []
for row in csv_reader:
for col in row:
if col:
break
else:
# empty row, skip it
continue
record = OrderedDict(list(zip(fieldnames, row)))
# skip rows that were already exported
if record.get("exported") == "yes":
continue
row_len = len(row)
if fieldnames_count > row_len:
for key in fieldnames[row_len:]:
record[key] = None
results.append(record)
return results | [
"def",
"_get_results",
"(",
"csv_reader",
",",
"fieldnames",
")",
":",
"fieldnames_count",
"=",
"len",
"(",
"fieldnames",
")",
"results",
"=",
"[",
"]",
"for",
"row",
"in",
"csv_reader",
":",
"for",
"col",
"in",
"row",
":",
"if",
"col",
":",
"break",
"else",
":",
"# empty row, skip it",
"continue",
"record",
"=",
"OrderedDict",
"(",
"list",
"(",
"zip",
"(",
"fieldnames",
",",
"row",
")",
")",
")",
"# skip rows that were already exported",
"if",
"record",
".",
"get",
"(",
"\"exported\"",
")",
"==",
"\"yes\"",
":",
"continue",
"row_len",
"=",
"len",
"(",
"row",
")",
"if",
"fieldnames_count",
">",
"row_len",
":",
"for",
"key",
"in",
"fieldnames",
"[",
"row_len",
":",
"]",
":",
"record",
"[",
"key",
"]",
"=",
"None",
"results",
".",
"append",
"(",
"record",
")",
"return",
"results"
] | Maps data to fieldnames.
The reader needs to be at position after fieldnames, before the results data. | [
"Maps",
"data",
"to",
"fieldnames",
"."
] | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/console_widget.py#L835-L859 | def _append_custom(self, insert, input, before_prompt=False):
""" A low-level method for appending content to the end of the buffer.
If 'before_prompt' is enabled, the content will be inserted before the
current prompt, if there is one.
"""
# Determine where to insert the content.
cursor = self._control.textCursor()
if before_prompt and (self._reading or not self._executing):
cursor.setPosition(self._append_before_prompt_pos)
else:
cursor.movePosition(QtGui.QTextCursor.End)
start_pos = cursor.position()
# Perform the insertion.
result = insert(cursor, input)
# Adjust the prompt position if we have inserted before it. This is safe
# because buffer truncation is disabled when not executing.
if before_prompt and not self._executing:
diff = cursor.position() - start_pos
self._append_before_prompt_pos += diff
self._prompt_pos += diff
return result | [
"def",
"_append_custom",
"(",
"self",
",",
"insert",
",",
"input",
",",
"before_prompt",
"=",
"False",
")",
":",
"# Determine where to insert the content.",
"cursor",
"=",
"self",
".",
"_control",
".",
"textCursor",
"(",
")",
"if",
"before_prompt",
"and",
"(",
"self",
".",
"_reading",
"or",
"not",
"self",
".",
"_executing",
")",
":",
"cursor",
".",
"setPosition",
"(",
"self",
".",
"_append_before_prompt_pos",
")",
"else",
":",
"cursor",
".",
"movePosition",
"(",
"QtGui",
".",
"QTextCursor",
".",
"End",
")",
"start_pos",
"=",
"cursor",
".",
"position",
"(",
")",
"# Perform the insertion.",
"result",
"=",
"insert",
"(",
"cursor",
",",
"input",
")",
"# Adjust the prompt position if we have inserted before it. This is safe",
"# because buffer truncation is disabled when not executing.",
"if",
"before_prompt",
"and",
"not",
"self",
".",
"_executing",
":",
"diff",
"=",
"cursor",
".",
"position",
"(",
")",
"-",
"start_pos",
"self",
".",
"_append_before_prompt_pos",
"+=",
"diff",
"self",
".",
"_prompt_pos",
"+=",
"diff",
"return",
"result"
] | A low-level method for appending content to the end of the buffer.
If 'before_prompt' is enabled, the content will be inserted before the
current prompt, if there is one. | [
"A",
"low",
"-",
"level",
"method",
"for",
"appending",
"content",
"to",
"the",
"end",
"of",
"the",
"buffer",
"."
] | python | test |
crcresearch/py-utils | crc_nd/utils/file_io.py | https://github.com/crcresearch/py-utils/blob/04caf0425a047baf900da726cf47c42413b0dd81/crc_nd/utils/file_io.py#L14-L23 | def clean_out_dir(directory):
"""
Delete all the files and subdirectories in a directory.
"""
if not isinstance(directory, path):
directory = path(directory)
for file_path in directory.files():
file_path.remove()
for dir_path in directory.dirs():
dir_path.rmtree() | [
"def",
"clean_out_dir",
"(",
"directory",
")",
":",
"if",
"not",
"isinstance",
"(",
"directory",
",",
"path",
")",
":",
"directory",
"=",
"path",
"(",
"directory",
")",
"for",
"file_path",
"in",
"directory",
".",
"files",
"(",
")",
":",
"file_path",
".",
"remove",
"(",
")",
"for",
"dir_path",
"in",
"directory",
".",
"dirs",
"(",
")",
":",
"dir_path",
".",
"rmtree",
"(",
")"
] | Delete all the files and subdirectories in a directory. | [
"Delete",
"all",
"the",
"files",
"and",
"subdirectories",
"in",
"a",
"directory",
"."
] | python | train |
RiotGames/cloud-inquisitor | backend/cloud_inquisitor/schema/resource.py | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/schema/resource.py#L72-L101 | def get(cls, resource_type):
"""Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will
be created in the database and returned
Args:
resource_type (str): Resource type name
Returns:
:obj:`ResourceType`
"""
if isinstance(resource_type, str):
obj = getattr(db, cls.__name__).find_one(cls.resource_type == resource_type)
elif isinstance(resource_type, int):
obj = getattr(db, cls.__name__).find_one(cls.resource_type_id == resource_type)
elif isinstance(resource_type, cls):
return resource_type
else:
obj = None
if not obj:
obj = cls()
obj.resource_type = resource_type
db.session.add(obj)
db.session.commit()
db.session.refresh(obj)
return obj | [
"def",
"get",
"(",
"cls",
",",
"resource_type",
")",
":",
"if",
"isinstance",
"(",
"resource_type",
",",
"str",
")",
":",
"obj",
"=",
"getattr",
"(",
"db",
",",
"cls",
".",
"__name__",
")",
".",
"find_one",
"(",
"cls",
".",
"resource_type",
"==",
"resource_type",
")",
"elif",
"isinstance",
"(",
"resource_type",
",",
"int",
")",
":",
"obj",
"=",
"getattr",
"(",
"db",
",",
"cls",
".",
"__name__",
")",
".",
"find_one",
"(",
"cls",
".",
"resource_type_id",
"==",
"resource_type",
")",
"elif",
"isinstance",
"(",
"resource_type",
",",
"cls",
")",
":",
"return",
"resource_type",
"else",
":",
"obj",
"=",
"None",
"if",
"not",
"obj",
":",
"obj",
"=",
"cls",
"(",
")",
"obj",
".",
"resource_type",
"=",
"resource_type",
"db",
".",
"session",
".",
"add",
"(",
"obj",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"db",
".",
"session",
".",
"refresh",
"(",
"obj",
")",
"return",
"obj"
] | Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will
be created in the database and returned
Args:
resource_type (str): Resource type name
Returns:
:obj:`ResourceType` | [
"Returns",
"the",
"ResourceType",
"object",
"for",
"resource_type",
".",
"If",
"no",
"existing",
"object",
"was",
"found",
"a",
"new",
"type",
"will",
"be",
"created",
"in",
"the",
"database",
"and",
"returned"
] | python | train |
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/websitemanagementservice.py#L210-L237 | def get_historical_usage_metrics(self, webspace_name, website_name,
metrics = None, start_time=None, end_time=None, time_grain=None):
'''
Get historical usage metrics.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
metrics:
Optional. List of metrics name. Otherwise, all metrics returned.
start_time:
Optional. An ISO8601 date. Otherwise, current hour is used.
end_time:
Optional. An ISO8601 date. Otherwise, current time is used.
time_grain:
Optional. A rollup name, as P1D. Otherwise, default rollup for the metrics is used.
More information and metrics name at:
http://msdn.microsoft.com/en-us/library/azure/dn166964.aspx
'''
metrics = ('names='+','.join(metrics)) if metrics else ''
start_time = ('StartTime='+start_time) if start_time else ''
end_time = ('EndTime='+end_time) if end_time else ''
time_grain = ('TimeGrain='+time_grain) if time_grain else ''
parameters = ('&'.join(v for v in (metrics, start_time, end_time, time_grain) if v))
parameters = '?'+parameters if parameters else ''
return self._perform_get(self._get_historical_usage_metrics_path(webspace_name, website_name) + parameters,
MetricResponses) | [
"def",
"get_historical_usage_metrics",
"(",
"self",
",",
"webspace_name",
",",
"website_name",
",",
"metrics",
"=",
"None",
",",
"start_time",
"=",
"None",
",",
"end_time",
"=",
"None",
",",
"time_grain",
"=",
"None",
")",
":",
"metrics",
"=",
"(",
"'names='",
"+",
"','",
".",
"join",
"(",
"metrics",
")",
")",
"if",
"metrics",
"else",
"''",
"start_time",
"=",
"(",
"'StartTime='",
"+",
"start_time",
")",
"if",
"start_time",
"else",
"''",
"end_time",
"=",
"(",
"'EndTime='",
"+",
"end_time",
")",
"if",
"end_time",
"else",
"''",
"time_grain",
"=",
"(",
"'TimeGrain='",
"+",
"time_grain",
")",
"if",
"time_grain",
"else",
"''",
"parameters",
"=",
"(",
"'&'",
".",
"join",
"(",
"v",
"for",
"v",
"in",
"(",
"metrics",
",",
"start_time",
",",
"end_time",
",",
"time_grain",
")",
"if",
"v",
")",
")",
"parameters",
"=",
"'?'",
"+",
"parameters",
"if",
"parameters",
"else",
"''",
"return",
"self",
".",
"_perform_get",
"(",
"self",
".",
"_get_historical_usage_metrics_path",
"(",
"webspace_name",
",",
"website_name",
")",
"+",
"parameters",
",",
"MetricResponses",
")"
] | Get historical usage metrics.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
metrics:
Optional. List of metrics name. Otherwise, all metrics returned.
start_time:
Optional. An ISO8601 date. Otherwise, current hour is used.
end_time:
Optional. An ISO8601 date. Otherwise, current time is used.
time_grain:
Optional. A rollup name, as P1D. Otherwise, default rollup for the metrics is used.
More information and metrics name at:
http://msdn.microsoft.com/en-us/library/azure/dn166964.aspx | [
"Get",
"historical",
"usage",
"metrics",
"."
] | python | test |
databio/pypiper | pypiper/ngstk.py | https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/ngstk.py#L1426-L1440 | def calc_frip(self, input_bam, input_bed, threads=4):
"""
Calculate fraction of reads in peaks.
A file of with a pool of sequencing reads and a file with peak call
regions define the operation that will be performed. Thread count
for samtools can be specified as well.
:param str input_bam: sequencing reads file
:param str input_bed: file with called peak regions
:param int threads: number of threads samtools may use
:return float: fraction of reads in peaks defined in given peaks file
"""
cmd = self.simple_frip(input_bam, input_bed, threads)
return subprocess.check_output(cmd.split(" "), shell=True) | [
"def",
"calc_frip",
"(",
"self",
",",
"input_bam",
",",
"input_bed",
",",
"threads",
"=",
"4",
")",
":",
"cmd",
"=",
"self",
".",
"simple_frip",
"(",
"input_bam",
",",
"input_bed",
",",
"threads",
")",
"return",
"subprocess",
".",
"check_output",
"(",
"cmd",
".",
"split",
"(",
"\" \"",
")",
",",
"shell",
"=",
"True",
")"
] | Calculate fraction of reads in peaks.
A file of with a pool of sequencing reads and a file with peak call
regions define the operation that will be performed. Thread count
for samtools can be specified as well.
:param str input_bam: sequencing reads file
:param str input_bed: file with called peak regions
:param int threads: number of threads samtools may use
:return float: fraction of reads in peaks defined in given peaks file | [
"Calculate",
"fraction",
"of",
"reads",
"in",
"peaks",
"."
] | python | train |
timothycrosley/deprecated.frosted | frosted/checker.py | https://github.com/timothycrosley/deprecated.frosted/blob/61ba7f341fc55676c3580c8c4e52117986cd5e12/frosted/checker.py#L716-L733 | def NAME(self, node):
"""Handle occurrence of Name (which can be a load/store/delete
access.)"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, (ast.Load, ast.AugLoad)):
self.handle_node_load(node)
if (node.id == 'locals' and isinstance(self.scope, FunctionScope)
and isinstance(node.parent, ast.Call)):
# we are doing locals() call in current scope
self.scope.uses_locals = True
elif isinstance(node.ctx, (ast.Store, ast.AugStore)):
self.handle_node_store(node)
elif isinstance(node.ctx, ast.Del):
self.handle_node_delete(node)
else:
# must be a Param context -- this only happens for names in function
# arguments, but these aren't dispatched through here
raise RuntimeError("Got impossible expression context: %r" % (node.ctx,)) | [
"def",
"NAME",
"(",
"self",
",",
"node",
")",
":",
"# Locate the name in locals / function / globals scopes.",
"if",
"isinstance",
"(",
"node",
".",
"ctx",
",",
"(",
"ast",
".",
"Load",
",",
"ast",
".",
"AugLoad",
")",
")",
":",
"self",
".",
"handle_node_load",
"(",
"node",
")",
"if",
"(",
"node",
".",
"id",
"==",
"'locals'",
"and",
"isinstance",
"(",
"self",
".",
"scope",
",",
"FunctionScope",
")",
"and",
"isinstance",
"(",
"node",
".",
"parent",
",",
"ast",
".",
"Call",
")",
")",
":",
"# we are doing locals() call in current scope",
"self",
".",
"scope",
".",
"uses_locals",
"=",
"True",
"elif",
"isinstance",
"(",
"node",
".",
"ctx",
",",
"(",
"ast",
".",
"Store",
",",
"ast",
".",
"AugStore",
")",
")",
":",
"self",
".",
"handle_node_store",
"(",
"node",
")",
"elif",
"isinstance",
"(",
"node",
".",
"ctx",
",",
"ast",
".",
"Del",
")",
":",
"self",
".",
"handle_node_delete",
"(",
"node",
")",
"else",
":",
"# must be a Param context -- this only happens for names in function",
"# arguments, but these aren't dispatched through here",
"raise",
"RuntimeError",
"(",
"\"Got impossible expression context: %r\"",
"%",
"(",
"node",
".",
"ctx",
",",
")",
")"
] | Handle occurrence of Name (which can be a load/store/delete
access.) | [
"Handle",
"occurrence",
"of",
"Name",
"(",
"which",
"can",
"be",
"a",
"load",
"/",
"store",
"/",
"delete",
"access",
".",
")"
] | python | train |
Azure/blobxfer | blobxfer/operations/azure/__init__.py | https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/azure/__init__.py#L176-L185 | def endpoint(self, value):
# type: (StorageAccount, str) -> None
"""Set endpoint
:param StorageAccount self: this
:param str value: endpoint
"""
tmp = value.split('.')
if (len(tmp) <= 1 or not tmp[0].isalnum()):
raise ValueError('endpoint is invalid: {}'.format(value))
self._endpoint = value | [
"def",
"endpoint",
"(",
"self",
",",
"value",
")",
":",
"# type: (StorageAccount, str) -> None",
"tmp",
"=",
"value",
".",
"split",
"(",
"'.'",
")",
"if",
"(",
"len",
"(",
"tmp",
")",
"<=",
"1",
"or",
"not",
"tmp",
"[",
"0",
"]",
".",
"isalnum",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'endpoint is invalid: {}'",
".",
"format",
"(",
"value",
")",
")",
"self",
".",
"_endpoint",
"=",
"value"
] | Set endpoint
:param StorageAccount self: this
:param str value: endpoint | [
"Set",
"endpoint",
":",
"param",
"StorageAccount",
"self",
":",
"this",
":",
"param",
"str",
"value",
":",
"endpoint"
] | python | train |
uogbuji/amara3-xml | pylib/uxml/writer.py | https://github.com/uogbuji/amara3-xml/blob/88c18876418cffc89bb85b4a3193e5002b6b39a6/pylib/uxml/writer.py#L178-L191 | def write(elem, a_writer):
'''
Write a MicroXML element node (yes, even one representign a whole document)
elem - Amara MicroXML element node to be written out
writer - instance of amara3.uxml.writer to implement the writing process
'''
a_writer.start_element(elem.xml_name, attribs=elem.xml_attributes)
for node in elem.xml_children:
if isinstance(node, tree.element):
write(node, a_writer)
elif isinstance(node, tree.text):
a_writer.text(node)
a_writer.end_element(elem.xml_name)
return | [
"def",
"write",
"(",
"elem",
",",
"a_writer",
")",
":",
"a_writer",
".",
"start_element",
"(",
"elem",
".",
"xml_name",
",",
"attribs",
"=",
"elem",
".",
"xml_attributes",
")",
"for",
"node",
"in",
"elem",
".",
"xml_children",
":",
"if",
"isinstance",
"(",
"node",
",",
"tree",
".",
"element",
")",
":",
"write",
"(",
"node",
",",
"a_writer",
")",
"elif",
"isinstance",
"(",
"node",
",",
"tree",
".",
"text",
")",
":",
"a_writer",
".",
"text",
"(",
"node",
")",
"a_writer",
".",
"end_element",
"(",
"elem",
".",
"xml_name",
")",
"return"
] | Write a MicroXML element node (yes, even one representing a whole document)
elem - Amara MicroXML element node to be written out
writer - instance of amara3.uxml.writer to implement the writing process | [
"Write",
"a",
"MicroXML",
"element",
"node",
"(",
"yes",
"even",
"one",
"representign",
"a",
"whole",
"document",
")",
"elem",
"-",
"Amara",
"MicroXML",
"element",
"node",
"to",
"be",
"written",
"out",
"writer",
"-",
"instance",
"of",
"amara3",
".",
"uxml",
".",
"writer",
"to",
"implement",
"the",
"writing",
"process"
] | python | test |
trickvi/economics | economics/inflation.py | https://github.com/trickvi/economics/blob/18da5ce7169472ca1ba6022272a389b933f76edd/economics/inflation.py#L54-L70 | def get(self, reference, country, target=datetime.date.today()):
"""
Get the inflation/deflation value change for the target date based
on the reference date. Target defaults to today and the instance's
reference and country will be used if they are not provided as
parameters
"""
# Set country & reference to object's country & reference respectively
reference = self.reference if reference is None else reference
# Get the reference and target indices (values) from the source
reference_value = self.data.get(reference, country).value
target_value = self.data.get(target, country).value
# Compute the inflation value and return it
return self._compute_inflation(target_value, reference_value) | [
"def",
"get",
"(",
"self",
",",
"reference",
",",
"country",
",",
"target",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
":",
"# Set country & reference to object's country & reference respectively",
"reference",
"=",
"self",
".",
"reference",
"if",
"reference",
"is",
"None",
"else",
"reference",
"# Get the reference and target indices (values) from the source",
"reference_value",
"=",
"self",
".",
"data",
".",
"get",
"(",
"reference",
",",
"country",
")",
".",
"value",
"target_value",
"=",
"self",
".",
"data",
".",
"get",
"(",
"target",
",",
"country",
")",
".",
"value",
"# Compute the inflation value and return it",
"return",
"self",
".",
"_compute_inflation",
"(",
"target_value",
",",
"reference_value",
")"
] | Get the inflation/deflation value change for the target date based
on the reference date. Target defaults to today and the instance's
reference and country will be used if they are not provided as
parameters | [
"Get",
"the",
"inflation",
"/",
"deflation",
"value",
"change",
"for",
"the",
"target",
"date",
"based",
"on",
"the",
"reference",
"date",
".",
"Target",
"defaults",
"to",
"today",
"and",
"the",
"instance",
"s",
"reference",
"and",
"country",
"will",
"be",
"used",
"if",
"they",
"are",
"not",
"provided",
"as",
"parameters"
] | python | train |
saltstack/salt | salt/roster/terraform.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/roster/terraform.py#L119-L125 | def _cast_output_to_type(value, typ):
'''cast the value depending on the terraform type'''
if typ == 'b':
return bool(value)
if typ == 'i':
return int(value)
return value | [
"def",
"_cast_output_to_type",
"(",
"value",
",",
"typ",
")",
":",
"if",
"typ",
"==",
"'b'",
":",
"return",
"bool",
"(",
"value",
")",
"if",
"typ",
"==",
"'i'",
":",
"return",
"int",
"(",
"value",
")",
"return",
"value"
] | cast the value depending on the terraform type | [
"cast",
"the",
"value",
"depending",
"on",
"the",
"terraform",
"type"
] | python | train |
StackStorm/pybind | pybind/nos/v6_0_2f/brocade_system_rpc/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_system_rpc/__init__.py#L97-L121 | def _set_get_system_uptime(self, v, load=False):
"""
Setter method for get_system_uptime, mapped from YANG variable /brocade_system_rpc/get_system_uptime (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_system_uptime is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_system_uptime() directly.
YANG Description: This is a function that returns time
since this managed entity was last re-initialized.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_system_uptime.get_system_uptime, is_leaf=True, yang_name="get-system-uptime", rest_name="get-system-uptime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show-sys-uptime'}}, namespace='urn:brocade.com:mgmt:brocade-system', defining_module='brocade-system', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_system_uptime must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_system_uptime.get_system_uptime, is_leaf=True, yang_name="get-system-uptime", rest_name="get-system-uptime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show-sys-uptime'}}, namespace='urn:brocade.com:mgmt:brocade-system', defining_module='brocade-system', yang_type='rpc', is_config=True)""",
})
self.__get_system_uptime = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_get_system_uptime",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"get_system_uptime",
".",
"get_system_uptime",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"get-system-uptime\"",
",",
"rest_name",
"=",
"\"get-system-uptime\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"False",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'hidden'",
":",
"u'rpccmd'",
",",
"u'actionpoint'",
":",
"u'show-sys-uptime'",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-system'",
",",
"defining_module",
"=",
"'brocade-system'",
",",
"yang_type",
"=",
"'rpc'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"get_system_uptime must be of a type compatible with rpc\"\"\"",
",",
"'defined-type'",
":",
"\"rpc\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=get_system_uptime.get_system_uptime, is_leaf=True, yang_name=\"get-system-uptime\", rest_name=\"get-system-uptime\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'show-sys-uptime'}}, namespace='urn:brocade.com:mgmt:brocade-system', defining_module='brocade-system', yang_type='rpc', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__get_system_uptime",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for get_system_uptime, mapped from YANG variable /brocade_system_rpc/get_system_uptime (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_system_uptime is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_system_uptime() directly.
YANG Description: This is a function that returns time
since this managed entity was last re-initialized. | [
"Setter",
"method",
"for",
"get_system_uptime",
"mapped",
"from",
"YANG",
"variable",
"/",
"brocade_system_rpc",
"/",
"get_system_uptime",
"(",
"rpc",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_get_system_uptime",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_get_system_uptime",
"()",
"directly",
"."
] | python | train |
hyperledger/sawtooth-core | validator/sawtooth_validator/execution/scheduler_parallel.py | https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/execution/scheduler_parallel.py#L91-L105 | def update(self, address, updater, prune=False):
'''
Walk to ADDRESS, creating nodes if necessary, and set the data
there to UPDATER(data).
Arguments:
address (str): the address to be updated
'''
node = self._get_or_create(address)
node.data = updater(node.data)
if prune:
node.children.clear() | [
"def",
"update",
"(",
"self",
",",
"address",
",",
"updater",
",",
"prune",
"=",
"False",
")",
":",
"node",
"=",
"self",
".",
"_get_or_create",
"(",
"address",
")",
"node",
".",
"data",
"=",
"updater",
"(",
"node",
".",
"data",
")",
"if",
"prune",
":",
"node",
".",
"children",
".",
"clear",
"(",
")"
] | Walk to ADDRESS, creating nodes if necessary, and set the data
there to UPDATER(data).
Arguments:
address (str): the address to be updated | [
"Walk",
"to",
"ADDRESS",
"creating",
"nodes",
"if",
"necessary",
"and",
"set",
"the",
"data",
"there",
"to",
"UPDATER",
"(",
"data",
")",
"."
] | python | train |
grahambell/pymoc | lib/pymoc/io/json.py | https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/io/json.py#L22-L39 | def write_moc_json(moc, filename=None, file=None):
"""Write a MOC in JSON encoding.
Either a filename, or an open file object can be specified.
"""
moc.normalize()
obj = {}
for (order, cells) in moc:
obj['{0}'.format(order)] = sorted(cells)
if file is not None:
_write_json(obj, file)
else:
with open(filename, 'wb') as f:
_write_json(obj, f) | [
"def",
"write_moc_json",
"(",
"moc",
",",
"filename",
"=",
"None",
",",
"file",
"=",
"None",
")",
":",
"moc",
".",
"normalize",
"(",
")",
"obj",
"=",
"{",
"}",
"for",
"(",
"order",
",",
"cells",
")",
"in",
"moc",
":",
"obj",
"[",
"'{0}'",
".",
"format",
"(",
"order",
")",
"]",
"=",
"sorted",
"(",
"cells",
")",
"if",
"file",
"is",
"not",
"None",
":",
"_write_json",
"(",
"obj",
",",
"file",
")",
"else",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"_write_json",
"(",
"obj",
",",
"f",
")"
] | Write a MOC in JSON encoding.
Either a filename, or an open file object can be specified. | [
"Write",
"a",
"MOC",
"in",
"JSON",
"encoding",
"."
] | python | train |
acutesoftware/AIKIF | scripts/run.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/run.py#L49-L60 | def start_aikif():
"""
starts the web interface and possibly other processes
"""
if sys.platform[0:3] == 'win':
os.system("start go_web_aikif.bat")
else:
os.system("../aikif/web_app/web_aikif.py")
import webbrowser
import time
time.sleep(1)
webbrowser.open('http://127.0.0.1:5000') | [
"def",
"start_aikif",
"(",
")",
":",
"if",
"sys",
".",
"platform",
"[",
"0",
":",
"3",
"]",
"==",
"'win'",
":",
"os",
".",
"system",
"(",
"\"start go_web_aikif.bat\"",
")",
"else",
":",
"os",
".",
"system",
"(",
"\"../aikif/web_app/web_aikif.py\"",
")",
"import",
"webbrowser",
"import",
"time",
"time",
".",
"sleep",
"(",
"1",
")",
"webbrowser",
".",
"open",
"(",
"'http://127.0.0.1:5000'",
")"
] | starts the web interface and possibly other processes | [
"starts",
"the",
"web",
"interface",
"and",
"possibly",
"other",
"processes"
] | python | train |
edeposit/edeposit.amqp.storage | src/edeposit/amqp/storage/web_tools.py | https://github.com/edeposit/edeposit.amqp.storage/blob/fb6bd326249847de04b17b64e856c878665cea92/src/edeposit/amqp/storage/web_tools.py#L107-L124 | def compose_tree_url(tree, issn_url=False):
"""
Compose full url for given `tree`, with protocol, server's address and
port.
Args:
tree (obj): :class:`.Tree` instance.
issn_url (bool, default False): Compose URL using ISSN.
Returns:
str: URL of the tree
"""
url = compose_tree_path(tree, issn_url)
if WEB_PORT == 80:
return "%s://%s%s" % (_PROTOCOL, WEB_ADDR, url)
return "%s://%s:%d%s" % (_PROTOCOL, WEB_ADDR, WEB_PORT, url) | [
"def",
"compose_tree_url",
"(",
"tree",
",",
"issn_url",
"=",
"False",
")",
":",
"url",
"=",
"compose_tree_path",
"(",
"tree",
",",
"issn_url",
")",
"if",
"WEB_PORT",
"==",
"80",
":",
"return",
"\"%s://%s%s\"",
"%",
"(",
"_PROTOCOL",
",",
"WEB_ADDR",
",",
"url",
")",
"return",
"\"%s://%s:%d%s\"",
"%",
"(",
"_PROTOCOL",
",",
"WEB_ADDR",
",",
"WEB_PORT",
",",
"url",
")"
] | Compose full url for given `tree`, with protocol, server's address and
port.
Args:
tree (obj): :class:`.Tree` instance.
issn_url (bool, default False): Compose URL using ISSN.
Returns:
str: URL of the tree | [
"Compose",
"full",
"url",
"for",
"given",
"tree",
"with",
"protocol",
"server",
"s",
"address",
"and",
"port",
"."
] | python | train |
BenjaminSchubert/NitPycker | nitpycker/runner.py | https://github.com/BenjaminSchubert/NitPycker/blob/3ac2b3bf06f1d704b4853167a967311b0465a76f/nitpycker/runner.py#L182-L217 | def print_summary(self, result, time_taken):
"""
Prints the test summary, how many tests failed, how long it took, etc
:param result: result class to use to print summary
:param time_taken: the time all tests took to run
"""
if hasattr(result, "separator2"):
self.stream.writeln(result.separator2)
self.stream.writeln("Ran {number_of_tests} test{s} in {time:.3f}s\n".format(
number_of_tests=result.testsRun, s="s" if result.testsRun != 1 else "", time=time_taken
))
info = []
if not result.wasSuccessful():
self.stream.write("FAILED")
if result.failures:
info.append("failures={}".format(len(result.failures)))
if result.errors:
info.append("errors={}".format(len(result.errors)))
else:
self.stream.write("OK")
if result.skipped:
info.append("skipped={}".format(len(result.skipped)))
if result.expectedFailures:
info.append("expected failures={}".format(len(result.expectedFailures)))
if result.unexpectedSuccesses:
info.append("unexpected successes={}".format(len(result.unexpectedSuccesses)))
if info:
self.stream.writeln(" ({})".format(", ".join(info)))
else:
self.stream.write("\n") | [
"def",
"print_summary",
"(",
"self",
",",
"result",
",",
"time_taken",
")",
":",
"if",
"hasattr",
"(",
"result",
",",
"\"separator2\"",
")",
":",
"self",
".",
"stream",
".",
"writeln",
"(",
"result",
".",
"separator2",
")",
"self",
".",
"stream",
".",
"writeln",
"(",
"\"Ran {number_of_tests} test{s} in {time:.3f}s\\n\"",
".",
"format",
"(",
"number_of_tests",
"=",
"result",
".",
"testsRun",
",",
"s",
"=",
"\"s\"",
"if",
"result",
".",
"testsRun",
"!=",
"1",
"else",
"\"\"",
",",
"time",
"=",
"time_taken",
")",
")",
"info",
"=",
"[",
"]",
"if",
"not",
"result",
".",
"wasSuccessful",
"(",
")",
":",
"self",
".",
"stream",
".",
"write",
"(",
"\"FAILED\"",
")",
"if",
"result",
".",
"failures",
":",
"info",
".",
"append",
"(",
"\"failures={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"failures",
")",
")",
")",
"if",
"result",
".",
"errors",
":",
"info",
".",
"append",
"(",
"\"errors={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"errors",
")",
")",
")",
"else",
":",
"self",
".",
"stream",
".",
"write",
"(",
"\"OK\"",
")",
"if",
"result",
".",
"skipped",
":",
"info",
".",
"append",
"(",
"\"skipped={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"skipped",
")",
")",
")",
"if",
"result",
".",
"expectedFailures",
":",
"info",
".",
"append",
"(",
"\"expected failures={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"expectedFailures",
")",
")",
")",
"if",
"result",
".",
"unexpectedSuccesses",
":",
"info",
".",
"append",
"(",
"\"unexpected successes={}\"",
".",
"format",
"(",
"len",
"(",
"result",
".",
"unexpectedSuccesses",
")",
")",
")",
"if",
"info",
":",
"self",
".",
"stream",
".",
"writeln",
"(",
"\" ({})\"",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"info",
")",
")",
")",
"else",
":",
"self",
".",
"stream",
".",
"write",
"(",
"\"\\n\"",
")"
] | Prints the test summary, how many tests failed, how long it took, etc
:param result: result class to use to print summary
:param time_taken: the time all tests took to run | [
"Prints",
"the",
"test",
"summary",
"how",
"many",
"tests",
"failed",
"how",
"long",
"it",
"took",
"etc"
] | python | train |
wummel/dosage | dosagelib/events.py | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/events.py#L256-L264 | def getComicData(self, comic):
"""Return dictionary with comic info."""
if comic not in self.data:
if os.path.exists(self.jsonFn(comic)):
with codecs.open(self.jsonFn(comic), 'r', self.encoding) as f:
self.data[comic] = json.load(f)
else:
self.data[comic] = {'pages':{}}
return self.data[comic] | [
"def",
"getComicData",
"(",
"self",
",",
"comic",
")",
":",
"if",
"comic",
"not",
"in",
"self",
".",
"data",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"jsonFn",
"(",
"comic",
")",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"self",
".",
"jsonFn",
"(",
"comic",
")",
",",
"'r'",
",",
"self",
".",
"encoding",
")",
"as",
"f",
":",
"self",
".",
"data",
"[",
"comic",
"]",
"=",
"json",
".",
"load",
"(",
"f",
")",
"else",
":",
"self",
".",
"data",
"[",
"comic",
"]",
"=",
"{",
"'pages'",
":",
"{",
"}",
"}",
"return",
"self",
".",
"data",
"[",
"comic",
"]"
] | Return dictionary with comic info. | [
"Return",
"dictionary",
"with",
"comic",
"info",
"."
] | python | train |
ghukill/pyfc4 | pyfc4/models.py | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L269-L314 | def get_txn(self, txn_name, txn_uri):
'''
Retrieves known transaction and adds to self.txns.
TODO:
Perhaps this should send a keep-alive request as well? Obviously still needed, and would reset timer.
Args:
txn_prefix (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789
txn_name (str): local, human name for transaction
Return:
(Transaction) local instance of transactions from self.txns[txn_uri]
'''
# parse uri
txn_uri = self.parse_uri(txn_uri)
# request new transaction
txn_response = self.api.http_request('GET',txn_uri, data=None, headers=None)
# if 200, transaction exists
if txn_response.status_code == 200:
logger.debug("transactoin found: %s" % txn_uri)
# init new Transaction, and pass Expires header
txn = Transaction(
self, # pass the repository
txn_name,
txn_uri,
expires = None)
# append to self
self.txns[txn_name] = txn
# return
return txn
# if 404, transaction does not exist
elif txn_response.status_code in [404, 410]:
logger.debug("transaction does not exist: %s" % txn_uri)
return False
else:
raise Exception('HTTP %s, could not retrieve transaction' % txn_response.status_code) | [
"def",
"get_txn",
"(",
"self",
",",
"txn_name",
",",
"txn_uri",
")",
":",
"# parse uri",
"txn_uri",
"=",
"self",
".",
"parse_uri",
"(",
"txn_uri",
")",
"# request new transaction",
"txn_response",
"=",
"self",
".",
"api",
".",
"http_request",
"(",
"'GET'",
",",
"txn_uri",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
")",
"# if 200, transaction exists",
"if",
"txn_response",
".",
"status_code",
"==",
"200",
":",
"logger",
".",
"debug",
"(",
"\"transactoin found: %s\"",
"%",
"txn_uri",
")",
"# init new Transaction, and pass Expires header",
"txn",
"=",
"Transaction",
"(",
"self",
",",
"# pass the repository",
"txn_name",
",",
"txn_uri",
",",
"expires",
"=",
"None",
")",
"# append to self",
"self",
".",
"txns",
"[",
"txn_name",
"]",
"=",
"txn",
"# return",
"return",
"txn",
"# if 404, transaction does not exist",
"elif",
"txn_response",
".",
"status_code",
"in",
"[",
"404",
",",
"410",
"]",
":",
"logger",
".",
"debug",
"(",
"\"transaction does not exist: %s\"",
"%",
"txn_uri",
")",
"return",
"False",
"else",
":",
"raise",
"Exception",
"(",
"'HTTP %s, could not retrieve transaction'",
"%",
"txn_response",
".",
"status_code",
")"
] | Retrieves known transaction and adds to self.txns.
TODO:
Perhaps this should send a keep-alive request as well? Obviously still needed, and would reset timer.
Args:
txn_prefix (str, rdflib.term.URIRef): uri of the transaction. e.g. http://localhost:8080/rest/txn:123456789
txn_name (str): local, human name for transaction
Return:
(Transaction) local instance of transactions from self.txns[txn_uri] | [
"Retrieves",
"known",
"transaction",
"and",
"adds",
"to",
"self",
".",
"txns",
"."
] | python | train |
saltstack/salt | salt/modules/xml.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/xml.py#L66-L82 | def get_attribute(file, element):
'''
Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']"
'''
try:
root = ET.parse(file)
element = root.find(element)
return element.attrib
except AttributeError:
log.error("Unable to find element matching %s", element)
return False | [
"def",
"get_attribute",
"(",
"file",
",",
"element",
")",
":",
"try",
":",
"root",
"=",
"ET",
".",
"parse",
"(",
"file",
")",
"element",
"=",
"root",
".",
"find",
"(",
"element",
")",
"return",
"element",
".",
"attrib",
"except",
"AttributeError",
":",
"log",
".",
"error",
"(",
"\"Unable to find element matching %s\"",
",",
"element",
")",
"return",
"False"
] | Return the attributes of the matched xpath element.
CLI Example:
.. code-block:: bash
salt '*' xml.get_attribute /tmp/test.xml ".//element[@id='3']" | [
"Return",
"the",
"attributes",
"of",
"the",
"matched",
"xpath",
"element",
"."
] | python | train |
jbasko/configmanager | configmanager/sections.py | https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/sections.py#L228-L244 | def get_item(self, *key):
"""
The recommended way of retrieving an item by key when extending configmanager's behaviour.
Attribute and dictionary key access is configurable and may not always return items
(see PlainConfig for example), whereas this method will always return the corresponding
Item as long as NOT_FOUND hook callbacks don't break this convention.
Args:
*key
Returns:
item (:class:`.Item`):
"""
item = self._get_item_or_section(key)
if not item.is_item:
raise RuntimeError('{} is a section, not an item'.format(key))
return item | [
"def",
"get_item",
"(",
"self",
",",
"*",
"key",
")",
":",
"item",
"=",
"self",
".",
"_get_item_or_section",
"(",
"key",
")",
"if",
"not",
"item",
".",
"is_item",
":",
"raise",
"RuntimeError",
"(",
"'{} is a section, not an item'",
".",
"format",
"(",
"key",
")",
")",
"return",
"item"
] | The recommended way of retrieving an item by key when extending configmanager's behaviour.
Attribute and dictionary key access is configurable and may not always return items
(see PlainConfig for example), whereas this method will always return the corresponding
Item as long as NOT_FOUND hook callbacks don't break this convention.
Args:
*key
Returns:
item (:class:`.Item`): | [
"The",
"recommended",
"way",
"of",
"retrieving",
"an",
"item",
"by",
"key",
"when",
"extending",
"configmanager",
"s",
"behaviour",
".",
"Attribute",
"and",
"dictionary",
"key",
"access",
"is",
"configurable",
"and",
"may",
"not",
"always",
"return",
"items",
"(",
"see",
"PlainConfig",
"for",
"example",
")",
"whereas",
"this",
"method",
"will",
"always",
"return",
"the",
"corresponding",
"Item",
"as",
"long",
"as",
"NOT_FOUND",
"hook",
"callbacks",
"don",
"t",
"break",
"this",
"convention",
"."
] | python | train |
pmacosta/pexdoc | pexdoc/pinspect.py | https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/pinspect.py#L129-L156 | def get_module_name(module_obj):
r"""
Retrieve the module name from a module object.
:param module_obj: Module object
:type module_obj: object
:rtype: string
:raises:
* RuntimeError (Argument \`module_obj\` is not valid)
* RuntimeError (Module object \`*[module_name]*\` could not be found in
loaded modules)
For example:
>>> import pexdoc.pinspect
>>> pexdoc.pinspect.get_module_name(sys.modules['pexdoc.pinspect'])
'pexdoc.pinspect'
"""
if not is_object_module(module_obj):
raise RuntimeError("Argument `module_obj` is not valid")
name = module_obj.__name__
msg = "Module object `{name}` could not be found in loaded modules"
if name not in sys.modules:
raise RuntimeError(msg.format(name=name))
return name | [
"def",
"get_module_name",
"(",
"module_obj",
")",
":",
"if",
"not",
"is_object_module",
"(",
"module_obj",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Argument `module_obj` is not valid\"",
")",
"name",
"=",
"module_obj",
".",
"__name__",
"msg",
"=",
"\"Module object `{name}` could not be found in loaded modules\"",
"if",
"name",
"not",
"in",
"sys",
".",
"modules",
":",
"raise",
"RuntimeError",
"(",
"msg",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"return",
"name"
] | r"""
Retrieve the module name from a module object.
:param module_obj: Module object
:type module_obj: object
:rtype: string
:raises:
* RuntimeError (Argument \`module_obj\` is not valid)
* RuntimeError (Module object \`*[module_name]*\` could not be found in
loaded modules)
For example:
>>> import pexdoc.pinspect
>>> pexdoc.pinspect.get_module_name(sys.modules['pexdoc.pinspect'])
'pexdoc.pinspect' | [
"r",
"Retrieve",
"the",
"module",
"name",
"from",
"a",
"module",
"object",
"."
] | python | train |
apache/incubator-heron | heron/tools/tracker/src/python/handlers/metricsqueryhandler.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/handlers/metricsqueryhandler.py#L78-L120 | def executeMetricsQuery(self, tmaster, queryString, start_time, end_time, callback=None):
"""
Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
}
"""
query = Query(self.tracker)
metrics = yield query.execute_query(tmaster, queryString, start_time, end_time)
# Parse the response
ret = {}
ret["starttime"] = start_time
ret["endtime"] = end_time
ret["timeline"] = []
for metric in metrics:
tl = {
"data": metric.timeline
}
if metric.instance:
tl["instance"] = metric.instance
ret["timeline"].append(tl)
raise tornado.gen.Return(ret) | [
"def",
"executeMetricsQuery",
"(",
"self",
",",
"tmaster",
",",
"queryString",
",",
"start_time",
",",
"end_time",
",",
"callback",
"=",
"None",
")",
":",
"query",
"=",
"Query",
"(",
"self",
".",
"tracker",
")",
"metrics",
"=",
"yield",
"query",
".",
"execute_query",
"(",
"tmaster",
",",
"queryString",
",",
"start_time",
",",
"end_time",
")",
"# Parse the response",
"ret",
"=",
"{",
"}",
"ret",
"[",
"\"starttime\"",
"]",
"=",
"start_time",
"ret",
"[",
"\"endtime\"",
"]",
"=",
"end_time",
"ret",
"[",
"\"timeline\"",
"]",
"=",
"[",
"]",
"for",
"metric",
"in",
"metrics",
":",
"tl",
"=",
"{",
"\"data\"",
":",
"metric",
".",
"timeline",
"}",
"if",
"metric",
".",
"instance",
":",
"tl",
"[",
"\"instance\"",
"]",
"=",
"metric",
".",
"instance",
"ret",
"[",
"\"timeline\"",
"]",
".",
"append",
"(",
"tl",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"ret",
")"
] | Get the specified metrics for the given query in this topology.
Returns the following dict on success:
{
"timeline": [{
"instance": <instance>,
"data": {
<start_time> : <numeric value>,
<start_time> : <numeric value>,
...
}
}, {
...
}, ...
"starttime": <numeric value>,
"endtime": <numeric value>,
},
Returns the following dict on failure:
{
"message": "..."
} | [
"Get",
"the",
"specified",
"metrics",
"for",
"the",
"given",
"query",
"in",
"this",
"topology",
".",
"Returns",
"the",
"following",
"dict",
"on",
"success",
":",
"{",
"timeline",
":",
"[",
"{",
"instance",
":",
"<instance",
">",
"data",
":",
"{",
"<start_time",
">",
":",
"<numeric",
"value",
">",
"<start_time",
">",
":",
"<numeric",
"value",
">",
"...",
"}",
"}",
"{",
"...",
"}",
"...",
"starttime",
":",
"<numeric",
"value",
">",
"endtime",
":",
"<numeric",
"value",
">",
"}"
] | python | valid |
slundberg/shap | shap/benchmark/metrics.py | https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L56-L90 | def local_accuracy(X, y, model_generator, method_name):
""" Local Accuracy
transform = "identity"
sort_order = 2
"""
def score_map(true, pred):
""" Converts local accuracy from % of standard deviation to numerical scores for coloring.
"""
v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
if v < 1e-6:
return 1.0
elif v < 0.01:
return 0.9
elif v < 0.05:
return 0.75
elif v < 0.1:
return 0.6
elif v < 0.2:
return 0.4
elif v < 0.3:
return 0.3
elif v < 0.5:
return 0.2
elif v < 0.7:
return 0.1
else:
return 0.0
def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model, random_state):
return measures.local_accuracy(
X_train, y_train, X_test, y_test, attr_function(X_test),
model_generator, score_map, trained_model
)
return None, __score_method(X, y, None, model_generator, score_function, method_name) | [
"def",
"local_accuracy",
"(",
"X",
",",
"y",
",",
"model_generator",
",",
"method_name",
")",
":",
"def",
"score_map",
"(",
"true",
",",
"pred",
")",
":",
"\"\"\" Converts local accuracy from % of standard deviation to numerical scores for coloring.\n \"\"\"",
"v",
"=",
"min",
"(",
"1.0",
",",
"np",
".",
"std",
"(",
"pred",
"-",
"true",
")",
"/",
"(",
"np",
".",
"std",
"(",
"true",
")",
"+",
"1e-8",
")",
")",
"if",
"v",
"<",
"1e-6",
":",
"return",
"1.0",
"elif",
"v",
"<",
"0.01",
":",
"return",
"0.9",
"elif",
"v",
"<",
"0.05",
":",
"return",
"0.75",
"elif",
"v",
"<",
"0.1",
":",
"return",
"0.6",
"elif",
"v",
"<",
"0.2",
":",
"return",
"0.4",
"elif",
"v",
"<",
"0.3",
":",
"return",
"0.3",
"elif",
"v",
"<",
"0.5",
":",
"return",
"0.2",
"elif",
"v",
"<",
"0.7",
":",
"return",
"0.1",
"else",
":",
"return",
"0.0",
"def",
"score_function",
"(",
"X_train",
",",
"X_test",
",",
"y_train",
",",
"y_test",
",",
"attr_function",
",",
"trained_model",
",",
"random_state",
")",
":",
"return",
"measures",
".",
"local_accuracy",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_function",
"(",
"X_test",
")",
",",
"model_generator",
",",
"score_map",
",",
"trained_model",
")",
"return",
"None",
",",
"__score_method",
"(",
"X",
",",
"y",
",",
"None",
",",
"model_generator",
",",
"score_function",
",",
"method_name",
")"
] | Local Accuracy
transform = "identity"
sort_order = 2 | [
"Local",
"Accuracy",
"transform",
"=",
"identity",
"sort_order",
"=",
"2"
] | python | train |
dossier/dossier.web | dossier/web/routes.py | https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/routes.py#L394-L412 | def v1_subfolder_list(request, response, kvlclient, fid):
'''Retrieves a list of subfolders in a folder for the current user.
The route for this endpoint is:
``GET /dossier/v1/folder/<fid>/subfolder``.
(Temporarily, the "current user" can be set via the
``annotator_id`` query parameter.)
The payload returned is a list of subfolder identifiers.
'''
fid = urllib.unquote(fid)
try:
return sorted(imap(attrgetter('name'),
ifilter(lambda it: it.is_folder(),
new_folders(kvlclient, request).list(fid))))
except KeyError:
response.status = 404
return [] | [
"def",
"v1_subfolder_list",
"(",
"request",
",",
"response",
",",
"kvlclient",
",",
"fid",
")",
":",
"fid",
"=",
"urllib",
".",
"unquote",
"(",
"fid",
")",
"try",
":",
"return",
"sorted",
"(",
"imap",
"(",
"attrgetter",
"(",
"'name'",
")",
",",
"ifilter",
"(",
"lambda",
"it",
":",
"it",
".",
"is_folder",
"(",
")",
",",
"new_folders",
"(",
"kvlclient",
",",
"request",
")",
".",
"list",
"(",
"fid",
")",
")",
")",
")",
"except",
"KeyError",
":",
"response",
".",
"status",
"=",
"404",
"return",
"[",
"]"
] | Retrieves a list of subfolders in a folder for the current user.
The route for this endpoint is:
``GET /dossier/v1/folder/<fid>/subfolder``.
(Temporarily, the "current user" can be set via the
``annotator_id`` query parameter.)
The payload returned is a list of subfolder identifiers. | [
"Retrieves",
"a",
"list",
"of",
"subfolders",
"in",
"a",
"folder",
"for",
"the",
"current",
"user",
"."
] | python | train |
ronaldguillen/wave | wave/views.py | https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/views.py#L369-L386 | def initial(self, request, *args, **kwargs):
"""
Runs anything that needs to occur prior to calling the method handler.
"""
self.format_kwarg = self.get_format_suffix(**kwargs)
# Ensure that the incoming request is permitted
self.perform_authentication(request)
self.check_permissions(request)
self.check_throttles(request)
# Perform content negotiation and store the accepted info on the request
neg = self.perform_content_negotiation(request)
request.accepted_renderer, request.accepted_media_type = neg
# Determine the API version, if versioning is in use.
version, scheme = self.determine_version(request, *args, **kwargs)
request.version, request.versioning_scheme = version, scheme | [
"def",
"initial",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"format_kwarg",
"=",
"self",
".",
"get_format_suffix",
"(",
"*",
"*",
"kwargs",
")",
"# Ensure that the incoming request is permitted",
"self",
".",
"perform_authentication",
"(",
"request",
")",
"self",
".",
"check_permissions",
"(",
"request",
")",
"self",
".",
"check_throttles",
"(",
"request",
")",
"# Perform content negotiation and store the accepted info on the request",
"neg",
"=",
"self",
".",
"perform_content_negotiation",
"(",
"request",
")",
"request",
".",
"accepted_renderer",
",",
"request",
".",
"accepted_media_type",
"=",
"neg",
"# Determine the API version, if versioning is in use.",
"version",
",",
"scheme",
"=",
"self",
".",
"determine_version",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"request",
".",
"version",
",",
"request",
".",
"versioning_scheme",
"=",
"version",
",",
"scheme"
] | Runs anything that needs to occur prior to calling the method handler. | [
"Runs",
"anything",
"that",
"needs",
"to",
"occur",
"prior",
"to",
"calling",
"the",
"method",
"handler",
"."
] | python | train |
saltstack/salt | salt/modules/restartcheck.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/restartcheck.py#L348-L357 | def _check_timeout(start_time, timeout):
'''
Name of the last installed kernel, for Red Hat based systems.
Returns:
List with name of last installed kernel as it is interpreted in output of `uname -a` command.
'''
timeout_milisec = timeout * 60000
if timeout_milisec < (int(round(time.time() * 1000)) - start_time):
raise salt.exceptions.TimeoutError('Timeout expired.') | [
"def",
"_check_timeout",
"(",
"start_time",
",",
"timeout",
")",
":",
"timeout_milisec",
"=",
"timeout",
"*",
"60000",
"if",
"timeout_milisec",
"<",
"(",
"int",
"(",
"round",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
")",
"-",
"start_time",
")",
":",
"raise",
"salt",
".",
"exceptions",
".",
"TimeoutError",
"(",
"'Timeout expired.'",
")"
] | Name of the last installed kernel, for Red Hat based systems.
Returns:
List with name of last installed kernel as it is interpreted in output of `uname -a` command. | [
"Name",
"of",
"the",
"last",
"installed",
"kernel",
"for",
"Red",
"Hat",
"based",
"systems",
"."
] | python | train |
yhat/db.py | db/db.py | https://github.com/yhat/db.py/blob/df2dbb8ef947c2d4253d31f29eb58c4084daffc5/db/db.py#L279-L305 | def load_credentials(self, profile="default"):
"""
Loads credentials for a given profile. Profiles are stored in
~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not
to say this a secure way to store sensitive data, but it will probably
stop your little sister from stealing your passwords.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod")
"""
f = profile_path(DBPY_PROFILE_ID, profile)
if f:
creds = load_from_json(f)
self.username = creds.get('username')
self.password = creds.get('password')
self.hostname = creds.get('hostname')
self.port = creds.get('port')
self.filename = creds.get('filename')
self.dbname = creds.get('dbname')
self.dbtype = creds.get('dbtype')
self.schemas = creds.get('schemas')
self.limit = creds.get('limit')
self.keys_per_column = creds.get('keys_per_column')
else:
raise Exception("Credentials not configured!") | [
"def",
"load_credentials",
"(",
"self",
",",
"profile",
"=",
"\"default\"",
")",
":",
"f",
"=",
"profile_path",
"(",
"DBPY_PROFILE_ID",
",",
"profile",
")",
"if",
"f",
":",
"creds",
"=",
"load_from_json",
"(",
"f",
")",
"self",
".",
"username",
"=",
"creds",
".",
"get",
"(",
"'username'",
")",
"self",
".",
"password",
"=",
"creds",
".",
"get",
"(",
"'password'",
")",
"self",
".",
"hostname",
"=",
"creds",
".",
"get",
"(",
"'hostname'",
")",
"self",
".",
"port",
"=",
"creds",
".",
"get",
"(",
"'port'",
")",
"self",
".",
"filename",
"=",
"creds",
".",
"get",
"(",
"'filename'",
")",
"self",
".",
"dbname",
"=",
"creds",
".",
"get",
"(",
"'dbname'",
")",
"self",
".",
"dbtype",
"=",
"creds",
".",
"get",
"(",
"'dbtype'",
")",
"self",
".",
"schemas",
"=",
"creds",
".",
"get",
"(",
"'schemas'",
")",
"self",
".",
"limit",
"=",
"creds",
".",
"get",
"(",
"'limit'",
")",
"self",
".",
"keys_per_column",
"=",
"creds",
".",
"get",
"(",
"'keys_per_column'",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Credentials not configured!\"",
")"
] | Loads credentials for a given profile. Profiles are stored in
~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not
to say this a secure way to store sensitive data, but it will probably
stop your little sister from stealing your passwords.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod") | [
"Loads",
"crentials",
"for",
"a",
"given",
"profile",
".",
"Profiles",
"are",
"stored",
"in",
"~",
"/",
".",
"db",
".",
"py_",
"{",
"profile_name",
"}",
"and",
"are",
"a",
"base64",
"encoded",
"JSON",
"file",
".",
"This",
"is",
"not",
"to",
"say",
"this",
"a",
"secure",
"way",
"to",
"store",
"sensitive",
"data",
"but",
"it",
"will",
"probably",
"stop",
"your",
"little",
"sister",
"from",
"stealing",
"your",
"passwords",
"."
] | python | train |
gem/oq-engine | openquake/commonlib/source.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/source.py#L419-L424 | def get_weight(self, weight=operator.attrgetter('weight')):
"""
:param weight: source weight function
:returns: total weight of the source model
"""
return sum(weight(src) for src in self.get_sources()) | [
"def",
"get_weight",
"(",
"self",
",",
"weight",
"=",
"operator",
".",
"attrgetter",
"(",
"'weight'",
")",
")",
":",
"return",
"sum",
"(",
"weight",
"(",
"src",
")",
"for",
"src",
"in",
"self",
".",
"get_sources",
"(",
")",
")"
] | :param weight: source weight function
:returns: total weight of the source model | [
":",
"param",
"weight",
":",
"source",
"weight",
"function",
":",
"returns",
":",
"total",
"weight",
"of",
"the",
"source",
"model"
] | python | train |
rkhleics/wagtailmenus | wagtailmenus/models/menus.py | https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/models/menus.py#L385-L412 | def get_context_data(self, **kwargs):
"""
Return a dictionary containing all of the values needed to render the
menu instance to a template, including values that might be used by
the 'sub_menu' tag to render any additional levels.
"""
ctx_vals = self._contextual_vals
opt_vals = self._option_vals
data = self.create_dict_from_parent_context()
data.update(ctx_vals._asdict())
data.update({
'apply_active_classes': opt_vals.apply_active_classes,
'allow_repeating_parents': opt_vals.allow_repeating_parents,
'use_absolute_page_urls': opt_vals.use_absolute_page_urls,
'max_levels': self.max_levels,
'use_specific': self.use_specific,
'menu_instance': self,
self.menu_instance_context_name: self,
# Repeat some vals with backwards-compatible keys
'section_root': data['current_section_root_page'],
'current_ancestor_ids': data['current_page_ancestor_ids'],
})
if not ctx_vals.original_menu_instance and ctx_vals.current_level == 1:
data['original_menu_instance'] = self
if 'menu_items' not in kwargs:
data['menu_items'] = self.get_menu_items_for_rendering()
data.update(kwargs)
return data | [
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"ctx_vals",
"=",
"self",
".",
"_contextual_vals",
"opt_vals",
"=",
"self",
".",
"_option_vals",
"data",
"=",
"self",
".",
"create_dict_from_parent_context",
"(",
")",
"data",
".",
"update",
"(",
"ctx_vals",
".",
"_asdict",
"(",
")",
")",
"data",
".",
"update",
"(",
"{",
"'apply_active_classes'",
":",
"opt_vals",
".",
"apply_active_classes",
",",
"'allow_repeating_parents'",
":",
"opt_vals",
".",
"allow_repeating_parents",
",",
"'use_absolute_page_urls'",
":",
"opt_vals",
".",
"use_absolute_page_urls",
",",
"'max_levels'",
":",
"self",
".",
"max_levels",
",",
"'use_specific'",
":",
"self",
".",
"use_specific",
",",
"'menu_instance'",
":",
"self",
",",
"self",
".",
"menu_instance_context_name",
":",
"self",
",",
"# Repeat some vals with backwards-compatible keys",
"'section_root'",
":",
"data",
"[",
"'current_section_root_page'",
"]",
",",
"'current_ancestor_ids'",
":",
"data",
"[",
"'current_page_ancestor_ids'",
"]",
",",
"}",
")",
"if",
"not",
"ctx_vals",
".",
"original_menu_instance",
"and",
"ctx_vals",
".",
"current_level",
"==",
"1",
":",
"data",
"[",
"'original_menu_instance'",
"]",
"=",
"self",
"if",
"'menu_items'",
"not",
"in",
"kwargs",
":",
"data",
"[",
"'menu_items'",
"]",
"=",
"self",
".",
"get_menu_items_for_rendering",
"(",
")",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"data"
] | Return a dictionary containing all of the values needed to render the
menu instance to a template, including values that might be used by
the 'sub_menu' tag to render any additional levels. | [
"Return",
"a",
"dictionary",
"containing",
"all",
"of",
"the",
"values",
"needed",
"to",
"render",
"the",
"menu",
"instance",
"to",
"a",
"template",
"including",
"values",
"that",
"might",
"be",
"used",
"by",
"the",
"sub_menu",
"tag",
"to",
"render",
"any",
"additional",
"levels",
"."
] | python | train |
tensorforce/tensorforce | tensorforce/core/preprocessors/preprocessor.py | https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/core/preprocessors/preprocessor.py#L139-L150 | def processed_shape(self, shape):
"""
Shape of preprocessed state given original shape.
Args:
shape: original state shape
Returns: processed state shape
"""
for processor in self.preprocessors:
shape = processor.processed_shape(shape=shape)
return shape | [
"def",
"processed_shape",
"(",
"self",
",",
"shape",
")",
":",
"for",
"processor",
"in",
"self",
".",
"preprocessors",
":",
"shape",
"=",
"processor",
".",
"processed_shape",
"(",
"shape",
"=",
"shape",
")",
"return",
"shape"
] | Shape of preprocessed state given original shape.
Args:
shape: original state shape
Returns: processed state shape | [
"Shape",
"of",
"preprocessed",
"state",
"given",
"original",
"shape",
"."
] | python | valid |
knipknap/SpiffWorkflow | SpiffWorkflow/bpmn/parser/TaskParser.py | https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/bpmn/parser/TaskParser.py#L58-L129 | def parse_node(self):
"""
Parse this node, and all children, returning the connected task spec.
"""
try:
self.task = self.create_task()
self.task.documentation = self.parser._parse_documentation(
self.node, xpath=self.xpath, task_parser=self)
boundary_event_nodes = self.process_xpath(
'.//bpmn:boundaryEvent[@attachedToRef="%s"]' % self.get_id())
if boundary_event_nodes:
parent_task = _BoundaryEventParent(
self.spec, '%s.BoundaryEventParent' % self.get_id(),
self.task, lane=self.task.lane)
self.process_parser.parsed_nodes[
self.node.get('id')] = parent_task
parent_task.connect_outgoing(
self.task, '%s.FromBoundaryEventParent' % self.get_id(),
None, None)
for boundary_event in boundary_event_nodes:
b = self.process_parser.parse_node(boundary_event)
parent_task.connect_outgoing(
b,
'%s.FromBoundaryEventParent' % boundary_event.get(
'id'),
None, None)
else:
self.process_parser.parsed_nodes[
self.node.get('id')] = self.task
children = []
outgoing = self.process_xpath(
'.//bpmn:sequenceFlow[@sourceRef="%s"]' % self.get_id())
if len(outgoing) > 1 and not self.handles_multiple_outgoing():
raise ValidationException(
'Multiple outgoing flows are not supported for '
'tasks of type',
node=self.node,
filename=self.process_parser.filename)
for sequence_flow in outgoing:
target_ref = sequence_flow.get('targetRef')
target_node = one(
self.process_xpath('.//*[@id="%s"]' % target_ref))
c = self.process_parser.parse_node(target_node)
children.append((c, target_node, sequence_flow))
if children:
default_outgoing = self.node.get('default')
if not default_outgoing:
(c, target_node, sequence_flow) = children[0]
default_outgoing = sequence_flow.get('id')
for (c, target_node, sequence_flow) in children:
self.connect_outgoing(
c, target_node, sequence_flow,
sequence_flow.get('id') == default_outgoing)
return parent_task if boundary_event_nodes else self.task
except ValidationException:
raise
except Exception as ex:
exc_info = sys.exc_info()
tb = "".join(traceback.format_exception(
exc_info[0], exc_info[1], exc_info[2]))
LOG.error("%r\n%s", ex, tb)
raise ValidationException(
"%r" % (ex), node=self.node,
filename=self.process_parser.filename) | [
"def",
"parse_node",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"task",
"=",
"self",
".",
"create_task",
"(",
")",
"self",
".",
"task",
".",
"documentation",
"=",
"self",
".",
"parser",
".",
"_parse_documentation",
"(",
"self",
".",
"node",
",",
"xpath",
"=",
"self",
".",
"xpath",
",",
"task_parser",
"=",
"self",
")",
"boundary_event_nodes",
"=",
"self",
".",
"process_xpath",
"(",
"'.//bpmn:boundaryEvent[@attachedToRef=\"%s\"]'",
"%",
"self",
".",
"get_id",
"(",
")",
")",
"if",
"boundary_event_nodes",
":",
"parent_task",
"=",
"_BoundaryEventParent",
"(",
"self",
".",
"spec",
",",
"'%s.BoundaryEventParent'",
"%",
"self",
".",
"get_id",
"(",
")",
",",
"self",
".",
"task",
",",
"lane",
"=",
"self",
".",
"task",
".",
"lane",
")",
"self",
".",
"process_parser",
".",
"parsed_nodes",
"[",
"self",
".",
"node",
".",
"get",
"(",
"'id'",
")",
"]",
"=",
"parent_task",
"parent_task",
".",
"connect_outgoing",
"(",
"self",
".",
"task",
",",
"'%s.FromBoundaryEventParent'",
"%",
"self",
".",
"get_id",
"(",
")",
",",
"None",
",",
"None",
")",
"for",
"boundary_event",
"in",
"boundary_event_nodes",
":",
"b",
"=",
"self",
".",
"process_parser",
".",
"parse_node",
"(",
"boundary_event",
")",
"parent_task",
".",
"connect_outgoing",
"(",
"b",
",",
"'%s.FromBoundaryEventParent'",
"%",
"boundary_event",
".",
"get",
"(",
"'id'",
")",
",",
"None",
",",
"None",
")",
"else",
":",
"self",
".",
"process_parser",
".",
"parsed_nodes",
"[",
"self",
".",
"node",
".",
"get",
"(",
"'id'",
")",
"]",
"=",
"self",
".",
"task",
"children",
"=",
"[",
"]",
"outgoing",
"=",
"self",
".",
"process_xpath",
"(",
"'.//bpmn:sequenceFlow[@sourceRef=\"%s\"]'",
"%",
"self",
".",
"get_id",
"(",
")",
")",
"if",
"len",
"(",
"outgoing",
")",
">",
"1",
"and",
"not",
"self",
".",
"handles_multiple_outgoing",
"(",
")",
":",
"raise",
"ValidationException",
"(",
"'Multiple outgoing flows are not supported for '",
"'tasks of type'",
",",
"node",
"=",
"self",
".",
"node",
",",
"filename",
"=",
"self",
".",
"process_parser",
".",
"filename",
")",
"for",
"sequence_flow",
"in",
"outgoing",
":",
"target_ref",
"=",
"sequence_flow",
".",
"get",
"(",
"'targetRef'",
")",
"target_node",
"=",
"one",
"(",
"self",
".",
"process_xpath",
"(",
"'.//*[@id=\"%s\"]'",
"%",
"target_ref",
")",
")",
"c",
"=",
"self",
".",
"process_parser",
".",
"parse_node",
"(",
"target_node",
")",
"children",
".",
"append",
"(",
"(",
"c",
",",
"target_node",
",",
"sequence_flow",
")",
")",
"if",
"children",
":",
"default_outgoing",
"=",
"self",
".",
"node",
".",
"get",
"(",
"'default'",
")",
"if",
"not",
"default_outgoing",
":",
"(",
"c",
",",
"target_node",
",",
"sequence_flow",
")",
"=",
"children",
"[",
"0",
"]",
"default_outgoing",
"=",
"sequence_flow",
".",
"get",
"(",
"'id'",
")",
"for",
"(",
"c",
",",
"target_node",
",",
"sequence_flow",
")",
"in",
"children",
":",
"self",
".",
"connect_outgoing",
"(",
"c",
",",
"target_node",
",",
"sequence_flow",
",",
"sequence_flow",
".",
"get",
"(",
"'id'",
")",
"==",
"default_outgoing",
")",
"return",
"parent_task",
"if",
"boundary_event_nodes",
"else",
"self",
".",
"task",
"except",
"ValidationException",
":",
"raise",
"except",
"Exception",
"as",
"ex",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"tb",
"=",
"\"\"",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"exc_info",
"[",
"0",
"]",
",",
"exc_info",
"[",
"1",
"]",
",",
"exc_info",
"[",
"2",
"]",
")",
")",
"LOG",
".",
"error",
"(",
"\"%r\\n%s\"",
",",
"ex",
",",
"tb",
")",
"raise",
"ValidationException",
"(",
"\"%r\"",
"%",
"(",
"ex",
")",
",",
"node",
"=",
"self",
".",
"node",
",",
"filename",
"=",
"self",
".",
"process_parser",
".",
"filename",
")"
] | Parse this node, and all children, returning the connected task spec. | [
"Parse",
"this",
"node",
"and",
"all",
"children",
"returning",
"the",
"connected",
"task",
"spec",
"."
] | python | valid |
wiheto/teneto | teneto/communitydetection/tctc.py | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/tctc.py#L8-L114 | def partition_inference(tctc_mat, comp, tau, sigma, kappa):
r"""
Takes tctc trajectory matrix and returns dataframe where all multi-label communities are listed
Can take a little bit of time with large datasets and optimization could remove some for loops.
"""
communityinfo = {}
communityinfo['community'] = []
communityinfo['start'] = np.empty(0)
communityinfo['end'] = np.empty(0)
communityinfo['size'] = np.empty(0)
for i, tcomp in enumerate(comp):
# This can go in parallel loop
if len(tcomp) > 0:
for traj in tcomp:
# Check it does not already exist.
ignore = 0
preexisting = 0
if i != 0:
cutoff = i-1-kappa
if cutoff < 0:
cutoff = 0
if np.any(np.sum(np.sum(tctc_mat[traj, :, cutoff:i][:, traj], axis=0), axis=0) == np.power(len(traj), 2)):
# Make sure that a small trajectory could exist
for checknode in np.where(communityinfo['end']>=cutoff)[0]:
if traj == communityinfo['community'][checknode]:
ignore = 1
if ignore == 0:
for checknode in np.where(communityinfo['end']>=cutoff)[0]:
if set(communityinfo['community'][checknode]).issuperset(traj):
preexisting = 1
if ignore == 0:
# Check how long it continues
# For efficiency, increase in blocks
approxmaxlength = tau*2
a = np.sum(
np.sum(tctc_mat[traj, :, i:i+approxmaxlength][:, traj], axis=0), axis=0)
if len(traj)*len(traj)*approxmaxlength == a.sum():
ok = 0
ii = 1
while ok == 0:
b = np.sum(np.sum(
tctc_mat[traj, :, i+(approxmaxlength*ii):i+(approxmaxlength*(ii+1))][:, traj], axis=0), axis=0)
a = np.append(a, b)
if len(traj)*len(traj)*approxmaxlength != b.sum():
ok = 1
if i+(approxmaxlength*(ii+1)) > tctc_mat.shape[-1]:
ok = 1
ii += 1
a = np.where(a == np.power(len(traj), 2))[0]
# Add an additional value that is false in case end of time series
if len(a) == 1:
stopind = i + 1
else:
a = np.append(a, a.max()+kappa+2)
# Find the stop index (if stopind = 4 and start = 0, then tctc_mat[:,:,start:stopind]==1)
stopind = i + np.split(a, np.where(
np.diff(a) > kappa+1)[0]+1)[0][-1] + 1
# Add trajectory to dictionary
if ((stopind - i) >= tau or preexisting == 1) and len(traj) >= sigma:
communityinfo['community'].append(sorted(traj))
communityinfo['start'] = np.append(communityinfo['start'], int(i))
communityinfo['end'] = np.append(
communityinfo['end'], int(stopind))
communityinfo['size'] = np.append(communityinfo['size'], len(traj))
communityinfo = pd.DataFrame(communityinfo)
communityinfo['start'] = communityinfo['start'].astype(int)
communityinfo['end'] = communityinfo['end'].astype(int)
# First check that there is not already a trajectory that is ongoing
badrows = []
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] == v[1]['end'])
for u in communityinfo[skipselrule].iterrows():
a = 1
if u[1]['start'] > v[1]['start'] and sorted(u[1]['community']) == sorted(v[1]['community']):
badrows.append(u[0])
communityinfo = communityinfo.drop(badrows)
# Then see if any subset trajectory can be placed earlier in time.
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] <= v[1]['start']) & (
communityinfo['end']+kappa >= v[1]['start'])
for u in communityinfo[skipselrule].iterrows():
a = 1
if set(u[1]['community']).issuperset(v[1]['community']):
communityinfo.loc[v[0], 'start'] = u[1]['start']
# It is possible to make the condition below effective_length
communityinfo['length'] = np.array(communityinfo['end']) - np.array(communityinfo['start'])
communityinfo = communityinfo[communityinfo['length'] >= tau]
communityinfo = communityinfo[communityinfo['size'] >= sigma]
# Make sure that the traj is not completely engulfed by another
badrows = []
if kappa > 0:
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] == v[1]['end']) & (
communityinfo['start'] < v[1]['start'])
for u in communityinfo[skipselrule].iterrows():
if set(v[1]['community']).issubset(u[1]['community']):
badrows.append(v[0])
communityinfo = communityinfo.drop(badrows)
return communityinfo | [
"def",
"partition_inference",
"(",
"tctc_mat",
",",
"comp",
",",
"tau",
",",
"sigma",
",",
"kappa",
")",
":",
"communityinfo",
"=",
"{",
"}",
"communityinfo",
"[",
"'community'",
"]",
"=",
"[",
"]",
"communityinfo",
"[",
"'start'",
"]",
"=",
"np",
".",
"empty",
"(",
"0",
")",
"communityinfo",
"[",
"'end'",
"]",
"=",
"np",
".",
"empty",
"(",
"0",
")",
"communityinfo",
"[",
"'size'",
"]",
"=",
"np",
".",
"empty",
"(",
"0",
")",
"for",
"i",
",",
"tcomp",
"in",
"enumerate",
"(",
"comp",
")",
":",
"# This can go in parallel loop",
"if",
"len",
"(",
"tcomp",
")",
">",
"0",
":",
"for",
"traj",
"in",
"tcomp",
":",
"# Check it does not already exist.",
"ignore",
"=",
"0",
"preexisting",
"=",
"0",
"if",
"i",
"!=",
"0",
":",
"cutoff",
"=",
"i",
"-",
"1",
"-",
"kappa",
"if",
"cutoff",
"<",
"0",
":",
"cutoff",
"=",
"0",
"if",
"np",
".",
"any",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"sum",
"(",
"tctc_mat",
"[",
"traj",
",",
":",
",",
"cutoff",
":",
"i",
"]",
"[",
":",
",",
"traj",
"]",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
")",
"==",
"np",
".",
"power",
"(",
"len",
"(",
"traj",
")",
",",
"2",
")",
")",
":",
"# Make sure that a small trajectory could exist ",
"for",
"checknode",
"in",
"np",
".",
"where",
"(",
"communityinfo",
"[",
"'end'",
"]",
">=",
"cutoff",
")",
"[",
"0",
"]",
":",
"if",
"traj",
"==",
"communityinfo",
"[",
"'community'",
"]",
"[",
"checknode",
"]",
":",
"ignore",
"=",
"1",
"if",
"ignore",
"==",
"0",
":",
"for",
"checknode",
"in",
"np",
".",
"where",
"(",
"communityinfo",
"[",
"'end'",
"]",
">=",
"cutoff",
")",
"[",
"0",
"]",
":",
"if",
"set",
"(",
"communityinfo",
"[",
"'community'",
"]",
"[",
"checknode",
"]",
")",
".",
"issuperset",
"(",
"traj",
")",
":",
"preexisting",
"=",
"1",
"if",
"ignore",
"==",
"0",
":",
"# Check how long it continues",
"# For efficiency, increase in blocks",
"approxmaxlength",
"=",
"tau",
"*",
"2",
"a",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"sum",
"(",
"tctc_mat",
"[",
"traj",
",",
":",
",",
"i",
":",
"i",
"+",
"approxmaxlength",
"]",
"[",
":",
",",
"traj",
"]",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
")",
"if",
"len",
"(",
"traj",
")",
"*",
"len",
"(",
"traj",
")",
"*",
"approxmaxlength",
"==",
"a",
".",
"sum",
"(",
")",
":",
"ok",
"=",
"0",
"ii",
"=",
"1",
"while",
"ok",
"==",
"0",
":",
"b",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"sum",
"(",
"tctc_mat",
"[",
"traj",
",",
":",
",",
"i",
"+",
"(",
"approxmaxlength",
"*",
"ii",
")",
":",
"i",
"+",
"(",
"approxmaxlength",
"*",
"(",
"ii",
"+",
"1",
")",
")",
"]",
"[",
":",
",",
"traj",
"]",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
")",
"a",
"=",
"np",
".",
"append",
"(",
"a",
",",
"b",
")",
"if",
"len",
"(",
"traj",
")",
"*",
"len",
"(",
"traj",
")",
"*",
"approxmaxlength",
"!=",
"b",
".",
"sum",
"(",
")",
":",
"ok",
"=",
"1",
"if",
"i",
"+",
"(",
"approxmaxlength",
"*",
"(",
"ii",
"+",
"1",
")",
")",
">",
"tctc_mat",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"ok",
"=",
"1",
"ii",
"+=",
"1",
"a",
"=",
"np",
".",
"where",
"(",
"a",
"==",
"np",
".",
"power",
"(",
"len",
"(",
"traj",
")",
",",
"2",
")",
")",
"[",
"0",
"]",
"# Add an additional value that is false in case end of time series",
"if",
"len",
"(",
"a",
")",
"==",
"1",
":",
"stopind",
"=",
"i",
"+",
"1",
"else",
":",
"a",
"=",
"np",
".",
"append",
"(",
"a",
",",
"a",
".",
"max",
"(",
")",
"+",
"kappa",
"+",
"2",
")",
"# Find the stop index (if stopind = 4 and start = 0, then tctc_mat[:,:,start:stopind]==1)",
"stopind",
"=",
"i",
"+",
"np",
".",
"split",
"(",
"a",
",",
"np",
".",
"where",
"(",
"np",
".",
"diff",
"(",
"a",
")",
">",
"kappa",
"+",
"1",
")",
"[",
"0",
"]",
"+",
"1",
")",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"+",
"1",
"# Add trajectory to dictionary",
"if",
"(",
"(",
"stopind",
"-",
"i",
")",
">=",
"tau",
"or",
"preexisting",
"==",
"1",
")",
"and",
"len",
"(",
"traj",
")",
">=",
"sigma",
":",
"communityinfo",
"[",
"'community'",
"]",
".",
"append",
"(",
"sorted",
"(",
"traj",
")",
")",
"communityinfo",
"[",
"'start'",
"]",
"=",
"np",
".",
"append",
"(",
"communityinfo",
"[",
"'start'",
"]",
",",
"int",
"(",
"i",
")",
")",
"communityinfo",
"[",
"'end'",
"]",
"=",
"np",
".",
"append",
"(",
"communityinfo",
"[",
"'end'",
"]",
",",
"int",
"(",
"stopind",
")",
")",
"communityinfo",
"[",
"'size'",
"]",
"=",
"np",
".",
"append",
"(",
"communityinfo",
"[",
"'size'",
"]",
",",
"len",
"(",
"traj",
")",
")",
"communityinfo",
"=",
"pd",
".",
"DataFrame",
"(",
"communityinfo",
")",
"communityinfo",
"[",
"'start'",
"]",
"=",
"communityinfo",
"[",
"'start'",
"]",
".",
"astype",
"(",
"int",
")",
"communityinfo",
"[",
"'end'",
"]",
"=",
"communityinfo",
"[",
"'end'",
"]",
".",
"astype",
"(",
"int",
")",
"# First check that there is not already a trajectory that is ongoing",
"badrows",
"=",
"[",
"]",
"for",
"v",
"in",
"communityinfo",
".",
"iterrows",
"(",
")",
":",
"skipselrule",
"=",
"(",
"communityinfo",
"[",
"'end'",
"]",
"==",
"v",
"[",
"1",
"]",
"[",
"'end'",
"]",
")",
"for",
"u",
"in",
"communityinfo",
"[",
"skipselrule",
"]",
".",
"iterrows",
"(",
")",
":",
"a",
"=",
"1",
"if",
"u",
"[",
"1",
"]",
"[",
"'start'",
"]",
">",
"v",
"[",
"1",
"]",
"[",
"'start'",
"]",
"and",
"sorted",
"(",
"u",
"[",
"1",
"]",
"[",
"'community'",
"]",
")",
"==",
"sorted",
"(",
"v",
"[",
"1",
"]",
"[",
"'community'",
"]",
")",
":",
"badrows",
".",
"append",
"(",
"u",
"[",
"0",
"]",
")",
"communityinfo",
"=",
"communityinfo",
".",
"drop",
"(",
"badrows",
")",
"# Then see if any subset trajectory can be placed earlier in time.",
"for",
"v",
"in",
"communityinfo",
".",
"iterrows",
"(",
")",
":",
"skipselrule",
"=",
"(",
"communityinfo",
"[",
"'end'",
"]",
"<=",
"v",
"[",
"1",
"]",
"[",
"'start'",
"]",
")",
"&",
"(",
"communityinfo",
"[",
"'end'",
"]",
"+",
"kappa",
">=",
"v",
"[",
"1",
"]",
"[",
"'start'",
"]",
")",
"for",
"u",
"in",
"communityinfo",
"[",
"skipselrule",
"]",
".",
"iterrows",
"(",
")",
":",
"a",
"=",
"1",
"if",
"set",
"(",
"u",
"[",
"1",
"]",
"[",
"'community'",
"]",
")",
".",
"issuperset",
"(",
"v",
"[",
"1",
"]",
"[",
"'community'",
"]",
")",
":",
"communityinfo",
".",
"loc",
"[",
"v",
"[",
"0",
"]",
",",
"'start'",
"]",
"=",
"u",
"[",
"1",
"]",
"[",
"'start'",
"]",
"# It is possible to make the condition below effective_length",
"communityinfo",
"[",
"'length'",
"]",
"=",
"np",
".",
"array",
"(",
"communityinfo",
"[",
"'end'",
"]",
")",
"-",
"np",
".",
"array",
"(",
"communityinfo",
"[",
"'start'",
"]",
")",
"communityinfo",
"=",
"communityinfo",
"[",
"communityinfo",
"[",
"'length'",
"]",
">=",
"tau",
"]",
"communityinfo",
"=",
"communityinfo",
"[",
"communityinfo",
"[",
"'size'",
"]",
">=",
"sigma",
"]",
"# Make sure that the traj is not completely enguled by another",
"badrows",
"=",
"[",
"]",
"if",
"kappa",
">",
"0",
":",
"for",
"v",
"in",
"communityinfo",
".",
"iterrows",
"(",
")",
":",
"skipselrule",
"=",
"(",
"communityinfo",
"[",
"'end'",
"]",
"==",
"v",
"[",
"1",
"]",
"[",
"'end'",
"]",
")",
"&",
"(",
"communityinfo",
"[",
"'start'",
"]",
"<",
"v",
"[",
"1",
"]",
"[",
"'start'",
"]",
")",
"for",
"u",
"in",
"communityinfo",
"[",
"skipselrule",
"]",
".",
"iterrows",
"(",
")",
":",
"if",
"set",
"(",
"v",
"[",
"1",
"]",
"[",
"'community'",
"]",
")",
".",
"issubset",
"(",
"u",
"[",
"1",
"]",
"[",
"'community'",
"]",
")",
":",
"badrows",
".",
"append",
"(",
"v",
"[",
"0",
"]",
")",
"communityinfo",
"=",
"communityinfo",
".",
"drop",
"(",
"badrows",
")",
"return",
"communityinfo"
] | r"""
Takes tctc trajectory matrix and returns dataframe where all multi-label communities are listed
Can take a little bit of time with large datasets and optimization could remove some for loops. | [
"r",
"Takes",
"tctc",
"trajectory",
"matrix",
"and",
"returns",
"dataframe",
"where",
"all",
"multi",
"-",
"label",
"communities",
"are",
"listed"
] | python | train |
NatLibFi/Skosify | skosify/infer.py | https://github.com/NatLibFi/Skosify/blob/1d269987f10df08e706272dcf6a86aef4abebcde/skosify/infer.py#L72-L91 | def skos_hierarchical_mappings(rdf, narrower=True):
"""Infer skos:broadMatch/skos:narrowMatch (S43) and add the super-properties
skos:broader/skos:narrower (S41).
:param bool narrower: If set to False, skos:narrowMatch will not be added,
but rather removed.
"""
for s, o in rdf.subject_objects(SKOS.broadMatch):
rdf.add((s, SKOS.broader, o))
if narrower:
rdf.add((o, SKOS.narrowMatch, s))
rdf.add((o, SKOS.narrower, s))
for s, o in rdf.subject_objects(SKOS.narrowMatch):
rdf.add((o, SKOS.broadMatch, s))
rdf.add((o, SKOS.broader, s))
if narrower:
rdf.add((s, SKOS.narrower, o))
else:
rdf.remove((s, SKOS.narrowMatch, o)) | [
"def",
"skos_hierarchical_mappings",
"(",
"rdf",
",",
"narrower",
"=",
"True",
")",
":",
"for",
"s",
",",
"o",
"in",
"rdf",
".",
"subject_objects",
"(",
"SKOS",
".",
"broadMatch",
")",
":",
"rdf",
".",
"add",
"(",
"(",
"s",
",",
"SKOS",
".",
"broader",
",",
"o",
")",
")",
"if",
"narrower",
":",
"rdf",
".",
"add",
"(",
"(",
"o",
",",
"SKOS",
".",
"narrowMatch",
",",
"s",
")",
")",
"rdf",
".",
"add",
"(",
"(",
"o",
",",
"SKOS",
".",
"narrower",
",",
"s",
")",
")",
"for",
"s",
",",
"o",
"in",
"rdf",
".",
"subject_objects",
"(",
"SKOS",
".",
"narrowMatch",
")",
":",
"rdf",
".",
"add",
"(",
"(",
"o",
",",
"SKOS",
".",
"broadMatch",
",",
"s",
")",
")",
"rdf",
".",
"add",
"(",
"(",
"o",
",",
"SKOS",
".",
"broader",
",",
"s",
")",
")",
"if",
"narrower",
":",
"rdf",
".",
"add",
"(",
"(",
"s",
",",
"SKOS",
".",
"narrower",
",",
"o",
")",
")",
"else",
":",
"rdf",
".",
"remove",
"(",
"(",
"s",
",",
"SKOS",
".",
"narrowMatch",
",",
"o",
")",
")"
] | Infer skos:broadMatch/skos:narrowMatch (S43) and add the super-properties
skos:broader/skos:narrower (S41).
:param bool narrower: If set to False, skos:narrowMatch will not be added,
but rather removed. | [
"Infer",
"skos",
":",
"broadMatch",
"/",
"skos",
":",
"narrowMatch",
"(",
"S43",
")",
"and",
"add",
"the",
"super",
"-",
"properties",
"skos",
":",
"broader",
"/",
"skos",
":",
"narrower",
"(",
"S41",
")",
"."
] | python | train |
shalabhms/reliable-collections-cli | rcctl/rcctl/custom_reliablecollections.py | https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/custom_reliablecollections.py#L126-L145 | def execute_reliabledictionary(client, application_name, service_name, input_file):
"""Execute create, update, delete operations on existing reliable dictionaries.
carry out create, update and delete operations on existing reliable dictionaries for given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
:param output_file: input file with list of json to provide the operation information for reliable dictionaries.
"""
cluster = Cluster.from_sfclient(client)
service = cluster.get_application(application_name).get_service(service_name)
# call get service with headers and params
with open(input_file) as json_file:
json_data = json.load(json_file)
service.execute(json_data)
return | [
"def",
"execute_reliabledictionary",
"(",
"client",
",",
"application_name",
",",
"service_name",
",",
"input_file",
")",
":",
"cluster",
"=",
"Cluster",
".",
"from_sfclient",
"(",
"client",
")",
"service",
"=",
"cluster",
".",
"get_application",
"(",
"application_name",
")",
".",
"get_service",
"(",
"service_name",
")",
"# call get service with headers and params",
"with",
"open",
"(",
"input_file",
")",
"as",
"json_file",
":",
"json_data",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"service",
".",
"execute",
"(",
"json_data",
")",
"return"
] | Execute create, update, delete operations on existing reliable dictionaries.
carry out create, update and delete operations on existing reliable dictionaries for given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
:param input_file: input file with list of json to provide the operation information for reliable dictionaries. | [
"Execute",
"create",
"update",
"delete",
"operations",
"on",
"existing",
"reliable",
"dictionaries",
"."
] | python | valid |
futurecolors/django-geoip | django_geoip/base.py | https://github.com/futurecolors/django-geoip/blob/f9eee4bcad40508089b184434b79826f842d7bd0/django_geoip/base.py#L73-L86 | def _get_ip_range(self):
"""
Fetches IpRange instance if request IP is found in database.
:param request: A usual request object
:type request: HttpRequest
:return: IpRange object or None
"""
ip = self._get_real_ip()
try:
geobase_entry = IpRange.objects.by_ip(ip)
except IpRange.DoesNotExist:
geobase_entry = None
return geobase_entry | [
"def",
"_get_ip_range",
"(",
"self",
")",
":",
"ip",
"=",
"self",
".",
"_get_real_ip",
"(",
")",
"try",
":",
"geobase_entry",
"=",
"IpRange",
".",
"objects",
".",
"by_ip",
"(",
"ip",
")",
"except",
"IpRange",
".",
"DoesNotExist",
":",
"geobase_entry",
"=",
"None",
"return",
"geobase_entry"
] | Fetches IpRange instance if request IP is found in database.
:param request: A usual request object
:type request: HttpRequest
:return: IpRange object or None | [
"Fetches",
"IpRange",
"instance",
"if",
"request",
"IP",
"is",
"found",
"in",
"database",
"."
] | python | train |
opencobra/cobrapy | cobra/core/model.py | https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/model.py#L1055-L1082 | def optimize(self, objective_sense=None, raise_error=False):
"""
Optimize the model using flux balance analysis.
Parameters
----------
objective_sense : {None, 'maximize' 'minimize'}, optional
Whether fluxes should be maximized or minimized. In case of None,
the previous direction is used.
raise_error : bool
If true, raise an OptimizationError if solver status is not
optimal.
Notes
-----
Only the most commonly used parameters are presented here. Additional
parameters for cobra.solvers may be available and specified with the
appropriate keyword argument.
"""
original_direction = self.objective.direction
self.objective.direction = \
{"maximize": "max", "minimize": "min"}.get(
objective_sense, original_direction)
self.slim_optimize()
solution = get_solution(self, raise_error=raise_error)
self.objective.direction = original_direction
return solution | [
"def",
"optimize",
"(",
"self",
",",
"objective_sense",
"=",
"None",
",",
"raise_error",
"=",
"False",
")",
":",
"original_direction",
"=",
"self",
".",
"objective",
".",
"direction",
"self",
".",
"objective",
".",
"direction",
"=",
"{",
"\"maximize\"",
":",
"\"max\"",
",",
"\"minimize\"",
":",
"\"min\"",
"}",
".",
"get",
"(",
"objective_sense",
",",
"original_direction",
")",
"self",
".",
"slim_optimize",
"(",
")",
"solution",
"=",
"get_solution",
"(",
"self",
",",
"raise_error",
"=",
"raise_error",
")",
"self",
".",
"objective",
".",
"direction",
"=",
"original_direction",
"return",
"solution"
] | Optimize the model using flux balance analysis.
Parameters
----------
objective_sense : {None, 'maximize' 'minimize'}, optional
Whether fluxes should be maximized or minimized. In case of None,
the previous direction is used.
raise_error : bool
If true, raise an OptimizationError if solver status is not
optimal.
Notes
-----
Only the most commonly used parameters are presented here. Additional
parameters for cobra.solvers may be available and specified with the
appropriate keyword argument. | [
"Optimize",
"the",
"model",
"using",
"flux",
"balance",
"analysis",
"."
] | python | valid |
FujiMakoto/AgentML | agentml/parser/trigger/condition/types/topic.py | https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/trigger/condition/types/topic.py#L14-L33 | def get(self, agentml, user=None, key=None):
"""
Evaluate and return the current active topic
:param user: The active user object
:type user: agentml.User or None
:param agentml: The active AgentML instance
:type agentml: AgentML
:param key: The user id (defaults to the current user if None)
:type key: str
:return: Active topic of the user
:rtype : str or None
"""
user = agentml.get_user(key) if key else user
if not user:
return
return user.topic | [
"def",
"get",
"(",
"self",
",",
"agentml",
",",
"user",
"=",
"None",
",",
"key",
"=",
"None",
")",
":",
"user",
"=",
"agentml",
".",
"get_user",
"(",
"key",
")",
"if",
"key",
"else",
"user",
"if",
"not",
"user",
":",
"return",
"return",
"user",
".",
"topic"
] | Evaluate and return the current active topic
:param user: The active user object
:type user: agentml.User or None
:param agentml: The active AgentML instance
:type agentml: AgentML
:param key: The user id (defaults to the current user if None)
:type key: str
:return: Active topic of the user
:rtype : str or None | [
"Evaluate",
"and",
"return",
"the",
"current",
"active",
"topic",
":",
"param",
"user",
":",
"The",
"active",
"user",
"object",
":",
"type",
"user",
":",
"agentml",
".",
"User",
"or",
"None"
] | python | train |
numenta/nupic | src/nupic/data/dict_utils.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/dict_utils.py#L128-L173 | def dictDiff(da, db):
""" Compares two python dictionaries at the top level and return differences
da: first dictionary
db: second dictionary
Returns: None if dictionaries test equal; otherwise returns a
dictionary as follows:
{
'inAButNotInB':
<sequence of keys that are in da but not in db>
'inBButNotInA':
<sequence of keys that are in db but not in da>
'differentValues':
<sequence of keys whose corresponding values differ
between da and db>
}
"""
different = False
resultDict = dict()
resultDict['inAButNotInB'] = set(da) - set(db)
if resultDict['inAButNotInB']:
different = True
resultDict['inBButNotInA'] = set(db) - set(da)
if resultDict['inBButNotInA']:
different = True
resultDict['differentValues'] = []
for key in (set(da) - resultDict['inAButNotInB']):
comparisonResult = da[key] == db[key]
if isinstance(comparisonResult, bool):
isEqual = comparisonResult
else:
# This handles numpy arrays (but only at the top level)
isEqual = comparisonResult.all()
if not isEqual:
resultDict['differentValues'].append(key)
different = True
assert (((resultDict['inAButNotInB'] or resultDict['inBButNotInA'] or
resultDict['differentValues']) and different) or not different)
return resultDict if different else None | [
"def",
"dictDiff",
"(",
"da",
",",
"db",
")",
":",
"different",
"=",
"False",
"resultDict",
"=",
"dict",
"(",
")",
"resultDict",
"[",
"'inAButNotInB'",
"]",
"=",
"set",
"(",
"da",
")",
"-",
"set",
"(",
"db",
")",
"if",
"resultDict",
"[",
"'inAButNotInB'",
"]",
":",
"different",
"=",
"True",
"resultDict",
"[",
"'inBButNotInA'",
"]",
"=",
"set",
"(",
"db",
")",
"-",
"set",
"(",
"da",
")",
"if",
"resultDict",
"[",
"'inBButNotInA'",
"]",
":",
"different",
"=",
"True",
"resultDict",
"[",
"'differentValues'",
"]",
"=",
"[",
"]",
"for",
"key",
"in",
"(",
"set",
"(",
"da",
")",
"-",
"resultDict",
"[",
"'inAButNotInB'",
"]",
")",
":",
"comparisonResult",
"=",
"da",
"[",
"key",
"]",
"==",
"db",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"comparisonResult",
",",
"bool",
")",
":",
"isEqual",
"=",
"comparisonResult",
"else",
":",
"# This handles numpy arrays (but only at the top level)",
"isEqual",
"=",
"comparisonResult",
".",
"all",
"(",
")",
"if",
"not",
"isEqual",
":",
"resultDict",
"[",
"'differentValues'",
"]",
".",
"append",
"(",
"key",
")",
"different",
"=",
"True",
"assert",
"(",
"(",
"(",
"resultDict",
"[",
"'inAButNotInB'",
"]",
"or",
"resultDict",
"[",
"'inBButNotInA'",
"]",
"or",
"resultDict",
"[",
"'differentValues'",
"]",
")",
"and",
"different",
")",
"or",
"not",
"different",
")",
"return",
"resultDict",
"if",
"different",
"else",
"None"
] | Compares two python dictionaries at the top level and return differences
da: first dictionary
db: second dictionary
Returns: None if dictionaries test equal; otherwise returns a
dictionary as follows:
{
'inAButNotInB':
<sequence of keys that are in da but not in db>
'inBButNotInA':
<sequence of keys that are in db but not in da>
'differentValues':
<sequence of keys whose corresponding values differ
between da and db>
} | [
"Compares",
"two",
"python",
"dictionaries",
"at",
"the",
"top",
"level",
"and",
"return",
"differences"
] | python | valid |
childsish/lhc-python | lhc/misc/performance_measures.py | https://github.com/childsish/lhc-python/blob/0a669f46a40a39f24d28665e8b5b606dc7e86beb/lhc/misc/performance_measures.py#L64-L79 | def mcc(tp, tn, fp, fn):
""" Matthew's Correlation Coefficient [-1, 1]
0 = you're just guessing
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float
"""
if tp + fp == 0 or tp + fn == 0 or tn + fp == 0 or tn + fn == 0:
den = 1.0
else:
den = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return (tp * tn - fp * fn) / den | [
"def",
"mcc",
"(",
"tp",
",",
"tn",
",",
"fp",
",",
"fn",
")",
":",
"if",
"tp",
"+",
"fp",
"==",
"0",
"or",
"tp",
"+",
"fn",
"==",
"0",
"or",
"tn",
"+",
"fp",
"==",
"0",
"or",
"tn",
"+",
"fn",
"==",
"0",
":",
"den",
"=",
"1.0",
"else",
":",
"den",
"=",
"math",
".",
"sqrt",
"(",
"(",
"tp",
"+",
"fp",
")",
"*",
"(",
"tp",
"+",
"fn",
")",
"*",
"(",
"tn",
"+",
"fp",
")",
"*",
"(",
"tn",
"+",
"fn",
")",
")",
"return",
"(",
"tp",
"*",
"tn",
"-",
"fp",
"*",
"fn",
")",
"/",
"den"
] | Matthew's Correlation Coefficient [-1, 1]
0 = you're just guessing
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float | [
"Matthew",
"s",
"Correlation",
"Coefficient",
"[",
"-",
"1",
"1",
"]",
"0",
"=",
"you",
"re",
"just",
"guessing",
":",
"param",
"int",
"tp",
":",
"number",
"of",
"true",
"positives",
":",
"param",
"int",
"tn",
":",
"number",
"of",
"true",
"negatives",
":",
"param",
"int",
"fp",
":",
"number",
"of",
"false",
"positives",
":",
"param",
"int",
"fn",
":",
"number",
"of",
"false",
"negatives",
":",
"rtype",
":",
"float"
] | python | train |
pyQode/pyqode.core | pyqode/core/widgets/menu_recents.py | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/menu_recents.py#L34-L42 | def remove(self, filename):
"""
Remove a file path from the list of recent files.
:param filename: Path of the file to remove
"""
files = self.get_value('list', [])
files.remove(filename)
self.set_value('list', files)
self.updated.emit() | [
"def",
"remove",
"(",
"self",
",",
"filename",
")",
":",
"files",
"=",
"self",
".",
"get_value",
"(",
"'list'",
",",
"[",
"]",
")",
"files",
".",
"remove",
"(",
"filename",
")",
"self",
".",
"set_value",
"(",
"'list'",
",",
"files",
")",
"self",
".",
"updated",
".",
"emit",
"(",
")"
] | Remove a file path from the list of recent files.
:param filename: Path of the file to remove | [
"Remove",
"a",
"file",
"path",
"from",
"the",
"list",
"of",
"recent",
"files",
".",
":",
"param",
"filename",
":",
"Path",
"of",
"the",
"file",
"to",
"remove"
] | python | train |
quodlibet/mutagen | mutagen/id3/_file.py | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/id3/_file.py#L223-L274 | def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
Args:
filething (filething):
Filename to save the tag to. If no filename is given,
the one most recently loaded is used.
v1 (ID3v1SaveOptions):
if 0, ID3v1 tags will be removed.
if 1, ID3v1 tags will be updated but not added.
if 2, ID3v1 tags will be created and/or updated
v2 (int):
version of ID3v2 tags (3 or 4).
v23_sep (text):
the separator used to join multiple text values
if v2_version == 3. Defaults to '/' but if it's None
will be the ID3v2.4 null separator.
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
tags, you must call method update_to_v23 before saving the file.
The lack of a way to update only an ID3v1 tag is intentional.
"""
f = filething.fileobj
try:
header = ID3Header(filething.fileobj)
except ID3NoHeaderError:
old_size = 0
else:
old_size = header.size
data = self._prepare_data(
f, 0, old_size, v2_version, v23_sep, padding)
new_size = len(data)
if (old_size < new_size):
insert_bytes(f, new_size - old_size, old_size)
elif (old_size > new_size):
delete_bytes(f, old_size - new_size, new_size)
f.seek(0)
f.write(data)
self.__save_v1(f, v1) | [
"def",
"save",
"(",
"self",
",",
"filething",
"=",
"None",
",",
"v1",
"=",
"1",
",",
"v2_version",
"=",
"4",
",",
"v23_sep",
"=",
"'/'",
",",
"padding",
"=",
"None",
")",
":",
"f",
"=",
"filething",
".",
"fileobj",
"try",
":",
"header",
"=",
"ID3Header",
"(",
"filething",
".",
"fileobj",
")",
"except",
"ID3NoHeaderError",
":",
"old_size",
"=",
"0",
"else",
":",
"old_size",
"=",
"header",
".",
"size",
"data",
"=",
"self",
".",
"_prepare_data",
"(",
"f",
",",
"0",
",",
"old_size",
",",
"v2_version",
",",
"v23_sep",
",",
"padding",
")",
"new_size",
"=",
"len",
"(",
"data",
")",
"if",
"(",
"old_size",
"<",
"new_size",
")",
":",
"insert_bytes",
"(",
"f",
",",
"new_size",
"-",
"old_size",
",",
"old_size",
")",
"elif",
"(",
"old_size",
">",
"new_size",
")",
":",
"delete_bytes",
"(",
"f",
",",
"old_size",
"-",
"new_size",
",",
"new_size",
")",
"f",
".",
"seek",
"(",
"0",
")",
"f",
".",
"write",
"(",
"data",
")",
"self",
".",
"__save_v1",
"(",
"f",
",",
"v1",
")"
] | save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
Args:
filething (filething):
Filename to save the tag to. If no filename is given,
the one most recently loaded is used.
v1 (ID3v1SaveOptions):
if 0, ID3v1 tags will be removed.
if 1, ID3v1 tags will be updated but not added.
if 2, ID3v1 tags will be created and/or updated
v2 (int):
version of ID3v2 tags (3 or 4).
v23_sep (text):
the separator used to join multiple text values
if v2_version == 3. Defaults to '/' but if it's None
will be the ID3v2.4 null separator.
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
tags, you must call method update_to_v23 before saving the file.
The lack of a way to update only an ID3v1 tag is intentional. | [
"save",
"(",
"filething",
"=",
"None",
"v1",
"=",
"1",
"v2_version",
"=",
"4",
"v23_sep",
"=",
"/",
"padding",
"=",
"None",
")"
] | python | train |
numenta/nupic | src/nupic/algorithms/knn_classifier.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/knn_classifier.py#L309-L321 | def removeIds(self, idsToRemove):
"""
There are two caveats. First, this is a potentially slow operation. Second,
pattern indices will shift if patterns before them are removed.
:param idsToRemove: A list of row indices to remove.
"""
# Form a list of all categories to remove
rowsToRemove = [k for k, rowID in enumerate(self._categoryRecencyList) \
if rowID in idsToRemove]
# Remove rows from the classifier
self._removeRows(rowsToRemove) | [
"def",
"removeIds",
"(",
"self",
",",
"idsToRemove",
")",
":",
"# Form a list of all categories to remove",
"rowsToRemove",
"=",
"[",
"k",
"for",
"k",
",",
"rowID",
"in",
"enumerate",
"(",
"self",
".",
"_categoryRecencyList",
")",
"if",
"rowID",
"in",
"idsToRemove",
"]",
"# Remove rows from the classifier",
"self",
".",
"_removeRows",
"(",
"rowsToRemove",
")"
] | There are two caveats. First, this is a potentially slow operation. Second,
pattern indices will shift if patterns before them are removed.
:param idsToRemove: A list of row indices to remove. | [
"There",
"are",
"two",
"caveats",
".",
"First",
"this",
"is",
"a",
"potentially",
"slow",
"operation",
".",
"Second",
"pattern",
"indices",
"will",
"shift",
"if",
"patterns",
"before",
"them",
"are",
"removed",
"."
] | python | valid |
flying-sheep/smart-progress | smart_progress.py | https://github.com/flying-sheep/smart-progress/blob/1091a0a9cc2d7a6304f992d13cb718d5150a64c6/smart_progress.py#L58-L74 | def progressbar(
iterable=None, length=None, label=None,
show_eta=True, show_percent=None, show_pos=False, item_show_func=None,
fill_char='#', empty_char='-', bar_template='%(label)s [%(bar)s] %(info)s', info_sep=' ',
width=36, file=None, color=None):
"""Create a progressbar that works in Jupyter/IPython notebooks and the terminal"""
try:
return IPyBackend(iterable, length, label=label,
show_eta=show_eta, show_percent=show_percent, show_pos=show_pos,
item_show_func=item_show_func, info_sep=info_sep)
except (ImportError, RuntimeError): #fall back if ipython is not installed or no notebook is running
return click.progressbar(
iterable, length, label,
show_eta, show_percent, show_pos, item_show_func,
fill_char, empty_char, bar_template, info_sep,
width, file, color) | [
"def",
"progressbar",
"(",
"iterable",
"=",
"None",
",",
"length",
"=",
"None",
",",
"label",
"=",
"None",
",",
"show_eta",
"=",
"True",
",",
"show_percent",
"=",
"None",
",",
"show_pos",
"=",
"False",
",",
"item_show_func",
"=",
"None",
",",
"fill_char",
"=",
"'#'",
",",
"empty_char",
"=",
"'-'",
",",
"bar_template",
"=",
"'%(label)s [%(bar)s] %(info)s'",
",",
"info_sep",
"=",
"' '",
",",
"width",
"=",
"36",
",",
"file",
"=",
"None",
",",
"color",
"=",
"None",
")",
":",
"try",
":",
"return",
"IPyBackend",
"(",
"iterable",
",",
"length",
",",
"label",
"=",
"label",
",",
"show_eta",
"=",
"show_eta",
",",
"show_percent",
"=",
"show_percent",
",",
"show_pos",
"=",
"show_pos",
",",
"item_show_func",
"=",
"item_show_func",
",",
"info_sep",
"=",
"info_sep",
")",
"except",
"(",
"ImportError",
",",
"RuntimeError",
")",
":",
"#fall back if ipython is not installed or no notebook is running",
"return",
"click",
".",
"progressbar",
"(",
"iterable",
",",
"length",
",",
"label",
",",
"show_eta",
",",
"show_percent",
",",
"show_pos",
",",
"item_show_func",
",",
"fill_char",
",",
"empty_char",
",",
"bar_template",
",",
"info_sep",
",",
"width",
",",
"file",
",",
"color",
")"
] | Create a progressbar that works in Jupyter/IPython notebooks and the terminal | [
"Create",
"a",
"progressbar",
"that",
"works",
"in",
"Jupyter",
"/",
"IPython",
"notebooks",
"and",
"the",
"terminal"
] | python | train |
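A hedged usage sketch follows; it assumes smart_progress is importable and that the wrapper keeps click's context-manager interface in both the notebook backend and the click fallback, which the except branch suggests but the snippet above does not prove.

# Hedged usage sketch (assumes the click-style context-manager interface applies
# whether the IPython backend or the click fallback is chosen).
import time
from smart_progress import progressbar

with progressbar(range(20), label='processing') as bar:
    for _ in bar:
        time.sleep(0.01)  # stand-in for real work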
basho/riak-python-client | riak/client/transport.py | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/client/transport.py#L76-L95 | def retry_count(self, retries):
"""
retry_count(retries)
Modifies the number of retries for the scope of the ``with``
statement (in the current thread).
Example::
with client.retry_count(10):
client.ping()
"""
if not isinstance(retries, int):
raise TypeError("retries must be an integer")
old_retries, self.retries = self.retries, retries
try:
yield
finally:
self.retries = old_retries | [
"def",
"retry_count",
"(",
"self",
",",
"retries",
")",
":",
"if",
"not",
"isinstance",
"(",
"retries",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"retries must be an integer\"",
")",
"old_retries",
",",
"self",
".",
"retries",
"=",
"self",
".",
"retries",
",",
"retries",
"try",
":",
"yield",
"finally",
":",
"self",
".",
"retries",
"=",
"old_retries"
] | retry_count(retries)
Modifies the number of retries for the scope of the ``with``
statement (in the current thread).
Example::
with client.retry_count(10):
client.ping() | [
"retry_count",
"(",
"retries",
")"
] | python | train |
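The swap-yield-restore pattern used by retry_count is worth isolating; the sketch below reimplements it generically with contextlib and is illustrative only, not part of the riak client API.

# Generic sketch of the swap-and-restore pattern behind retry_count
# (illustrative only; class and attribute names are hypothetical).
from contextlib import contextmanager

class Client:
    def __init__(self):
        self.retries = 3

    @contextmanager
    def retry_count(self, retries):
        if not isinstance(retries, int):
            raise TypeError("retries must be an integer")
        old, self.retries = self.retries, retries
        try:
            yield
        finally:
            self.retries = old   # restored even if the body raises

client = Client()
with client.retry_count(10):
    assert client.retries == 10
assert client.retries == 3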
pandas-dev/pandas | pandas/io/pytables.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L975-L1055 | def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs) | [
"def",
"append_to_multiple",
"(",
"self",
",",
"d",
",",
"value",
",",
"selector",
",",
"data_columns",
"=",
"None",
",",
"axes",
"=",
"None",
",",
"dropna",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"axes",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"\"axes is currently not accepted as a parameter to\"",
"\" append_to_multiple; you can create the \"",
"\"tables independently instead\"",
")",
"if",
"not",
"isinstance",
"(",
"d",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"append_to_multiple must have a dictionary specified as the \"",
"\"way to split the value\"",
")",
"if",
"selector",
"not",
"in",
"d",
":",
"raise",
"ValueError",
"(",
"\"append_to_multiple requires a selector that is in passed dict\"",
")",
"# figure out the splitting axis (the non_index_axis)",
"axis",
"=",
"list",
"(",
"set",
"(",
"range",
"(",
"value",
".",
"ndim",
")",
")",
"-",
"set",
"(",
"_AXES_MAP",
"[",
"type",
"(",
"value",
")",
"]",
")",
")",
"[",
"0",
"]",
"# figure out how to split the value",
"remain_key",
"=",
"None",
"remain_values",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"v",
"is",
"None",
":",
"if",
"remain_key",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"append_to_multiple can only have one value in d that \"",
"\"is None\"",
")",
"remain_key",
"=",
"k",
"else",
":",
"remain_values",
".",
"extend",
"(",
"v",
")",
"if",
"remain_key",
"is",
"not",
"None",
":",
"ordered",
"=",
"value",
".",
"axes",
"[",
"axis",
"]",
"ordd",
"=",
"ordered",
".",
"difference",
"(",
"Index",
"(",
"remain_values",
")",
")",
"ordd",
"=",
"sorted",
"(",
"ordered",
".",
"get_indexer",
"(",
"ordd",
")",
")",
"d",
"[",
"remain_key",
"]",
"=",
"ordered",
".",
"take",
"(",
"ordd",
")",
"# data_columns",
"if",
"data_columns",
"is",
"None",
":",
"data_columns",
"=",
"d",
"[",
"selector",
"]",
"# ensure rows are synchronized across the tables",
"if",
"dropna",
":",
"idxs",
"=",
"(",
"value",
"[",
"cols",
"]",
".",
"dropna",
"(",
"how",
"=",
"'all'",
")",
".",
"index",
"for",
"cols",
"in",
"d",
".",
"values",
"(",
")",
")",
"valid_index",
"=",
"next",
"(",
"idxs",
")",
"for",
"index",
"in",
"idxs",
":",
"valid_index",
"=",
"valid_index",
".",
"intersection",
"(",
"index",
")",
"value",
"=",
"value",
".",
"loc",
"[",
"valid_index",
"]",
"# append",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"dc",
"=",
"data_columns",
"if",
"k",
"==",
"selector",
"else",
"None",
"# compute the val",
"val",
"=",
"value",
".",
"reindex",
"(",
"v",
",",
"axis",
"=",
"axis",
")",
"self",
".",
"append",
"(",
"k",
",",
"val",
",",
"data_columns",
"=",
"dc",
",",
"*",
"*",
"kwargs",
")"
] | Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted | [
"Append",
"to",
"multiple",
"tables"
] | python | train |
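A hedged usage sketch of the method documented above; it assumes PyTables is installed, and the file, table, and column names are hypothetical.

# Hedged usage sketch for HDFStore.append_to_multiple (requires PyTables;
# file and table names here are hypothetical).
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(8, 4), columns=list('ABCD'))

with pd.HDFStore('demo.h5', mode='w') as store:
    # 'selector_table' keeps A and B and acts as the indexable selector;
    # None sends the remaining columns (C, D) to 'other_table'.
    store.append_to_multiple(
        {'selector_table': ['A', 'B'], 'other_table': None},
        df, selector='selector_table')
    # Rows can later be recombined with select_as_multiple on the same selector.
    result = store.select_as_multiple(
        ['selector_table', 'other_table'], selector='selector_table')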
ikegami-yukino/madoka-python | madoka/madoka.py | https://github.com/ikegami-yukino/madoka-python/blob/a9a1efecbc85ac4a24a78cbb19f9aed77b7162d3/madoka/madoka.py#L588-L597 | def create(self, width=0, max_value=0, path=None, flags=0, seed=0):
"""Create new sketch
Params:
<int> width
<int> max_value
<str> path
<int> flags
<int> seed
"""
return _madoka.Sketch_create(self, width, max_value, path, flags, seed) | [
"def",
"create",
"(",
"self",
",",
"width",
"=",
"0",
",",
"max_value",
"=",
"0",
",",
"path",
"=",
"None",
",",
"flags",
"=",
"0",
",",
"seed",
"=",
"0",
")",
":",
"return",
"_madoka",
".",
"Sketch_create",
"(",
"self",
",",
"width",
",",
"max_value",
",",
"path",
",",
"flags",
",",
"seed",
")"
] | Create new sketch
Params:
<int> width
<int> max_value
<str> path
<int> flags
<int> seed | [
"Create",
"new",
"sketch",
"Params",
":",
"<int",
">",
"width",
"<int",
">",
"max_value",
"<str",
">",
"path",
"<int",
">",
"flags",
"<int",
">",
"seed"
] | python | train |
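A hedged usage sketch; the width and max_value values are illustrative, and the inc/get calls mirror the underlying C++ Sketch interface; they are assumptions here and should be checked against the madoka-python README.

# Hedged usage sketch for Sketch.create (parameter values are illustrative;
# the increment/lookup helpers are assumed from the underlying C++ interface).
import madoka

sketch = madoka.Sketch()
# Re-create with an explicit width and cap counts at 255 per key.
sketch.create(width=2 ** 20, max_value=255)
sketch.inc('example-key')          # count one occurrence (assumed helper)
print(sketch.get('example-key'))   # approximate count for the key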