<SYSTEM_TASK:>
Spy usage of given `fn`.
<END_TASK>
<USER_TASK:>
Description:
def spy2(fn): # type: (...) -> None
"""Spy usage of given `fn`.
Patches the module, class or object `fn` lives in, so that all
interactions can be recorded; otherwise executes `fn` as before, so
that all side effects happen as before.
E.g.::
import time
spy(time.time)
do_work(...) # nothing injected, uses global patched `time` module
verify(time).time()
Note that builtins often cannot be patched because they're read-only.
""" |
if isinstance(fn, str):
answer = get_obj(fn)
else:
answer = fn
when2(fn, Ellipsis).thenAnswer(answer) |
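A minimal usage sketch, assuming the mockito-python package this helper ships with (`verify` and `unstub` come from the same package):

import time
from mockito import spy2, verify, unstub
spy2(time.time)
time.time()          # real implementation runs, the call is recorded
verify(time).time()  # passes: exactly one recorded call
unstub()             # remove the patch from the `time` module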
<SYSTEM_TASK:>
Return a list of templates usable by a model.
<END_TASK>
<USER_TASK:>
Description:
def get_templates(model):
""" Return a list of templates usable by a model. """ |
for template_name, template in templates.items():
if issubclass(template.model, model):
yield (template_name, template.layout._meta.verbose_name) |
<SYSTEM_TASK:>
Get required API keys from environment variables.
<END_TASK>
<USER_TASK:>
Description:
def get_api_envs():
"""Get required API keys from environment variables.""" |
client_id = os.environ.get('CLIENT_ID')
user_id = os.environ.get('USER_ID')
if not client_id or not user_id:
raise ValueError('API keys are not found in the environment')
return client_id, user_id |
<SYSTEM_TASK:>
Function checkAndCreate
<END_TASK>
<USER_TASK:>
Description:
def checkAndCreate(self, key, payload, domainId):
""" Function checkAndCreate
Check if a subnet exists and create it if not
@param key: The targeted subnet
@param payload: The targeted subnet description
@param domainId: The domainId to be attached with the subnet
@return RETURN: The id of the subnet
""" |
if key not in self:
self[key] = payload
oid = self[key]['id']
if not oid:
return False
#~ Ensure subnet contains the domain
subnetDomainIds = []
for domain in self[key]['domains']:
subnetDomainIds.append(domain['id'])
if domainId not in subnetDomainIds:
subnetDomainIds.append(domainId)
self[key]["domain_ids"] = subnetDomainIds
if len(self[key]["domains"]) is not len(subnetDomainIds):
return False
return oid |
<SYSTEM_TASK:>
Function removeDomain
<END_TASK>
<USER_TASK:>
Description:
def removeDomain(self, subnetId, domainId):
""" Function removeDomain
Delete a domain from a subnet
@param subnetId: The subnet Id
@param domainId: The domainId to be attached with the subnet
@return RETURN: boolean
""" |
subnetDomainIds = []
for domain in self[subnetId]['domains']:
subnetDomainIds.append(domain['id'])
subnetDomainIds.remove(domainId)
self[subnetId]["domain_ids"] = subnetDomainIds
return len(self[subnetId]["domains"]) == len(subnetDomainIds) |
<SYSTEM_TASK:>
Mark a callable as exclusive
<END_TASK>
<USER_TASK:>
Description:
def exclusive(via=threading.Lock):
"""
Mark a callable as exclusive
:param via: factory for a Lock to guard the callable
Guards the callable against being entered again before completion.
Explicitly raises a :py:exc:`RuntimeError` on violation.
:note: If applied to a method, it is exclusive across all instances.
""" |
def make_exclusive(fnc):
fnc_guard = via()
@functools.wraps(fnc)
def exclusive_call(*args, **kwargs):
if fnc_guard.acquire(blocking=False):
try:
return fnc(*args, **kwargs)
finally:
fnc_guard.release()
else:
raise RuntimeError('exclusive call to %s violated' % fnc)
return exclusive_call
return make_exclusive |
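A short usage sketch (`refresh` is a hypothetical function):

import threading

@exclusive(via=threading.Lock)
def refresh():
    ...  # long-running work

If a second thread calls `refresh` while the first call is still running, it does not block: the lock is acquired with blocking=False, so the call fails fast with RuntimeError.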
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def service(flavour):
r"""
Mark a class as implementing a Service
Each Service class must have a ``run`` method, which does not take any arguments.
This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless
* the Service has been garbage collected, or
* the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.
For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``.
""" |
def service_unit_decorator(raw_cls):
__new__ = raw_cls.__new__
def __new_service__(cls, *args, **kwargs):
if __new__ is object.__new__:
self = __new__(cls)
else:
self = __new__(cls, *args, **kwargs)
service_unit = ServiceUnit(self, flavour)
self.__service_unit__ = service_unit
return self
raw_cls.__new__ = __new_service__
if raw_cls.run.__doc__ is None:
raw_cls.run.__doc__ = "Service entry point"
return raw_cls
return service_unit_decorator |
<SYSTEM_TASK:>
Synchronously run ``payload`` and provide its output
<END_TASK>
<USER_TASK:>
Description:
def execute(self, payload, *args, flavour: ModuleType, **kwargs):
"""
Synchronously run ``payload`` and provide its output
If ``*args`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
""" |
if args or kwargs:
payload = functools.partial(payload, *args, **kwargs)
return self._meta_runner.run_payload(payload, flavour=flavour) |
<SYSTEM_TASK:>
Concurrently run ``payload`` in the background
<END_TASK>
<USER_TASK:>
Description:
def adopt(self, payload, *args, flavour: ModuleType, **kwargs):
"""
Concurrently run ``payload`` in the background
If ``*args`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
""" |
if args or kwargs:
payload = functools.partial(payload, *args, **kwargs)
self._meta_runner.register_payload(payload, flavour=flavour) |
<SYSTEM_TASK:>
Start accepting synchronous, asynchronous and service payloads
<END_TASK>
<USER_TASK:>
Description:
def accept(self):
"""
Start accepting synchronous, asynchronous and service payloads
Since services are globally defined, only one :py:class:`ServiceRunner`
may :py:meth:`accept` payloads at any time.
""" |
if self._meta_runner:
raise RuntimeError('payloads scheduled for %s before being started' % self)
self._must_shutdown = False
self._logger.info('%s starting', self.__class__.__name__)
# force collecting objects so that defunct, migrated and overwritten services are destroyed now
gc.collect()
self._adopt_services()
self.adopt(self._accept_services, flavour=trio)
self._meta_runner.run() |
<SYSTEM_TASK:>
Shutdown the accept loop and stop running payloads
<END_TASK>
<USER_TASK:>
Description:
def shutdown(self):
"""Shutdown the accept loop and stop running payloads""" |
self._must_shutdown = True
self._is_shutdown.wait()
self._meta_runner.stop() |
<SYSTEM_TASK:>
Transform and push a line to the interpreter.
<END_TASK>
<USER_TASK:>
Description:
def push(self, line):
"""Transform and push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
""" |
if transforms.FROM_EXPERIMENTAL.match(line):
transforms.add_transformers(line)
self.buffer.append("\n")
else:
self.buffer.append(line)
add_pass = False
if line.rstrip(' ').endswith(":"):
add_pass = True
source = "\n".join(self.buffer)
if add_pass:
source += "pass"
source = transforms.transform(source)
if add_pass:
source = source.rstrip(' ')
if source.endswith("pass"):
source = source[:-4]
# some transformations may strip an empty line meant to end a block
if not self.buffer[-1]:
source += "\n"
try:
more = self.runsource(source, self.filename)
except SystemExit:
os._exit(1)
if not more:
self.resetbuffer()
return more |
<SYSTEM_TASK:>
Write dict object into file
<END_TASK>
<USER_TASK:>
Description:
def dump(obj, f, preserve=False):
"""Write dict object into file
:param obj: the object to be dumped into toml
:param f: the file object
:param preserve: optional flag to preserve the inline table in result
""" |
if not hasattr(f, 'write'):
raise TypeError('You can only dump an object into a file object')
encoder = Encoder(f, preserve=preserve)
return encoder.write_dict(obj) |
<SYSTEM_TASK:>
Stringifies a dict as toml
<END_TASK>
<USER_TASK:>
Description:
def dumps(obj, preserve=False):
"""Stringifies a dict as toml
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result
""" |
f = StringIO()
dump(obj, f, preserve)
return f.getvalue() |
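A small round-trip sketch (hedged; the exact key ordering of the output depends on the Encoder):

data = {'title': 'example', 'owner': {'name': 'alice'}}
text = dumps(data)   # TOML text: a title key plus an [owner] table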
<SYSTEM_TASK:>
Loads licenses from the given directory.
<END_TASK>
<USER_TASK:>
Description:
def license_loader(lic_dir=LIC_DIR):
"""Loads licenses from the given directory.""" |
lics = []
for ln in os.listdir(lic_dir):
lp = os.path.join(lic_dir, ln)
with open(lp) as lf:
txt = lf.read()
lic = License(txt)
lics.append(lic)
return lics |
<SYSTEM_TASK:>
Upload application from file.
<END_TASK>
<USER_TASK:>
Description:
def import_app(files, category, overwrite, id, name):
""" Upload application from file.
By default, file name will be used as application name, with "-vXX.YYY" suffix stripped.
Application is looked up by one of these classifiers, in order of priority:
app-id, app-name, filename.
If app-id is provided, looks up existing application and updates its manifest.
If app-id is NOT specified, looks up by name, or creates new application.
""" |
platform = _get_platform()
org = platform.get_organization(QUBELL["organization"])
if category:
category = org.categories[category]
regex = re.compile(r"^(.*?)(-v(\d+)|)\.[^.]+$")
if (id or name) and len(files) > 1:
raise Exception("--id and --name are supported only for single-file mode")
for filename in files:
click.echo("Importing " + filename, nl=False)
if not name:
match = regex.match(basename(filename))
if not match:
click.echo(_color("RED", "FAIL") + " unknown filename format")
break
name = regex.match(basename(filename)).group(1)
click.echo(" => ", nl=False)
app = None
try:
app = org.get_application(id=id, name=name)
if app and not overwrite:
click.echo("%s %s already exists %s" % (
app.id, _color("BLUE", app and app.name or name), _color("RED", "FAIL")))
break
except NotFoundError:
if id:
click.echo("%s %s not found %s" % (
id or "", _color("BLUE", app and app.name or name), _color("RED", "FAIL")))
break
click.echo(_color("BLUE", app and app.name or name) + " ", nl=False)
try:
with open(filename, "r") as f:
if app:
app.update(name=app.name,
category=category and category.id or app.category,
manifest=Manifest(content=f.read()))
else:
app = org.application(id=id, name=name, manifest=Manifest(content=f.read()))
if category:
app.update(category=category.id)
click.echo(app.id + _color("GREEN", " OK"))
except IOError as e:
click.echo(_color("RED", " FAIL") + " " + e.message)
break |
<SYSTEM_TASK:>
Read the minified CSS file including STATIC_URL in the references
<END_TASK>
<USER_TASK:>
Description:
def _chosen_css(self):
"""Read the minified CSS file including STATIC_URL in the references
to the sprite images.""" |
css = render_to_string(self.css_template, {})
for sprite in self.chosen_sprites: # rewrite path to sprites in the css
css = css.replace(sprite, settings.STATIC_URL + "img/" + sprite)
return css |
<SYSTEM_TASK:>
Embed Chosen.js directly in html of the response.
<END_TASK>
<USER_TASK:>
Description:
def _embed(self, request, response):
"""Embed Chosen.js directly in html of the response.""" |
if self._match(request, response):
# Render the <link> and the <script> tags to include Chosen.
head = render_to_string(
"chosenadmin/_head_css.html",
{"chosen_css": self._chosen_css()}
)
body = render_to_string(
"chosenadmin/_script.html",
{"chosen_js": self._chosen_js()}
)
# Re-write the Response's content to include our new html
content = response.rendered_content
content = content.replace('</head>', head)
content = content.replace('</body>', body)
response.content = content
return response |
<SYSTEM_TASK:>
Writes an 8-bit byte to the specified command register
<END_TASK>
<USER_TASK:>
Description:
def write_byte(self, cmd, value):
"""
Writes an 8-bit byte to the specified command register
""" |
self.bus.write_byte_data(self.address, cmd, value)
self.log.debug(
"write_byte: Wrote 0x%02X to command register 0x%02X" % (
value, cmd
)
) |
<SYSTEM_TASK:>
Writes a 16-bit word to the specified command register
<END_TASK>
<USER_TASK:>
Description:
def write_word(self, cmd, value):
"""
Writes a 16-bit word to the specified command register
""" |
self.bus.write_word_data(self.address, cmd, value)
self.log.debug(
"write_word: Wrote 0x%04X to command register 0x%02X" % (
value, cmd
)
) |
<SYSTEM_TASK:>
Writes an 8-bit byte directly to the bus
<END_TASK>
<USER_TASK:>
Description:
def write_raw_byte(self, value):
"""
Writes an 8-bit byte directly to the bus
""" |
self.bus.write_byte(self.address, value)
self.log.debug("write_raw_byte: Wrote 0x%02X" % value) |
<SYSTEM_TASK:>
Writes a block of bytes to the bus using I2C format to the specified
<END_TASK>
<USER_TASK:>
Description:
def write_block_data(self, cmd, block):
"""
Writes a block of bytes to the bus using I2C format to the specified
command register
""" |
self.bus.write_i2c_block_data(self.address, cmd, block)
self.log.debug(
"write_block_data: Wrote [%s] to command register 0x%02X" % (
', '.join(['0x%02X' % x for x in block]),
cmd
)
) |
<SYSTEM_TASK:>
Read an 8-bit byte directly from the bus
<END_TASK>
<USER_TASK:>
Description:
def read_raw_byte(self):
"""
Read an 8-bit byte directly from the bus
""" |
result = self.bus.read_byte(self.address)
self.log.debug("read_raw_byte: Read 0x%02X from the bus" % result)
return result |
<SYSTEM_TASK:>
Read a block of bytes from the bus from the specified command register
<END_TASK>
<USER_TASK:>
Description:
def read_block_data(self, cmd, length):
"""
Read a block of bytes from the bus from the specified command register
Amount of bytes read in is defined by length
""" |
results = self.bus.read_i2c_block_data(self.address, cmd, length)
self.log.debug(
"read_block_data: Read [%s] from command register 0x%02X" % (
', '.join(['0x%02X' % x for x in results]),
cmd
)
)
return results |
<SYSTEM_TASK:>
Read an unsigned byte from the specified command register
<END_TASK>
<USER_TASK:>
Description:
def read_unsigned_byte(self, cmd):
"""
Read an unsigned byte from the specified command register
""" |
result = self.bus.read_byte_data(self.address, cmd)
self.log.debug(
"read_unsigned_byte: Read 0x%02X from command register 0x%02X" % (
result, cmd
)
)
return result |
<SYSTEM_TASK:>
Read an unsigned word from the specified command register
<END_TASK>
<USER_TASK:>
Description:
def read_unsigned_word(self, cmd, little_endian=True):
"""
Read an unsigned word from the specified command register
The data is assumed to be in little-endian mode; if it is in big-endian
mode, set little_endian to False
""" |
result = self.bus.read_word_data(self.address, cmd)
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
self.log.debug(
"read_unsigned_word: Read 0x%04X from command register 0x%02X" % (
result, cmd
)
)
return result |
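The endian swap is plain bit arithmetic; a worked example for result = 0x1234:

(0x1234 << 8) & 0xFF00   # 0x3400, low byte moved to the high position
0x1234 >> 8              # 0x0012, high byte moved to the low position
# sum: 0x3412 -- the two bytes exchanged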
<SYSTEM_TASK:>
Attempt to connect to an I2C bus
<END_TASK>
<USER_TASK:>
Description:
def __connect_to_bus(self, bus):
"""
Attempt to connect to an I2C bus
""" |
def connect(bus_num):
try:
self.log.debug("Attempting to connect to bus %s..." % bus_num)
self.bus = smbus.SMBus(bus_num)
self.log.debug("Success")
except IOError:
self.log.debug("Failed")
raise
# If the bus is not explicitly stated, try 0 and then try 1 if that
# fails
if bus is None:
try:
connect(0)
return
except IOError:
pass
try:
connect(1)
return
except IOError:
raise
else:
try:
connect(bus)
return
except IOError:
raise |
<SYSTEM_TASK:>
Default user to the current version owner.
<END_TASK>
<USER_TASK:>
Description:
def get_formset(self, request, obj=None, **kwargs):
""" Default user to the current version owner. """ |
data = super().get_formset(request, obj, **kwargs)
if obj:
data.form.base_fields['user'].initial = request.user.id
return data |
<SYSTEM_TASK:>
Function reload
<END_TASK>
<USER_TASK:>
Description:
def reload(self):
""" Function reload
Reload the full object to ensure sync
""" |
realData = self.load()
self.clear()
self.update(realData) |
<SYSTEM_TASK:>
Process actions in the publishing schedule.
<END_TASK>
<USER_TASK:>
Description:
def process_actions(action_ids=None):
"""
Process actions in the publishing schedule.
Returns the number of actions processed.
""" |
actions_taken = 0
action_list = PublishAction.objects.prefetch_related(
'content_object',
).filter(
scheduled_time__lte=timezone.now(),
)
if action_ids is not None:
action_list = action_list.filter(id__in=action_ids)
for action in action_list:
action.process_action()
action.delete()
actions_taken += 1
return actions_taken |
<SYSTEM_TASK:>
Return a boolean if Celery tasks are enabled for this app.
<END_TASK>
<USER_TASK:>
Description:
def celery_enabled():
"""
Return a boolean if Celery tasks are enabled for this app.
If the ``GLITTER_PUBLISHER_CELERY`` setting is ``True`` or ``False`` - then that value will be
used. However if the setting isn't defined, then this will be enabled automatically if Celery
is installed.
""" |
enabled = getattr(settings, 'GLITTER_PUBLISHER_CELERY', None)
if enabled is None:
try:
import celery # noqa
enabled = True
except ImportError:
enabled = False
return enabled |
<SYSTEM_TASK:>
Find regexp in activitylog
<END_TASK>
<USER_TASK:>
Description:
def find(self, item, description='', event_type=''):
"""
Find regexp in activitylog
If `item` is of the form 'TYPE: description', the prefix is used as the event type and the rest as the description pattern.
""" |
if ': ' in item:
    event, _, desc = item.partition(': ')
    if event in self.TYPES:
        # split only once, so descriptions containing ': ' stay intact
        event_type, description = event, desc
    else:
        description = item
elif not description:
    description = item
if event_type:
found = [x['time'] for x in self.log if re.search(description, x['description'])
and x['eventTypeText'] == event_type]
else:
found = [x['time'] for x in self.log if re.search(description, x['description'])]
if len(found):
return found
raise exceptions.NotFoundError("Item '{}' is not found with (description='{}', event_type='{}')".
format(item, description, event_type)) |
<SYSTEM_TASK:>
Currently a small stub to create an instance of Checker for the passed
<END_TASK>
<USER_TASK:>
Description:
def do_command_line(infile: typing.IO[str]) -> int:
"""
Currently a small stub to create an instance of Checker for the passed
``infile`` and run its test functions through linting.
Args:
infile
Returns:
int: Number of flake8 errors raised.
""" |
lines = infile.readlines()
tree = ast.parse(''.join(lines))
checker = Checker(tree, lines, infile.name)
checker.load()
errors = [] # type: typing.List[AAAError]
for func in checker.all_funcs(skip_noqa=True):
try:
errors = list(func.check_all())
except ValidationError as error:
errors = [error.to_aaa()]
print(func.__str__(errors), end='')
return len(errors) |
<SYSTEM_TASK:>
Iterate through multiple lists or arrays of equal size
<END_TASK>
<USER_TASK:>
Description:
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """ |
# This izip routine is the old itertools recipe, adapted for Python 3
# where map() is lazy: materialise the iterators once and stop cleanly
# at the shortest input.
# izip('ABCD', 'xy') --> Ax By
iterators = [iter(it) for it in iterables]
while iterators:
    try:
        yield tuple([next(it) for it in iterators])
    except StopIteration:
        return |
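Behaviour matches the builtin zip, stopping at the shortest input:

list(_izip('ABCD', 'xy'))   # [('A', 'x'), ('B', 'y')]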
<SYSTEM_TASK:>
Check and convert any input scalar or array to numpy array
<END_TASK>
<USER_TASK:>
Description:
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """ |
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Didn't pass anything, set zi = z
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout) |
<SYSTEM_TASK:>
Find cosmological parameters for named cosmo in cosmology.py list
<END_TASK>
<USER_TASK:>
Description:
def getcosmo(cosmology):
""" Find cosmological parameters for named cosmo in cosmology.py list """ |
defaultcosmologies = {'dragons': cg.DRAGONS(), 'wmap1': cg.WMAP1_Mill(),
'wmap3': cg.WMAP3_ML(), 'wmap5': cg.WMAP5_mean(),
'wmap7': cg.WMAP7_ML(), 'wmap9': cg.WMAP9_ML(),
'wmap1_lss': cg.WMAP1_2dF_mean(),
'wmap3_mean': cg.WMAP3_mean(),
'wmap5_ml': cg.WMAP5_ML(),
'wmap5_lss': cg.WMAP5_BAO_SN_mean(),
'wmap7_lss': cg.WMAP7_BAO_H0_mean(),
'planck13': cg.Planck_2013(),
'planck15': cg.Planck_2015()}
if isinstance(cosmology, dict):
# User providing their own variables
cosmo = cosmology
if 'A_scaling' not in cosmology.keys():
A_scaling = getAscaling(cosmology, newcosmo=True)
cosmo.update({'A_scaling': A_scaling})
# Add extra variables by hand that cosmolopy requires
# note that they aren't used (set to zero)
for paramnames in cg.WMAP5_mean().keys():
if paramnames not in cosmology.keys():
cosmo.update({paramnames: 0})
elif cosmology.lower() in defaultcosmologies.keys():
# Load by name of cosmology instead
cosmo = defaultcosmologies[cosmology.lower()]
A_scaling = getAscaling(cosmology)
cosmo.update({'A_scaling': A_scaling})
else:
    raise ValueError("You haven't passed a dict of cosmological parameters "
                     "OR a recognised cosmology, you gave %s" % (cosmology))
# No idea why this has to be done by hand but should be O_k = 0
cosmo = cp.distance.set_omega_k_0(cosmo)
# Use the cosmology as **cosmo passed to cosmolopy routines
return(cosmo) |
<SYSTEM_TASK:>
Output the cosmology to a string for writing to file
<END_TASK>
<USER_TASK:>
Description:
def _getcosmoheader(cosmo):
""" Output the cosmology to a string for writing to file """ |
cosmoheader = ("# Cosmology (flat) Om:{0:.3f}, Ol:{1:.3f}, h:{2:.2f}, "
"sigma8:{3:.3f}, ns:{4:.2f}".format(
cosmo['omega_M_0'], cosmo['omega_lambda_0'], cosmo['h'],
cosmo['sigma_8'], cosmo['n']))
return(cosmoheader) |
<SYSTEM_TASK:>
NFW conc from Duffy 08 Table 1 for halo mass and redshift
<END_TASK>
<USER_TASK:>
Description:
def cduffy(z, M, vir='200crit', relaxed=True):
""" NFW conc from Duffy 08 Table 1 for halo mass and redshift""" |
if(vir == '200crit'):
if relaxed:
params = [6.71, -0.091, -0.44]
else:
params = [5.71, -0.084, -0.47]
elif(vir == 'tophat'):
if relaxed:
params = [9.23, -0.090, -0.69]
else:
params = [7.85, -0.081, -0.71]
elif(vir == '200mean'):
if relaxed:
params = [11.93, -0.090, -0.99]
else:
params = [10.14, -0.081, -1.01]
else:
    raise ValueError("Didn't recognise the halo boundary definition "
                     "provided %s" % (vir))
return(params[0] * ((M/(2e12/0.72))**params[1]) * ((1+z)**params[2])) |
<SYSTEM_TASK:>
Returns integral of the linear growth factor from z=200 to z=z
<END_TASK>
<USER_TASK:>
Description:
def _int_growth(z, **cosmo):
""" Returns integral of the linear growth factor from z=200 to z=z """ |
zmax = 200
if hasattr(z, "__len__"):
for zval in z:
assert(zval < zmax)
else:
assert(z < zmax)
y, yerr = scipy.integrate.quad(
lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +
cosmo['omega_lambda_0'])**(1.5),
z, zmax)
return(y) |
<SYSTEM_TASK:>
Returns linear growth factor at a given redshift, normalised to z=0
<END_TASK>
<USER_TASK:>
Description:
def growthfactor(z, norm=True, **cosmo):
""" Returns linear growth factor at a given redshift, normalised to z=0
by default, for a given cosmology
Parameters
----------
z : float or numpy array
The redshift at which the growth factor should be calculated
norm : boolean, optional
If true then normalise the growth factor to z=0 case defaults True
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
float or numpy array
The growth factor at a range of redshifts 'z'
Raises
------
""" |
H = np.sqrt(cosmo['omega_M_0'] * (1 + z)**3 +
cosmo['omega_lambda_0'])
growthval = H * _int_growth(z, **cosmo)
if norm:
growthval /= _int_growth(0, **cosmo)
return(growthval) |
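A quick sanity check (only omega_M_0 and omega_lambda_0 are read by this function and _int_growth): with norm=True the growth factor is 1 at z=0 by construction.

cosmo = {'omega_M_0': 0.275, 'omega_lambda_0': 0.725}
growthfactor(0.0, norm=True, **cosmo)   # 1.0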
<SYSTEM_TASK:>
Calculate growth rate indices a_tilde and b_tilde
<END_TASK>
<USER_TASK:>
Description:
def calc_ab(zi, Mi, **cosmo):
""" Calculate growth rate indices a_tilde and b_tilde
Parameters
----------
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(a_tilde, b_tilde) : float
""" |
# When zi = 0, the a_tilde becomes alpha and b_tilde becomes beta
# Eqn 23 of Correa et al 2015a (analytically solve from Eqn 16 and 17)
# Arbitrary formation redshift, z_-2 in COM is more physically motivated
zf = -0.0064 * (np.log10(Mi))**2 + 0.0237 * (np.log10(Mi)) + 1.8837
# Eqn 22 of Correa et al 2015a
q = 4.137 * zf**(-0.9476)
# Radius of a mass Mi
R_Mass = cp.perturbation.mass_to_radius(Mi, **cosmo) # [Mpc]
# Radius of a mass Mi/q
Rq_Mass = cp.perturbation.mass_to_radius(Mi/q, **cosmo) # [Mpc]
# Mass variance 'sigma' evaluate at z=0 to a good approximation
sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo) # [Mpc]
sigq, err_sigq = cp.perturbation.sigma_r(Rq_Mass, 0, **cosmo) # [Mpc]
f = (sigq**2 - sig**2)**(-0.5)
# Eqn 9 and 10 from Correa et al 2015c
# (generalised to zi from Correa et al 2015a's z=0 special case)
# a_tilde is power law growth rate
a_tilde = (np.sqrt(2/np.pi) * 1.686 * _deriv_growth(zi, **cosmo) /
growthfactor(zi, norm=True, **cosmo)**2 + 1)*f
# b_tilde is exponential growth rate
b_tilde = -f
return(a_tilde, b_tilde) |
<SYSTEM_TASK:>
Calculate accretion rate and mass history of a halo at any
<END_TASK>
<USER_TASK:>
Description:
def acc_rate(z, zi, Mi, **cosmo):
""" Calculate accretion rate and mass history of a halo at any
redshift 'z' with mass 'Mi' at a lower redshift 'zi'
Parameters
----------
z : float
Redshift to solve acc_rate / mass history. Note zi<z
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
""" |
# Find parameters a_tilde and b_tilde for initial redshift
# use Eqn 9 and 10 of Correa et al. (2015c)
a_tilde, b_tilde = calc_ab(zi, Mi, **cosmo)
# Halo mass at z, in Msol
# use Eqn 8 in Correa et al. (2015c)
Mz = Mi * ((1 + z - zi)**a_tilde) * (np.exp(b_tilde * (z - zi)))
# Accretion rate at z, Msol yr^-1
# use Eqn 11 from Correa et al. (2015c)
dMdt = 71.6 * (Mz/1e12) * (cosmo['h']/0.7) *\
(-a_tilde / (1 + z - zi) - b_tilde) * (1 + z) *\
np.sqrt(cosmo['omega_M_0']*(1 + z)**3+cosmo['omega_lambda_0'])
return(dMdt, Mz) |
<SYSTEM_TASK:>
Calculate mass accretion history by looping function acc_rate
<END_TASK>
<USER_TASK:>
Description:
def MAH(z, zi, Mi, **cosmo):
""" Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
""" |
# Ensure that z is a 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
# Create a full array
dMdt_array = np.empty_like(z)
Mz_array = np.empty_like(z)
for i_ind, zval in enumerate(z):
# Solve the accretion rate and halo mass at each redshift step
dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
dMdt_array[i_ind] = dMdt
Mz_array[i_ind] = Mz
return(dMdt_array, Mz_array) |
<SYSTEM_TASK:>
Calculate concentration for halo of mass 'M' at redshift 'z'
<END_TASK>
<USER_TASK:>
Description:
def COM(z, M, **cosmo):
""" Calculate concentration for halo of mass 'M' at redshift 'z'
Parameters
----------
z : float / numpy array
Redshift to find concentration of halo
M : float / numpy array
Halo mass at redshift 'z'. Must be same size as 'z'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(c_array, sig_array, nu_array, zf_array) : float / numpy arrays
of equivalent size to 'z' and 'M'. Variables are
Concentration, the mass variance 'sigma' it corresponds to,
the dimensionless fluctuation this represents and formation redshift
""" |
# Check that z and M are arrays
z = np.array(z, ndmin=1, dtype=float)
M = np.array(M, ndmin=1, dtype=float)
# Create array
c_array = np.empty_like(z)
sig_array = np.empty_like(z)
nu_array = np.empty_like(z)
zf_array = np.empty_like(z)
for i_ind, (zval, Mval) in enumerate(_izip(z, M)):
# Evaluate the indices at each redshift and mass combination
# that you want a concentration for, different to MAH which
# uses one a_tilde and b_tilde at the starting redshift only
a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)
# Minimize equation to solve for 1 unknown, 'c'
c = scipy.optimize.brentq(_minimize_c, 2, 1000,
args=(zval, a_tilde, b_tilde,
cosmo['A_scaling'], cosmo['omega_M_0'],
cosmo['omega_lambda_0']))
if np.isclose(c, 0):
print("Error solving for concentration with given redshift and "
"(probably) too small a mass")
c = -1
sig = -1
nu = -1
zf = -1
else:
# Calculate formation redshift for this concentration,
# redshift at which the scale radius = virial radius: z_-2
zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],
omega_M_0=cosmo['omega_M_0'],
omega_lambda_0=cosmo['omega_lambda_0'])
R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)
sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)
nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))
c_array[i_ind] = c
sig_array[i_ind] = sig
nu_array[i_ind] = nu
zf_array[i_ind] = zf
return(c_array, sig_array, nu_array, zf_array) |
<SYSTEM_TASK:>
Load a configuration and keep it alive for the given context
<END_TASK>
<USER_TASK:>
Description:
def load(config_path: str):
"""
Load a configuration and keep it alive for the given context
:param config_path: path to a configuration file
""" |
# we bind the config to _ to keep it alive
if os.path.splitext(config_path)[1] in ('.yaml', '.yml'):
_ = load_yaml_configuration(config_path, translator=PipelineTranslator())
elif os.path.splitext(config_path)[1] == '.py':
_ = load_python_configuration(config_path)
else:
raise ValueError('Unknown configuration extension: %r' % os.path.splitext(config_path)[1])
yield |
<SYSTEM_TASK:>
Assets for a given release
<END_TASK>
<USER_TASK:>
Description:
def release_assets(self, release):
"""Assets for a given release
""" |
release = self.as_id(release)
return self.get_list(url='%s/%s/assets' % (self, release)) |
<SYSTEM_TASK:>
Upload a file to a release
<END_TASK>
<USER_TASK:>
Description:
def upload(self, release, filename, content_type=None):
"""Upload a file to a release
:param filename: filename to upload
:param content_type: optional content type
:return: json object from github
""" |
release = self.as_id(release)
name = os.path.basename(filename)
if not content_type:
content_type, _ = mimetypes.guess_type(name)
if not content_type:
raise ValueError('content_type not known')
inputs = {'name': name}
url = '%s%s/%s/assets' % (self.uploads_url,
urlsplit(self.api_url).path,
release)
info = os.stat(filename)
size = info[stat.ST_SIZE]
response = self.http.post(
url, data=stream_upload(filename), auth=self.auth,
params=inputs,
headers={'content-type': content_type,
'content-length': str(size)})
response.raise_for_status()
return response.json() |
<SYSTEM_TASK:>
Validate ``tag_name`` with the latest tag from github
<END_TASK>
<USER_TASK:>
Description:
def validate_tag(self, tag_name, prefix=None):
"""Validate ``tag_name`` with the latest tag from github
If ``tag_name`` is a valid candidate, return the latest tag from github
""" |
new_version = semantic_version(tag_name)
current = self.latest()
if current:
tag_name = current['tag_name']
if prefix:
tag_name = tag_name[len(prefix):]
tag_name = semantic_version(tag_name)
if tag_name >= new_version:
what = 'equal to' if tag_name == new_version else 'older than'
raise GithubException(
'Your local version "%s" is %s '
'the current github version "%s".\n'
'Bump the local version to '
'continue.' %
(
str(new_version),
what,
str(tag_name)
)
)
return current |
<SYSTEM_TASK:>
Return the full reddit URL associated with the usernote.
<END_TASK>
<USER_TASK:>
Description:
def full_url(self):
"""Return the full reddit URL associated with the usernote.
Uses the note's stored link and subreddit attributes; takes no arguments.
""" |
if self.link == '':
return None
else:
return Note._expand_url(self.link, self.subreddit) |
<SYSTEM_TASK:>
Convert a reddit URL into the short-hand used by usernotes.
<END_TASK>
<USER_TASK:>
Description:
def _compress_url(link):
"""Convert a reddit URL into the short-hand used by usernotes.
Arguments:
link: a link to a comment, submission, or message (str)
Returns a String of the shorthand URL
""" |
comment_re = re.compile(r'/comments/([A-Za-z\d]{2,})(?:/[^\s]+/([A-Za-z\d]+))?')
message_re = re.compile(r'/message/messages/([A-Za-z\d]+)')
matches = re.findall(comment_re, link)
if len(matches) == 0:
matches = re.findall(message_re, link)
if len(matches) == 0:
return None
else:
return 'm,' + matches[0]
else:
if matches[0][1] == '':
return 'l,' + matches[0][0]
else:
return 'l,' + matches[0][0] + ',' + matches[0][1] |
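Worked examples of the shorthand (IDs are hypothetical):

_compress_url('https://reddit.com/r/sub/comments/abc123/title/def456')
# 'l,abc123,def456'
_compress_url('https://reddit.com/message/messages/xyz789')
# 'm,xyz789'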
<SYSTEM_TASK:>
Convert a usernote's URL short-hand into a full reddit URL.
<END_TASK>
<USER_TASK:>
Description:
def _expand_url(short_link, subreddit=None):
"""Convert a usernote's URL short-hand into a full reddit URL.
Arguments:
subreddit: the subreddit the URL is for (PRAW Subreddit object or str)
short_link: the compressed link from a usernote (str)
Returns a String of the full URL.
""" |
# Some URL structures for notes
message_scheme = 'https://reddit.com/message/messages/{}'
comment_scheme = 'https://reddit.com/r/{}/comments/{}/-/{}'
post_scheme = 'https://reddit.com/r/{}/comments/{}/'
if short_link == '':
return None
else:
parts = short_link.split(',')
if parts[0] == 'm':
return message_scheme.format(parts[1])
if parts[0] == 'l' and subreddit:
if len(parts) > 2:
return comment_scheme.format(subreddit, parts[1], parts[2])
else:
return post_scheme.format(subreddit, parts[1])
elif not subreddit:
raise ValueError('Subreddit name must be provided')
else:
return None |
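And the inverse direction (subreddit passed as a plain string; _expand_url is referenced as a static helper on Note in full_url above):

Note._expand_url('l,abc123,def456', 'mysubreddit')
# 'https://reddit.com/r/mysubreddit/comments/abc123/-/def456'
Note._expand_url('m,xyz789')
# 'https://reddit.com/message/messages/xyz789'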
<SYSTEM_TASK:>
Get the JSON stored on the usernotes wiki page.
<END_TASK>
<USER_TASK:>
Description:
def get_json(self):
"""Get the JSON stored on the usernotes wiki page.
Returns a dict representation of the usernotes (with the notes BLOB
decoded).
Raises:
RuntimeError if the usernotes version is incompatible with this
version of puni.
""" |
try:
usernotes = self.subreddit.wiki[self.page_name].content_md
notes = json.loads(usernotes)
except NotFound:
self._init_notes()
else:
if notes['ver'] != self.schema:
raise RuntimeError(
'Usernotes schema is v{0}, puni requires v{1}'.
format(notes['ver'], self.schema)
)
self.cached_json = self._expand_json(notes)
return self.cached_json |
<SYSTEM_TASK:>
Set up the UserNotes page with the initial JSON schema.
<END_TASK>
<USER_TASK:>
Description:
def _init_notes(self):
"""Set up the UserNotes page with the initial JSON schema.""" |
self.cached_json = {
'ver': self.schema,
'users': {},
'constants': {
'users': [x.name for x in self.subreddit.moderator()],
'warnings': Note.warnings
}
}
self.set_json('Initializing JSON via puni', True) |
<SYSTEM_TASK:>
Send the JSON from the cache to the usernotes wiki page.
<END_TASK>
<USER_TASK:>
Description:
def set_json(self, reason='', new_page=False):
"""Send the JSON from the cache to the usernotes wiki page.
Arguments:
reason: the change reason that will be posted to the wiki changelog
(str)
Raises:
OverflowError if the new JSON data is greater than max_page_size
""" |
compressed_json = json.dumps(self._compress_json(self.cached_json))
if len(compressed_json) > self.max_page_size:
raise OverflowError(
'Usernotes page is too large (>{0} characters)'.
format(self.max_page_size)
)
if new_page:
self.subreddit.wiki.create(
self.page_name,
compressed_json,
reason
)
# Set the page as hidden and available to moderators only
self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)
else:
self.subreddit.wiki[self.page_name].edit(
compressed_json,
reason
) |
<SYSTEM_TASK:>
Return a list of Note objects for the given user.
<END_TASK>
<USER_TASK:>
Description:
def get_notes(self, user):
"""Return a list of Note objects for the given user.
Return an empty list if no notes are found.
Arguments:
user: the user to search for in the usernotes (str)
""" |
# Try to search for all notes on a user, return an empty list if none
# are found.
try:
users_notes = []
for note in self.cached_json['users'][user]['ns']:
users_notes.append(Note(
user=user,
note=note['n'],
subreddit=self.subreddit,
mod=self._mod_from_index(note['m']),
link=note['l'],
warning=self._warning_from_index(note['w']),
note_time=note['t']
))
return users_notes
except KeyError:
# User not found
return [] |
<SYSTEM_TASK:>
Decompress the BLOB portion of the usernotes.
<END_TASK>
<USER_TASK:>
Description:
def _expand_json(self, j):
"""Decompress the BLOB portion of the usernotes.
Arguments:
j: the JSON returned from the wiki page (dict)
Returns a Dict with the 'blob' key removed and a 'users' key added
""" |
decompressed_json = copy.copy(j)
decompressed_json.pop('blob', None) # Remove BLOB portion of JSON
# Decode and decompress JSON
compressed_data = base64.b64decode(j['blob'])
original_json = zlib.decompress(compressed_data).decode('utf-8')
decompressed_json['users'] = json.loads(original_json) # Insert users
return decompressed_json |
<SYSTEM_TASK:>
Compress the BLOB data portion of the usernotes.
<END_TASK>
<USER_TASK:>
Description:
def _compress_json(self, j):
"""Compress the BLOB data portion of the usernotes.
Arguments:
j: the JSON in Schema v5 format (dict)
Returns a dict with the 'users' key removed and 'blob' key added
""" |
compressed_json = copy.copy(j)
compressed_json.pop('users', None)
compressed_data = zlib.compress(
json.dumps(j['users']).encode('utf-8'),
self.zlib_compression_strength
)
b64_data = base64.b64encode(compressed_data).decode('utf-8')
compressed_json['blob'] = b64_data
return compressed_json |
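_expand_json and _compress_json are inverses: 'users' -> zlib -> base64 -> 'blob' on the way out, 'blob' -> base64 -> zlib -> 'users' on the way in. A self-contained sketch of the blob encoding alone:

import base64, json, zlib
users = {'someuser': {'ns': []}}
blob = base64.b64encode(zlib.compress(json.dumps(users).encode('utf-8'))).decode('utf-8')
json.loads(zlib.decompress(base64.b64decode(blob)).decode('utf-8')) == users   # True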
<SYSTEM_TASK:>
Add a note to the usernotes wiki page.
<END_TASK>
<USER_TASK:>
Description:
def add_note(self, note):
"""Add a note to the usernotes wiki page.
Arguments:
note: the note to be added (Note)
Returns the update message for the usernotes wiki
Raises:
ValueError when the warning type of the note can not be found in the
stored list of warnings.
""" |
notes = self.cached_json
if not note.moderator:
note.moderator = self.r.user.me().name
# Get index of moderator in mod list from usernotes
# Add moderator to list if not already there
try:
mod_index = notes['constants']['users'].index(note.moderator)
except ValueError:
notes['constants']['users'].append(note.moderator)
mod_index = notes['constants']['users'].index(note.moderator)
# Get index of warning type from warnings list
# Add warning type to list if not already there
try:
warn_index = notes['constants']['warnings'].index(note.warning)
except ValueError:
if note.warning in Note.warnings:
notes['constants']['warnings'].append(note.warning)
warn_index = notes['constants']['warnings'].index(note.warning)
else:
raise ValueError('Warning type not valid: ' + note.warning)
new_note = {
'n': note.note,
't': note.time,
'm': mod_index,
'l': note.link,
'w': warn_index
}
try:
notes['users'][note.username]['ns'].insert(0, new_note)
except KeyError:
notes['users'][note.username] = {'ns': [new_note]}
return '"create new note on user {}" via puni'.format(note.username) |
<SYSTEM_TASK:>
Remove a single usernote from the usernotes.
<END_TASK>
<USER_TASK:>
Description:
def remove_note(self, username, index):
"""Remove a single usernote from the usernotes.
Arguments:
username: the user that for whom you're removing a note (str)
index: the index of the note which is to be removed (int)
Returns the update message for the usernotes wiki
""" |
self.cached_json['users'][username]['ns'].pop(index)
# Go ahead and remove the user's entry if they have no more notes left
if len(self.cached_json['users'][username]['ns']) == 0:
del self.cached_json['users'][username]
return '"delete note #{} on user {}" via puni'.format(index, username) |
<SYSTEM_TASK:>
Return the first matching target application in this version range.
<END_TASK>
<USER_TASK:>
Description:
def get_related_targetApplication(vR, app_id, app_ver):
"""Return the first matching target application in this version range.
Returns None if there are no target applications or no matching ones.""" |
targetApplication = vR.get('targetApplication')
if not targetApplication:
return None
for tA in targetApplication:
guid = tA.get('guid')
if not guid or guid == app_id:
if not app_ver:
return tA
# We purposefully use maxVersion only, so that the blocklist contains items
# whose minimum version is ahead of the version we get passed. This means
# the blocklist we serve is "future-proof" for app upgrades.
if between(version_int(app_ver), '0', tA.get('maxVersion', '*')):
return tA
return None |
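A hedged example record, reusing the Firefox GUID from the XML sample below (assumes the between/version_int helpers this module already uses):

vR = {'targetApplication': [
    {'guid': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
     'minVersion': '39.0a1', 'maxVersion': '*'}]}
get_related_targetApplication(vR, '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}', '57.0')
# returns the target application dict: the guid matches and 57.0 is below maxVersion '*'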
<SYSTEM_TASK:>
Generate the addons blocklists.
<END_TASK>
<USER_TASK:>
Description:
def write_addons_items(xml_tree, records, app_id, api_ver=3, app_ver=None):
"""Generate the addons blocklists.
<emItem blockID="i372" id="[email protected]">
<versionRange minVersion="0" maxVersion="*" severity="3">
<targetApplication id="{ec8030f7-c20a-464f-9b0e-13a3a9e97384}">
<versionRange minVersion="39.0a1" maxVersion="*"/>
</targetApplication>
</versionRange>
<prefs>
<pref>browser.startup.homepage</pref>
<pref>browser.search.defaultenginename</pref>
</prefs>
</emItem>
""" |
if not records:
return
emItems = etree.SubElement(xml_tree, 'emItems')
groupby = {}
for item in records:
if is_related_to(item, app_id, app_ver):
if item['guid'] in groupby:
emItem = groupby[item['guid']]
# When creating new records from the Kinto Admin we don't have proper blockID.
if 'blockID' in item:
# Remove the first caracter which is the letter i to
# compare the numeric value i45 < i356.
current_blockID = int(item['blockID'][1:])
previous_blockID = int(emItem.attrib['blockID'][1:])
# Group by and keep the biggest blockID in the XML file.
if current_blockID > previous_blockID:
emItem.attrib['blockID'] = item['blockID']
else:
# If the latest entry does not have any blockID attribute, its
# ID should be used. (the list of records is sorted by ascending
# last_modified).
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1473194
emItem.attrib['blockID'] = item['id']
else:
emItem = etree.SubElement(emItems, 'emItem',
blockID=item.get('blockID', item['id']))
groupby[item['guid']] = emItem
prefs = etree.SubElement(emItem, 'prefs')
for p in item['prefs']:
pref = etree.SubElement(prefs, 'pref')
pref.text = p
# Set the add-on ID
emItem.set('id', item['guid'])
for field in ['name', 'os']:
if field in item:
emItem.set(field, item[field])
build_version_range(emItem, item, app_id) |
<SYSTEM_TASK:>
Create or update a label
<END_TASK>
<USER_TASK:>
Description:
def label(self, name, color, update=True):
"""Create or update a label
""" |
url = '%s/labels' % self
data = dict(name=name, color=color)
response = self.http.post(
url, json=data, auth=self.auth, headers=self.headers
)
if response.status_code == 201:
return True
elif response.status_code == 422 and update:
url = '%s/%s' % (url, name)
response = self.http.patch(
url, json=data, auth=self.auth, headers=self.headers
)
response.raise_for_status()
return False |
<SYSTEM_TASK:>
Check a plaintext password against a hashed password.
<END_TASK>
<USER_TASK:>
Description:
def check_password(password: str, encrypted: str) -> bool:
""" Check a plaintext password against a hashed password. """ |
# some old passwords have {crypt} in lower case, and passlib wants it to be
# in upper case.
if encrypted.startswith("{crypt}"):
encrypted = "{CRYPT}" + encrypted[7:]
return pwd_context.verify(password, encrypted) |
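A minimal sketch, assuming pwd_context is a passlib CryptContext configured elsewhere in the module:

from passlib.context import CryptContext
pwd_context = CryptContext(schemes=['sha512_crypt'])
encrypted = pwd_context.hash('s3cret')
check_password('s3cret', encrypted)   # True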
<SYSTEM_TASK:>
Check if version of repository is semantic
<END_TASK>
<USER_TASK:>
Description:
def validate(ctx, sandbox):
"""Check if version of repository is semantic
""" |
m = RepoManager(ctx.obj['agile'])
if not sandbox or m.can_release('sandbox'):
click.echo(m.validate_version()) |
<SYSTEM_TASK:>
Reset transaction back to original state, discarding all
<END_TASK>
<USER_TASK:>
Description:
def reset(self, force_flush_cache: bool = False) -> None:
"""
Reset transaction back to original state, discarding all
uncompleted transactions.
""" |
super(LDAPwrapper, self).reset()
if len(self._transactions) == 0:
raise RuntimeError("reset called outside a transaction.")
self._transactions[-1] = [] |
<SYSTEM_TASK:>
Object state is cached. When an update is required the update will be
<END_TASK>
<USER_TASK:>
Description:
def _cache_get_for_dn(self, dn: str) -> Dict[str, bytes]:
"""
Object state is cached. When an update is required the update will be
simulated on this cache, so that rollback information can be correct.
This function retrieves the cached data.
""" |
# no cached item, retrieve from ldap
self._do_with_retry(
lambda obj: obj.search(
dn,
'(objectclass=*)',
ldap3.BASE,
attributes=['*', '+']))
results = self._obj.response
if len(results) < 1:
raise NoSuchObject("No results finding current value")
if len(results) > 1:
raise RuntimeError("Too many results finding current value")
return results[0]['raw_attributes'] |
<SYSTEM_TASK:>
Attempt to commit all changes to LDAP database. i.e. forget all
<END_TASK>
<USER_TASK:>
Description:
def commit(self) -> None:
"""
Attempt to commit all changes to LDAP database. i.e. forget all
rollbacks. However stay inside transaction management.
""" |
if len(self._transactions) == 0:
raise RuntimeError("commit called outside transaction")
# If we have nested transactions, we don't actually commit, but push
# rollbacks up to previous transaction.
if len(self._transactions) > 1:
for on_rollback in reversed(self._transactions[-1]):
self._transactions[-2].insert(0, on_rollback)
_debug("commit")
self.reset() |
<SYSTEM_TASK:>
Roll back to previous database state. However stay inside transaction
<END_TASK>
<USER_TASK:>
Description:
def rollback(self) -> None:
"""
Roll back to previous database state. However stay inside transaction
management.
""" |
if len(self._transactions) == 0:
raise RuntimeError("rollback called outside transaction")
_debug("rollback:", self._transactions[-1])
# if something goes wrong here, nothing we can do about it, leave
# database as is.
try:
# for every rollback action ...
for on_rollback in self._transactions[-1]:
# execute it
_debug("--> rolling back", on_rollback)
self._do_with_retry(on_rollback)
except: # noqa: E722
_debug("--> rollback failed")
exc_class, exc, tb = sys.exc_info()
raise tldap.exceptions.RollbackError(
"FATAL Unrecoverable rollback error: %r" % exc)
finally:
# reset everything to clean state
_debug("--> rollback success")
self.reset() |
<SYSTEM_TASK:>
for testing purposes only. always fail in commit
<END_TASK>
<USER_TASK:>
Description:
def fail(self) -> None:
""" for testing purposes only. always fail in commit """ |
_debug("fail")
# on commit carry out action; on rollback reverse rename
def on_commit(_obj):
raise_testfailure("commit")
def on_rollback(_obj):
raise_testfailure("rollback")
return self._process(on_commit, on_rollback) |
<SYSTEM_TASK:>
Spits out the timedelta in days.
<END_TASK>
<USER_TASK:>
Description:
def get_interval_timedelta(self):
""" Spits out the timedelta in days. """ |
now_datetime = timezone.now()
current_month_days = monthrange(now_datetime.year, now_datetime.month)[1]
# Two weeks
if self.interval == reminders_choices.INTERVAL_2_WEEKS:
interval_timedelta = datetime.timedelta(days=14)
# One month
elif self.interval == reminders_choices.INTERVAL_ONE_MONTH:
interval_timedelta = datetime.timedelta(days=current_month_days)
# Three months
elif self.interval == reminders_choices.INTERVAL_THREE_MONTHS:
three_months = now_datetime + relativedelta(months=+3)
interval_timedelta = three_months - now_datetime
# Six months
elif self.interval == reminders_choices.INTERVAL_SIX_MONTHS:
six_months = now_datetime + relativedelta(months=+6)
interval_timedelta = six_months - now_datetime
# One year
elif self.interval == reminders_choices.INTERVAL_ONE_YEAR:
one_year = now_datetime + relativedelta(years=+1)
interval_timedelta = one_year - now_datetime
return interval_timedelta |
<SYSTEM_TASK:>
Execute a runner without blocking the event loop
<END_TASK>
<USER_TASK:>
Description:
async def awaitable_runner(runner: BaseRunner):
"""Execute a runner without blocking the event loop""" |
runner_thread = CapturingThread(target=runner.run)
runner_thread.start()
delay = 0.0
while not runner_thread.join(timeout=0):
await asyncio.sleep(delay)
delay = min(delay + 0.1, 1.0) |
<SYSTEM_TASK:>
Create an ``asyncio`` event loop running in the main thread and watching runners
<END_TASK>
<USER_TASK:>
Description:
def asyncio_main_run(root_runner: BaseRunner):
"""
Create an ``asyncio`` event loop running in the main thread and watching runners
Using ``asyncio`` to handle subprocesses requires a specific loop type to run in the main thread.
This function sets up and runs the correct loop in a portable way.
In addition, it runs a single :py:class:`~.BaseRunner` until completion or failure.
.. seealso:: The `issue #8 <https://github.com/MatterMiners/cobald/issues/8>`_ for details.
""" |
assert threading.current_thread() == threading.main_thread(), 'only main thread can accept asyncio subprocesses'
if sys.platform == 'win32':
event_loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(event_loop)
else:
event_loop = asyncio.get_event_loop()
asyncio.get_child_watcher().attach_loop(event_loop)
event_loop.run_until_complete(awaitable_runner(root_runner)) |
<SYSTEM_TASK:>
Dump initialized object structure to yaml
<END_TASK>
<USER_TASK:>
Description:
def dump(node):
""" Dump initialized object structure to yaml
""" |
from qubell.api.private.platform import Auth, QubellPlatform
from qubell.api.private.organization import Organization
from qubell.api.private.application import Application
from qubell.api.private.instance import Instance
from qubell.api.private.revision import Revision
from qubell.api.private.environment import Environment
from qubell.api.private.zone import Zone
from qubell.api.private.manifest import Manifest
# Exclude keys from dump
# Format: { 'ClassName': ['fields', 'to', 'exclude']}
exclusion_list = {
Auth: ['cookies'],
QubellPlatform: ['auth'],
Organization: ['auth', 'organizationId', 'zone'],
Application: ['auth', 'applicationId', 'organization'],
Instance: ['auth', 'instanceId', 'application'],
Manifest: ['name', 'content'],
Revision: ['auth', 'revisionId'],
Environment: ['auth', 'environmentId', 'organization'],
Zone: ['auth', 'zoneId', 'organization'],
}
def obj_presenter(dumper, obj):
for x in exclusion_list.keys():
if isinstance(obj, x): # Find class
fields = obj.__dict__.copy()
for excl_item in exclusion_list[x]:
try:
    fields.pop(excl_item)
except KeyError:
    log.warning('No item %s in object %s' % (excl_item, x))
return dumper.represent_mapping('tag:yaml.org,2002:map', fields)
return dumper.represent_mapping('tag:yaml.org,2002:map', obj.__dict__)
noalias_dumper = yaml.dumper.Dumper
noalias_dumper.ignore_aliases = lambda self, data: True
yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
yaml.add_multi_representer(object, obj_presenter)
serialized = yaml.dump(node, default_flow_style=False, Dumper=noalias_dumper)
return serialized |
<SYSTEM_TASK:>
Function list
<END_TASK>
<USER_TASK:>
Description:
def list(self, obj, filter=False, only_id=False, limit=20):
""" Function list
Get the list of an object
@param obj: object name ('hosts', 'puppetclasses'...)
@param filter: filter for objects
@param only_id: boolean to only return dict with name/id
@return RETURN: the list of the object
""" |
self.url = '{}{}/?per_page={}'.format(self.base_url, obj, limit)
self.method = 'GET'
if filter:
self.url += '&search={}'.format(filter)
self.resp = requests.get(url=self.url, auth=self.auth,
headers=self.headers, cert=self.ca_cert)
if only_id:
if self.__process_resp__(obj) is False:
return False
if type(self.res['results']) is list:
return dict((x['name'], x['id']) for x in self.res['results'])
elif type(self.res['results']) is dict:
r = {}
for v in self.res['results'].values():
for vv in v:
r[vv['name']] = vv['id']
return r
else:
return False
else:
return self.__process_resp__(obj) |
<SYSTEM_TASK:>
Function get
<END_TASK>
<USER_TASK:>
Description:
def get(self, obj, id, sub_object=None):
""" Function get
Get an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the targeted object
""" |
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'GET'
if sub_object:
self.url += '/' + sub_object
self.resp = requests.get(url=self.url, auth=self.auth,
headers=self.headers, cert=self.ca_cert)
if self.__process_resp__(obj):
return self.res
return False |
<SYSTEM_TASK:>
Function get_id_by_name
<END_TASK>
<USER_TASK:>
Description:
def get_id_by_name(self, obj, name):
""" Function get_id_by_name
Get the id of an object
@param obj: object name ('hosts', 'puppetclasses'...)
@param name: the name of the object
@return RETURN: the id of the targeted object
""" |
res = self.list(obj, filter='name = "{}"'.format(name),
                only_id=True, limit=1)
return res[name] if name in res else False |
<SYSTEM_TASK:>
Function set
<END_TASK>
<USER_TASK:>
Description:
def set(self, obj, id, payload, action='', async=False):
""" Function set
Set an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@param action: specific action of an object ('power'...)
@param payload: the dict of the payload
@param async: should this request be async, if true use
return.result() to get the response
@return RETURN: the server response
""" |
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'PUT'
if action:
self.url += '/{}'.format(action)
self.payload = json.dumps(payload)
if async:
session = FuturesSession()
return session.put(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
else:
self.resp = requests.put(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
if self.__process_resp__(obj):
return self.res
return False |
<SYSTEM_TASK:>
Function create
<END_TASK>
<USER_TASK:>
Description:
def create(self, obj, payload, async=False):
""" Function create
Create an new object
@param obj: object name ('hosts', 'puppetclasses'...)
@param payload: the dict of the payload
@param async: should this request be async, if true use
return.result() to get the response
@return RETURN: the server response
""" |
self.url = self.base_url + obj
self.method = 'POST'
self.payload = json.dumps(payload)
if async:
self.method = 'POST(Async)'
session = FuturesSession()
self.resp = session.post(url=self.url, auth=self.auth,
headers=self.headers, data=self.payload,
cert=self.ca_cert)
return self.resp
else:
self.resp = requests.post(url=self.url, auth=self.auth,
headers=self.headers,
data=self.payload, cert=self.ca_cert)
return self.__process_resp__(obj) |
<SYSTEM_TASK:>
Function delete
<END_TASK>
<USER_TASK:>
Description:
def delete(self, obj, id):
""" Function delete
Delete an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the server response
""" |
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'DELETE'
self.resp = requests.delete(url=self.url,
auth=self.auth,
headers=self.headers, cert=self.ca_cert)
return self.__process_resp__(obj) |
<SYSTEM_TASK:>
Modified ``run`` that captures return value and exceptions from ``target``
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""Modified ``run`` that captures return value and exceptions from ``target``""" |
try:
if self._target:
return_value = self._target(*self._args, **self._kwargs)
if return_value is not None:
self._exception = OrphanedReturn(self, return_value)
except BaseException as err:
self._exception = err
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs |
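
As a sketch of how this override is typically consumed, a subclass can
re-raise the captured exception when joined (only the _exception attribute
comes from the code above; the class name is an assumption):

import threading

class PropagatingThread(threading.Thread):
    _exception = None  # set by the run() override above

    def join(self, timeout=None):
        super().join(timeout)
        if self._exception is not None:
            raise self._exception  # surface errors in the joining thread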
<SYSTEM_TASK:>
Decorate functions that modify the internally stored usernotes JSON.
<END_TASK>
<USER_TASK:>
Description:
def update_cache(func):
"""Decorate functions that modify the internally stored usernotes JSON.
Ensures that updates are mirrored onto reddit.
Arguments:
func: the function being decorated
""" |
@wraps(func)
def wrapper(self, *args, **kwargs):
"""The wrapper function."""
        lazy = kwargs.pop('lazy', False)
if not lazy:
self.get_json()
ret = func(self, *args, **kwargs)
# If returning a string assume it is an update message
if isinstance(ret, str) and not lazy:
self.set_json(ret)
else:
return ret
return wrapper |
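
A minimal application sketch (the class and method bodies are hypothetical
stand-ins; only get_json/set_json are required by the decorator):

class UserNotes:
    def get_json(self):
        ...  # fetch the usernotes JSON from reddit

    def set_json(self, reason):
        ...  # mirror the cached JSON back to reddit

    @update_cache
    def add_note(self, user, note):
        # mutate the cached JSON, then return an update message
        return 'add note for {}'.format(user)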
<SYSTEM_TASK:>
Return a tuple for authenticating a user
<END_TASK>
<USER_TASK:>
Description:
def get_auth():
"""Return a tuple for authenticating a user
    If not successful raise ``GithubException``.
""" |
auth = get_auth_from_env()
if auth[0] and auth[1]:
return auth
home = os.path.expanduser("~")
config = os.path.join(home, '.gitconfig')
if not os.path.isfile(config):
raise GithubException('No .gitconfig available')
parser = configparser.ConfigParser()
parser.read(config)
if 'user' in parser:
user = parser['user']
if 'username' not in user:
raise GithubException('Specify username in %s user '
'section' % config)
if 'token' not in user:
raise GithubException('Specify token in %s user section'
% config)
return user['username'], user['token']
else:
raise GithubException('No user section in %s' % config) |
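
For reference, a ~/.gitconfig user section that satisfies this lookup
(values are placeholders):

[user]
    username = octocat
    token = <personal-access-token>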
<SYSTEM_TASK:>
Function checkAndCreate
<END_TASK>
<USER_TASK:>
Description:
def checkAndCreate(self, key, payload, osIds):
""" Function checkAndCreate
Check if an architectures exists and create it if not
@param key: The targeted architectures
@param payload: The targeted architectures description
@param osIds: The list of os ids liked with this architecture
@return RETURN: The id of the object
""" |
if key not in self:
self[key] = payload
oid = self[key]['id']
if not oid:
return False
#~ To be sure the OS list is good, we ensure our os are in the list
        for op_sys in self[key]['operatingsystems']:
            osIds.add(op_sys['id'])
        self[key]["operatingsystem_ids"] = list(osIds)
        if len(self[key]['operatingsystems']) != len(osIds):
return False
return oid |
<SYSTEM_TASK:>
Find the common prefix of two or more paths.
<END_TASK>
<USER_TASK:>
Description:
def find_common_prefix(
paths: Sequence[Union[str, pathlib.Path]]
) -> Optional[pathlib.Path]:
"""
Find the common prefix of two or more paths.
::
>>> import pathlib
>>> one = pathlib.Path('foo/bar/baz')
>>> two = pathlib.Path('foo/quux/biz')
>>> three = pathlib.Path('foo/quux/wuux')
::
>>> import uqbar.io
>>> str(uqbar.io.find_common_prefix([one, two, three]))
'foo'
:param paths: paths to inspect
""" |
counter: collections.Counter = collections.Counter()
for path in paths:
path = pathlib.Path(path)
counter.update([path])
counter.update(path.parents)
valid_paths = sorted(
[path for path, count in counter.items() if count >= len(paths)],
key=lambda x: len(x.parts),
)
if valid_paths:
return valid_paths[-1]
return None |
<SYSTEM_TASK:>
r"""Finds executable `name`.
<END_TASK>
<USER_TASK:>
Description:
def find_executable(name: str, flags=os.X_OK) -> List[str]:
r"""Finds executable `name`.
Similar to Unix ``which`` command.
Returns list of zero or more full paths to `name`.
""" |
result = []
extensions = [x for x in os.environ.get("PATHEXT", "").split(os.pathsep) if x]
    path_env = os.environ.get("PATH")
    if path_env is None:
        return []
    for directory in path_env.split(os.pathsep):
        path = os.path.join(directory, name)
if os.access(path, flags):
result.append(path)
for extension in extensions:
path_extension = path + extension
if os.access(path_extension, flags):
result.append(path_extension)
return result |
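
Usage sketch:

# Resolve an executable the way `which` would.
candidates = find_executable('python3')
if candidates:
    print('using', candidates[0])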
<SYSTEM_TASK:>
Walks a directory tree.
<END_TASK>
<USER_TASK:>
Description:
def walk(
root_path: Union[str, pathlib.Path], top_down: bool = True
) -> Generator[
Tuple[pathlib.Path, Sequence[pathlib.Path], Sequence[pathlib.Path]], None, None
]:
"""
Walks a directory tree.
Like :py:func:`os.walk` but yielding instances of :py:class:`pathlib.Path`
instead of strings.
    :param root_path: the directory to walk
    :param top_down: if true, yield each directory before its subdirectories
""" |
root_path = pathlib.Path(root_path)
directory_paths, file_paths = [], []
for path in sorted(root_path.iterdir()):
if path.is_dir():
directory_paths.append(path)
else:
file_paths.append(path)
if top_down:
yield root_path, directory_paths, file_paths
for directory_path in directory_paths:
yield from walk(directory_path, top_down=top_down)
if not top_down:
yield root_path, directory_paths, file_paths |
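
A usage sketch (assuming the function is importable from the same
``uqbar.io`` namespace used in the doctest above; the path is a placeholder):

import uqbar.io

for current_dir, dir_paths, file_paths in uqbar.io.walk('docs'):
    print(current_dir, len(dir_paths), len(file_paths))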
<SYSTEM_TASK:>
Writes ``contents`` to ``path``.
<END_TASK>
<USER_TASK:>
Description:
def write(
contents: str,
path: Union[str, pathlib.Path],
verbose: bool = False,
logger_func=None,
) -> bool:
"""
Writes ``contents`` to ``path``.
Checks if ``path`` already exists and only write out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
    :param verbose: whether to print output
    :param logger_func: optional callable used instead of ``print``
    """ |
print_func = logger_func or print
path = pathlib.Path(path)
if path.exists():
with path.open("r") as file_pointer:
old_contents = file_pointer.read()
if old_contents == contents:
if verbose:
print_func("preserved {}".format(path))
return False
else:
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("rewrote {}".format(path))
return True
    else:
if not path.parent.exists():
path.parent.mkdir(parents=True)
with path.open("w") as file_pointer:
file_pointer.write(contents)
if verbose:
print_func("wrote {}".format(path))
return True |
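
A quick sketch of the idempotent behaviour (paths are placeholders):

write('hello\n', 'build/out.txt', verbose=True)  # "wrote build/out.txt", True
write('hello\n', 'build/out.txt', verbose=True)  # "preserved ...", False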
<SYSTEM_TASK:>
Send command without return value, wait for completion, verify success.
<END_TASK>
<USER_TASK:>
Description:
def sendQueryVerify(self, cmd):
""" Send command without return value, wait for completion, verify success.
:param cmd: command to send
""" |
cmd = cmd.strip()
self.logger.debug("sendQueryVerify(%s)", cmd)
if not self.is_connected():
raise socket.error("sendQueryVerify on a disconnected socket")
resp = self.__sendQueryReply(cmd)
if resp != self.reply_ok:
raise XenaCommandException('Command {} Fail Expected {} Actual {}'.format(cmd, self.reply_ok, resp))
self.logger.debug("SendQueryVerify(%s) Succeed", cmd) |
<SYSTEM_TASK:>
Modify a changes to add an automatically generated uidNumber.
<END_TASK>
<USER_TASK:>
Description:
def save_account(changes: Changeset, table: LdapObjectClass, database: Database) -> Changeset:
""" Modify a changes to add an automatically generated uidNumber. """ |
d = {}
settings = database.settings
uid_number = changes.get_value_as_single('uidNumber')
if uid_number is None:
scheme = settings['NUMBER_SCHEME']
first = settings.get('UID_FIRST', 10000)
d['uidNumber'] = Counters.get_and_increment(
scheme, "uidNumber", first,
lambda n: not _check_exists(database, table, 'uidNumber', n)
)
changes = changes.merge(d)
return changes |
<SYSTEM_TASK:>
Search for entries in LDAP database.
<END_TASK>
<USER_TASK:>
Description:
def search(self, base, scope, filterstr='(objectClass=*)',
attrlist=None, limit=None) -> Generator[Tuple[str, dict], None, None]:
"""
Search for entries in LDAP database.
""" |
_debug("search", base, scope, filterstr, attrlist, limit)
# first results
if attrlist is None:
attrlist = ldap3.ALL_ATTRIBUTES
elif isinstance(attrlist, set):
attrlist = list(attrlist)
def first_results(obj):
_debug("---> searching ldap", limit)
obj.search(
base, filterstr, scope, attributes=attrlist, paged_size=limit)
return obj.response
# get the 1st result
result_list = self._do_with_retry(first_results)
# Loop over list of search results
for result_item in result_list:
# skip searchResRef for now
if result_item['type'] != "searchResEntry":
continue
dn = result_item['dn']
attributes = result_item['raw_attributes']
# did we already retrieve this from cache?
_debug("---> got ldap result", dn)
_debug("---> yielding", result_item)
yield (dn, attributes)
# we are finished - return results, eat cake
_debug("---> done")
return |
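
A usage sketch (``conn`` stands in for an instance of this connection class;
the base DN and filter are placeholders):

import ldap3

for dn, attrs in conn.search('ou=People,dc=example,dc=org', ldap3.SUBTREE,
                             filterstr='(uid=jsmith)',
                             attrlist=['uid', 'cn'], limit=10):
    print(dn, attrs.get('cn'))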
<SYSTEM_TASK:>
Example shows how to configure environment from scratch
<END_TASK>
<USER_TASK:>
Description:
def prepare_env(org):
""" Example shows how to configure environment from scratch """ |
# Add services
key_service = org.service(type='builtin:cobalt_secure_store', name='Keystore')
wf_service = org.service(type='builtin:workflow_service', name='Workflow', parameters='{}')
# Add services to environment
env = org.environment(name='default')
env.clean()
env.add_service(key_service)
env.add_service(wf_service)
env.add_policy(
{"action": "provisionVms",
"parameter": "publicKeyId",
"value": key_service.regenerate()['id']})
# Add cloud provider account
access = {
"provider": "aws-ec2",
"usedEnvironments": [],
"ec2SecurityGroup": "default",
"providerCopy": "aws-ec2",
"name": "test-provider",
"jcloudsIdentity": KEY,
"jcloudsCredential": SECRET_KEY,
"jcloudsRegions": "us-east-1"
}
prov = org.provider(access)
env.add_provider(prov)
return org.organizationId |
<SYSTEM_TASK:>
Commands for devops operations
<END_TASK>
<USER_TASK:>
Description:
def start(ctx, debug, version, config):
"""Commands for devops operations""" |
ctx.obj = {}
ctx.DEBUG = debug
if os.path.isfile(config):
with open(config) as fp:
agile = json.load(fp)
else:
agile = {}
ctx.obj['agile'] = agile
if version:
click.echo(__version__)
ctx.exit(0)
if not ctx.invoked_subcommand:
click.echo(ctx.get_help()) |
<SYSTEM_TASK:>
Duplicate all related objects of obj setting
<END_TASK>
<USER_TASK:>
Description:
def duplicate(obj, value=None, field=None, duplicate_order=None):
"""
Duplicate all related objects of obj setting
field to value. If one of the duplicate
objects has an FK to another duplicate object
update that as well. Return the duplicate copy
of obj.
    duplicate_order is a list of models which specifies the order in
    which the duplicated objects are saved. For complex object graphs
    this can matter: check that objects are saved correctly and, if
    not, pass in the related models in the order they should be saved.
""" |
using = router.db_for_write(obj._meta.model)
collector = CloneCollector(using=using)
collector.collect([obj])
collector.sort()
related_models = list(collector.data.keys())
data_snapshot = {}
for key in collector.data.keys():
data_snapshot.update({
key: dict(zip(
[item.pk for item in collector.data[key]], [item for item in collector.data[key]]))
})
root_obj = None
# Sometimes it's good enough just to save in reverse deletion order.
if duplicate_order is None:
duplicate_order = reversed(related_models)
for model in duplicate_order:
# Find all FKs on model that point to a related_model.
fks = []
for f in model._meta.fields:
if isinstance(f, ForeignKey) and f.rel.to in related_models:
fks.append(f)
# Replace each `sub_obj` with a duplicate.
if model not in collector.data:
continue
sub_objects = collector.data[model]
for obj in sub_objects:
for fk in fks:
fk_value = getattr(obj, "%s_id" % fk.name)
# If this FK has been duplicated then point to the duplicate.
fk_rel_to = data_snapshot[fk.rel.to]
if fk_value in fk_rel_to:
dupe_obj = fk_rel_to[fk_value]
setattr(obj, fk.name, dupe_obj)
# Duplicate the object and save it.
obj.id = None
if field is not None:
setattr(obj, field, value)
obj.save()
if root_obj is None:
root_obj = obj
return root_obj |
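
A hedged usage sketch (``Invoice`` and its ``status`` field are hypothetical
models):

original = Invoice.objects.get(pk=1)
copy = duplicate(original, value='draft', field='status')
# `copy` is a fresh Invoice whose related objects were cloned and re-pointed.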
<SYSTEM_TASK:>
Enters transaction management for a running thread. It must be balanced
<END_TASK>
<USER_TASK:>
Description:
def enter_transaction_management(using=None):
"""
Enters transaction management for a running thread. It must be balanced
with the appropriate leave_transaction_management call, since the actual
state is managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
""" |
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.enter_transaction_management()
return
connection = tldap.backend.connections[using]
connection.enter_transaction_management() |
<SYSTEM_TASK:>
Returns True if the current transaction requires a commit for changes to
<END_TASK>
<USER_TASK:>
Description:
def is_dirty(using=None):
"""
Returns True if the current transaction requires a commit for changes to
happen.
""" |
if using is None:
dirty = False
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
if connection.is_dirty():
dirty = True
return dirty
connection = tldap.backend.connections[using]
return connection.is_dirty() |
<SYSTEM_TASK:>
Checks whether the transaction manager is in manual or in auto state.
<END_TASK>
<USER_TASK:>
Description:
def is_managed(using=None):
"""
Checks whether the transaction manager is in manual or in auto state.
""" |
if using is None:
managed = False
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
if connection.is_managed():
managed = True
return managed
connection = tldap.backend.connections[using]
return connection.is_managed() |
<SYSTEM_TASK:>
Does the commit itself and resets the dirty flag.
<END_TASK>
<USER_TASK:>
Description:
def commit(using=None):
"""
Does the commit itself and resets the dirty flag.
""" |
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.commit()
return
connection = tldap.backend.connections[using]
connection.commit() |
<SYSTEM_TASK:>
This function does the rollback itself and resets the dirty flag.
<END_TASK>
<USER_TASK:>
Description:
def rollback(using=None):
"""
This function does the rollback itself and resets the dirty flag.
""" |
if using is None:
for using in tldap.backend.connections:
connection = tldap.backend.connections[using]
connection.rollback()
return
connection = tldap.backend.connections[using]
connection.rollback() |
<SYSTEM_TASK:>
This decorator activates commit on response. This way, if the view function
<END_TASK>
<USER_TASK:>
Description:
def commit_on_success(using=None):
"""
This decorator activates commit on response. This way, if the view function
runs successfully, a commit is made; if the viewfunc produces an exception,
a rollback is made. This is one of the most common ways to do transaction
control in Web apps.
""" |
def entering(using):
enter_transaction_management(using=using)
def exiting(exc_value, using):
try:
if exc_value is not None:
if is_dirty(using=using):
rollback(using=using)
else:
commit(using=using)
finally:
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using) |
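
A sketch of the decorator in use, mirroring the old Django transaction API
this follows (the view function and model are hypothetical):

@commit_on_success(using='default')
def update_person(request):
    person = Person.objects.get(pk=1)
    person.title = 'Dr'
    person.save()  # committed on success, rolled back if the view raises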
<SYSTEM_TASK:>
Reloads glitter URL patterns if page URLs change.
<END_TASK>
<USER_TASK:>
Description:
def process_request(self, request):
"""
Reloads glitter URL patterns if page URLs change.
Avoids having to restart the server to recreate the glitter URLs being used by Django.
""" |
global _urlconf_pages
page_list = list(
Page.objects.exclude(glitter_app_name='').values_list('id', 'url').order_by('id')
)
with _urlconf_lock:
if page_list != _urlconf_pages:
glitter_urls = 'glitter.urls'
if glitter_urls in sys.modules:
importlib.reload(sys.modules[glitter_urls])
_urlconf_pages = page_list |