repository | repo_id | target_module_path | prompt | relavent_test_path | full_function | function_name
---|---|---|---|---|---|---
astropy | 8 | astropy/coordinates/earth.py | def get_itrs(self, obstime=None, location=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``, either geocentric, or
topocentric relative to a given ``location``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
location : `~astropy.coordinates.EarthLocation` or None
A possible observer's location, for a topocentric ITRS position.
If not given (default), a geocentric ITRS object will be created.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame, either geocentric or topocentric
relative to the given ``location``.
"""
| /usr/src/app/target_test_cases/failed_tests_EarthLocation.get_itrs.txt | def get_itrs(self, obstime=None, location=None):
"""
Generates an `~astropy.coordinates.ITRS` object with the location of
this object at the requested ``obstime``, either geocentric, or
topocentric relative to a given ``location``.
Parameters
----------
obstime : `~astropy.time.Time` or None
The ``obstime`` to apply to the new `~astropy.coordinates.ITRS`, or
if None, the default ``obstime`` will be used.
location : `~astropy.coordinates.EarthLocation` or None
A possible observer's location, for a topocentric ITRS position.
If not given (default), a geocentric ITRS object will be created.
Returns
-------
itrs : `~astropy.coordinates.ITRS`
The new object in the ITRS frame, either geocentric or topocentric
relative to the given ``location``.
"""
# Broadcast for a single position at multiple times, but don't attempt
# to be more general here.
if obstime and self.size == 1 and obstime.shape:
self = np.broadcast_to(self, obstime.shape, subok=True)
# do this here to prevent a series of complicated circular imports
from .builtin_frames import ITRS
if location is None:
# No location provided, return geocentric ITRS coordinates
return ITRS(x=self.x, y=self.y, z=self.z, obstime=obstime)
else:
return ITRS(
self.x - location.x,
self.y - location.y,
self.z - location.z,
copy=False,
obstime=obstime,
location=location,
)
| EarthLocation.get_itrs |
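A minimal usage sketch for `EarthLocation.get_itrs` as documented in the row above (editor-added illustration, not part of the dataset; the site coordinates and times are arbitrary):

```python
import astropy.units as u
from astropy.coordinates import EarthLocation
from astropy.time import Time

site = EarthLocation.from_geodetic(lon=-70.4 * u.deg, lat=-24.6 * u.deg, height=2635 * u.m)
t = Time("2024-01-01T00:00:00")

geocentric = site.get_itrs(obstime=t)                      # geocentric ITRS coordinates
observer = EarthLocation.from_geodetic(0 * u.deg, 0 * u.deg)
topocentric = site.get_itrs(obstime=t, location=observer)  # topocentric, relative to `observer`
```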
astropy | 9 | astropy/coordinates/earth.py | def gravitational_redshift(
self, obstime, bodies=["sun", "jupiter", "moon"], masses={}
):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict[str, `~astropy.units.Quantity`], optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
-------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
"""
| /usr/src/app/target_test_cases/failed_tests_EarthLocation.gravitational_redshift.txt | def gravitational_redshift(
self, obstime, bodies=["sun", "jupiter", "moon"], masses={}
):
"""Return the gravitational redshift at this EarthLocation.
Calculates the gravitational redshift, of order 3 m/s, due to the
requested solar system bodies.
Parameters
----------
obstime : `~astropy.time.Time`
The ``obstime`` to calculate the redshift at.
bodies : iterable, optional
The bodies (other than the Earth) to include in the redshift
calculation. List elements should be any body name
`get_body_barycentric` accepts. Defaults to Jupiter, the Sun, and
the Moon. Earth is always included (because the class represents
an *Earth* location).
masses : dict[str, `~astropy.units.Quantity`], optional
The mass or gravitational parameters (G * mass) to assume for the
bodies requested in ``bodies``. Can be used to override the
defaults for the Sun, Jupiter, the Moon, and the Earth, or to
pass in masses for other bodies.
Returns
-------
redshift : `~astropy.units.Quantity`
Gravitational redshift in velocity units at given obstime.
"""
# needs to be here to avoid circular imports
from .solar_system import get_body_barycentric
bodies = list(bodies)
# Ensure earth is included and last in the list.
if "earth" in bodies:
bodies.remove("earth")
bodies.append("earth")
_masses = {
"sun": consts.GM_sun,
"jupiter": consts.GM_jup,
"moon": consts.G * 7.34767309e22 * u.kg,
"earth": consts.GM_earth,
}
_masses.update(masses)
GMs = []
M_GM_equivalency = (u.kg, u.Unit(consts.G * u.kg))
for body in bodies:
try:
GMs.append(_masses[body].to(u.m**3 / u.s**2, [M_GM_equivalency]))
except KeyError as err:
raise KeyError(f'body "{body}" does not have a mass.') from err
except u.UnitsError as exc:
exc.args += (
(
'"masses" argument values must be masses or '
"gravitational parameters."
),
)
raise
positions = [get_body_barycentric(name, obstime) for name in bodies]
# Calculate distances to objects other than earth.
distances = [(pos - positions[-1]).norm() for pos in positions[:-1]]
# Append distance from Earth's center for Earth's contribution.
distances.append(CartesianRepresentation(self.geocentric).norm())
# Get redshifts due to all objects.
redshifts = [
-GM / consts.c / distance for (GM, distance) in zip(GMs, distances)
]
# Reverse order of summing, to go from small to big, and to get
# "earth" first, which gives m/s as unit.
return sum(redshifts[::-1])
| EarthLocation.gravitational_redshift |
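A hedged usage sketch for `gravitational_redshift` (illustrative site and time; the built-in solar-system ephemeris is assumed to suffice for the default bodies, and the `masses` override mirrors the Moon mass already used in the code above):

```python
import astropy.units as u
from astropy.coordinates import EarthLocation
from astropy.time import Time

site = EarthLocation.from_geodetic(lon=16.7 * u.deg, lat=-24.6 * u.deg, height=1800 * u.m)
z = site.gravitational_redshift(Time("2024-01-01"))  # ~3 m/s from Sun + Jupiter + Moon + Earth

# A plain mass works too; it is converted to G * M internally.
z_alt = site.gravitational_redshift(Time("2024-01-01"), masses={"moon": 7.35e22 * u.kg})
```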
astropy | 10 | astropy/io/fits/hdu/hdulist.py | def writeto(
self, fileobj, output_verify="exception", overwrite=False, checksum=False
):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : str, file-like or `pathlib.Path`
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
Notes
-----
gzip, zip and bzip2 compression algorithms are natively supported.
Compression mode is determined from the filename extension
('.gz', '.zip' or '.bz2' respectively). It is also possible to pass a
compressed file object, e.g. `gzip.GzipFile`.
"""
| /usr/src/app/target_test_cases/failed_tests_HDUList.writeto.txt | def writeto(
self, fileobj, output_verify="exception", overwrite=False, checksum=False
):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : str, file-like or `pathlib.Path`
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
Notes
-----
gzip, zip and bzip2 compression algorithms are natively supported.
Compression mode is determined from the filename extension
('.gz', '.zip' or '.bz2' respectively). It is also possible to pass a
compressed file object, e.g. `gzip.GzipFile`.
"""
if len(self) == 0:
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
if fileobj is sys.stdout:
# special case stdout for debugging convenience
# see https://github.com/astropy/astropy/issues/3427
fileobj = fileobj.buffer
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else "ostream"
# This can accept an open file object that's open to write only, or in
# append/update modes but only if the file doesn't exist.
fileobj = _File(fileobj, mode=mode, overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except (AttributeError, TypeError):
dirname = None
try:
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
finally:
hdulist.close(output_verify=output_verify, closed=closed)
| HDUList.writeto |
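A short, hedged example of `HDUList.writeto` (the file name is illustrative; gzip compression is inferred from the `.gz` extension, as the notes above state):

```python
import numpy as np
from astropy.io import fits

hdul = fits.HDUList([fits.PrimaryHDU(data=np.arange(100.0))])
hdul.writeto("example.fits.gz", overwrite=True, checksum=True)  # adds DATASUM/CHECKSUM cards
```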
astropy | 11 | astropy/timeseries/periodograms/lombscargle/core.py | def autofrequency(
self,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
return_freq_limits=False,
):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
The heuristically-determined optimal frequency bin
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargle.autofrequency.txt | def autofrequency(
self,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
return_freq_limits=False,
):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
The heuristically-determined optimal frequency bin
"""
baseline = self._trel.max() - self._trel.min()
n_samples = self._trel.size
df = 1.0 / baseline / samples_per_peak
if minimum_frequency is None:
minimum_frequency = 0.5 * df
if maximum_frequency is None:
avg_nyquist = 0.5 * n_samples / baseline
maximum_frequency = nyquist_factor * avg_nyquist
Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
if return_freq_limits:
return minimum_frequency, minimum_frequency + df * (Nf - 1)
else:
return minimum_frequency + df * np.arange(Nf)
| LombScargle.autofrequency |
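An illustrative sketch of `autofrequency` on synthetic, irregularly sampled data (the data and parameter values are arbitrary, editor-added, and not part of the dataset):

```python
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = 100 * rng.random(200)                                  # ~100-day irregular baseline
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * rng.standard_normal(t.size)

ls = LombScargle(t, y)
frequency = ls.autofrequency(samples_per_peak=10)          # full heuristic grid
fmin, fmax = ls.autofrequency(return_freq_limits=True)     # only the grid limits
```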
astropy | 12 | astropy/timeseries/periodograms/lombscargle/core.py | def autopower(
self,
method="auto",
method_kwds=None,
normalization=None,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargle.autopower.txt | def autopower(
self,
method="auto",
method_kwds=None,
normalization=None,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
"""
frequency = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
)
power = self.power(
frequency,
normalization=normalization,
method=method,
method_kwds=method_kwds,
assume_regular_frequency=True,
)
return frequency, power
| LombScargle.autopower |
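A matching sketch for `autopower`, reusing the same kind of synthetic data (illustrative values only):

```python
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = 100 * rng.random(200)
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * rng.standard_normal(t.size)

frequency, power = LombScargle(t, y).autopower(
    method="fast", minimum_frequency=0.05, maximum_frequency=2.0
)
best_frequency = frequency[np.argmax(power)]
```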
astropy | 13 | astropy/timeseries/periodograms/lombscargle/core.py | def false_alarm_level(
self,
false_alarm_probability,
method="baluev",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
method_kwds=None,
):
"""Level of maximum at a given false alarm probability.
This gives an estimate of the periodogram level corresponding to a
specified false alarm probability for the largest peak, assuming a
null hypothesis of non-varying data with Gaussian noise.
Parameters
----------
false_alarm_probability : array-like
The false alarm probability (0 < fap < 1).
maximum_frequency : float
The maximum frequency of the periodogram.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use; default='baluev'.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
power : np.ndarray
The periodogram peak height corresponding to the specified
false alarm probability.
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data. The number of samples can
be set with the method-specific keyword "n_bootstraps" (default=1000).
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_probability
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargle.false_alarm_level.txt | def false_alarm_level(
self,
false_alarm_probability,
method="baluev",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
method_kwds=None,
):
"""Level of maximum at a given false alarm probability.
This gives an estimate of the periodogram level corresponding to a
specified false alarm probability for the largest peak, assuming a
null hypothesis of non-varying data with Gaussian noise.
Parameters
----------
false_alarm_probability : array-like
The false alarm probability (0 < fap < 1).
maximum_frequency : float
The maximum frequency of the periodogram.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use; default='baluev'.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
power : np.ndarray
The periodogram peak height corresponding to the specified
false alarm probability.
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data. The number of samples can
be set with the method-specific keyword "n_bootstraps" (default=1000).
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_probability
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError(
"false alarm probability is not implemented for multiterm periodograms."
)
if not (self.fit_mean or self.center_data):
raise NotImplementedError(
"false alarm probability is implemented "
"only for periodograms of centered data."
)
fmin, fmax = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True,
)
return _statistics.false_alarm_level(
false_alarm_probability,
fmax=fmax,
t=self._trel,
y=self.y,
dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds,
)
| LombScargle.false_alarm_level |
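A brief sketch of `false_alarm_level` on the same kind of synthetic data (editor illustration; the 1% threshold is arbitrary):

```python
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = 100 * rng.random(200)
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * rng.standard_normal(t.size)
ls = LombScargle(t, y)

# Periodogram level that pure noise would exceed only 1% of the time (Baluev approximation)
level_1pct = ls.false_alarm_level(0.01, method="baluev")
```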
astropy | 14 | astropy/timeseries/periodograms/lombscargle/core.py | def false_alarm_probability(
self,
power,
method="baluev",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
method_kwds=None,
):
"""False alarm probability of periodogram maxima under the null hypothesis.
This gives an estimate of the false alarm probability given the height
of the largest peak in the periodogram, based on the null hypothesis
of non-varying data with Gaussian noise.
Parameters
----------
power : array-like
The periodogram value.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
maximum_frequency : float
The maximum frequency of the periodogram.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargle.false_alarm_probability.txt | def false_alarm_probability(
self,
power,
method="baluev",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
method_kwds=None,
):
"""False alarm probability of periodogram maxima under the null hypothesis.
This gives an estimate of the false alarm probability given the height
of the largest peak in the periodogram, based on the null hypothesis
of non-varying data with Gaussian noise.
Parameters
----------
power : array-like
The periodogram value.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
maximum_frequency : float
The maximum frequency of the periodogram.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError(
"false alarm probability is not implemented for multiterm periodograms."
)
if not (self.fit_mean or self.center_data):
raise NotImplementedError(
"false alarm probability is implemented "
"only for periodograms of centered data."
)
fmin, fmax = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True,
)
return _statistics.false_alarm_probability(
power,
fmax=fmax,
t=self._trel,
y=self.y,
dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds,
)
| LombScargle.false_alarm_probability |
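The complementary call, `false_alarm_probability`, evaluated at the tallest peak of an auto-generated periodogram (same illustrative setup):

```python
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = 100 * rng.random(200)
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * rng.standard_normal(t.size)
ls = LombScargle(t, y)

frequency, power = ls.autopower()
fap = ls.false_alarm_probability(power.max())   # chance the highest peak is a noise fluctuation
```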
astropy | 15 | astropy/timeseries/periodograms/lombscargle/core.py | def model(self, t, frequency):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times
(will have length ``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargle.model.txt | def model(self, t, frequency):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times
(will have length ``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
"""
frequency = self._validate_frequency(frequency)
t = self._validate_t(self._as_relative_time("t", t))
y_fit = periodic_fit(
*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
t_fit=strip_units(t),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms,
)
return y_fit * get_unit(self.y)
| LombScargle.model |
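A sketch of `LombScargle.model`: locate the best frequency, then evaluate the fitted sinusoid on a dense time grid (illustrative data and grid):

```python
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = 100 * rng.random(200)
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * rng.standard_normal(t.size)
ls = LombScargle(t, y)

frequency, power = ls.autopower()
best_frequency = frequency[np.argmax(power)]
t_fit = np.linspace(0, 10, 500)
y_fit = ls.model(t_fit, best_frequency)         # model curve at the requested times
```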
astropy | 16 | astropy/timeseries/periodograms/lombscargle/core.py | def power(
self,
frequency,
normalization=None,
method="auto",
assume_regular_frequency=False,
method_kwds=None,
):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargle.power.txt | def power(
self,
frequency,
normalization=None,
method="auto",
assume_regular_frequency=False,
method_kwds=None,
):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if normalization is None:
normalization = self.normalization
frequency = self._validate_frequency(frequency)
power = lombscargle(
*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms,
normalization=normalization,
method=method,
method_kwds=method_kwds,
assume_regular_frequency=assume_regular_frequency,
)
return power * self._power_unit(normalization)
| LombScargle.power |
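A sketch of `power` with an explicit, regularly spaced grid, which is what allows the O[N log N] 'fast' method (illustrative data; the grid bounds are arbitrary):

```python
import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = 100 * rng.random(200)
y = np.sin(2 * np.pi * t / 3.0) + 0.1 * rng.standard_normal(t.size)

frequency = np.linspace(0.05, 2.0, 5000)        # regular spacing, as 'fast' requires
power = LombScargle(t, y).power(frequency, method="fast", assume_regular_frequency=True)
```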
astropy | 17 | astropy/timeseries/periodograms/lombscargle_multiband/core.py | def autofrequency(
self,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
return_freq_limits=False,
):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
The heuristically-determined optimal frequency bin
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargleMultiband.autofrequency.txt | def autofrequency(
self,
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
return_freq_limits=False,
):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
The heuristically-determined optimal frequency bin
"""
if hasattr(self._trel, "unit"):
unit = self._trel.unit
trel = self._trel.to(u.day) # do frequency calculations in days
else:
trel = self._trel
baseline = trel.max() - trel.min()
n_samples = trel.size
df = 1.0 / baseline / samples_per_peak
if minimum_frequency is None:
minimum_frequency = 0.5 * df
if maximum_frequency is None:
avg_nyquist = 0.5 * n_samples / baseline
maximum_frequency = nyquist_factor * avg_nyquist
# Convert back to the input units
if hasattr(self._trel, "unit"):
df = df.to(1 / unit)
minimum_frequency = minimum_frequency.to(1 / unit)
maximum_frequency = maximum_frequency.to(1 / unit)
n_freq = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
if return_freq_limits:
return minimum_frequency, minimum_frequency + df * (n_freq - 1)
else:
return minimum_frequency + df * np.arange(n_freq)
| LombScargleMultiband.autofrequency |
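An illustrative multiband setup for `LombScargleMultiband.autofrequency` (editor-added; it assumes the constructor takes `(t, y, bands)` and that a recent astropy exposes `LombScargleMultiband` from `astropy.timeseries`; the band labels and offsets are made up):

```python
import numpy as np
from astropy.timeseries import LombScargleMultiband

rng = np.random.default_rng(0)
t = 100 * rng.random(300)
bands = rng.choice(["g", "r"], size=t.size)
y = np.sin(2 * np.pi * t / 3.0) + (bands == "g") + 0.1 * rng.standard_normal(t.size)

ls_mb = LombScargleMultiband(t, y, bands)
frequency = ls_mb.autofrequency(samples_per_peak=10)
```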
astropy | 18 | astropy/timeseries/periodograms/lombscargle_multiband/core.py | def autopower(
self,
method="flexible",
sb_method="auto",
normalization="standard",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the multi-band lomb scargle implementation to use. Options are:
- 'flexible': Constructs a common model, and an offset model per individual
band. Applies regularization to the resulting model to constrain
complexity.
- 'fast': Passes each band individually through LombScargle (single-band),
combines periodograms at the end by weight. Speed depends on single-band
method chosen in 'sb_method'.
sb_method : str, optional
specify the single-band lomb scargle implementation to use, only in
the case of using the 'fast' multiband method. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargleMultiband.autopower.txt | def autopower(
self,
method="flexible",
sb_method="auto",
normalization="standard",
samples_per_peak=5,
nyquist_factor=5,
minimum_frequency=None,
maximum_frequency=None,
):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the multi-band lomb scargle implementation to use. Options are:
- 'flexible': Constructs a common model, and an offset model per individual
band. Applies regularization to the resulting model to constrain
complexity.
- 'fast': Passes each band individually through LombScargle (single-band),
combines periodograms at the end by weight. Speed depends on single-band
method chosen in 'sb_method'.
sb_method : str, optional
specify the single-band lomb scargle implementation to use, only in
the case of using the 'fast' multiband method. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
"""
frequency = self.autofrequency(
samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
)
power = self.power(
frequency, method=method, sb_method=sb_method, normalization=normalization
)
return frequency, power
| LombScargleMultiband.autopower |
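The corresponding `autopower` call on the same illustrative multiband data (setup repeated so the sketch runs on its own):

```python
import numpy as np
from astropy.timeseries import LombScargleMultiband

rng = np.random.default_rng(0)
t = 100 * rng.random(300)
bands = rng.choice(["g", "r"], size=t.size)
y = np.sin(2 * np.pi * t / 3.0) + (bands == "g") + 0.1 * rng.standard_normal(t.size)

frequency, power = LombScargleMultiband(t, y, bands).autopower(
    method="flexible", minimum_frequency=0.05, maximum_frequency=2.0
)
```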
astropy | 19 | astropy/timeseries/periodograms/lombscargle_multiband/core.py | def design_matrix(self, frequency, t_fit=None, bands_fit=None):
"""Compute the design matrix for a given frequency
Parameters
----------
frequency : float
the frequency for the model
t_fit : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
bands_fit : array-like, or str
Bands to use in fitting, must be subset of bands in input data.
Returns
-------
ndarray
The design matrix for the model at the given frequency.
This should have a shape of (``len(t)``, ``n_parameters``).
See Also
--------
model
model_parameters
offset
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargleMultiband.design_matrix.txt | def design_matrix(self, frequency, t_fit=None, bands_fit=None):
"""Compute the design matrix for a given frequency
Parameters
----------
frequency : float
the frequency for the model
t_fit : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
bands_fit : array-like, or str
Bands to use in fitting, must be subset of bands in input data.
Returns
-------
ndarray
The design matrix for the model at the given frequency.
This should have a shape of (``len(t)``, ``n_parameters``).
See Also
--------
model
model_parameters
offset
"""
if t_fit is None:
t_fit, dy = strip_units(self._trel, self.dy)
else:
t_fit, dy = strip_units(
self._validate_t(self._as_relative_time("t", t_fit)), None
)
if bands_fit is None:
bands_fit = np.unique(self.bands)
elif type(bands_fit) == str:
bands_fit = [bands_fit]
unique_bands = np.unique(self.bands)
unique_bands_fit = np.unique(bands_fit)
bands_fit = bands_fit[:, np.newaxis]
if not set(unique_bands_fit).issubset(set(unique_bands)):
raise ValueError(
"bands_fit does not match training data: "
f"input: {set(unique_bands_fit)} output: {set(unique_bands)}"
)
t_fit, bands_fit = np.broadcast_arrays(t_fit, bands_fit)
return design_matrix(
t_fit.ravel(),
bands_fit.ravel(),
frequency,
dy,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
)
| LombScargleMultiband.design_matrix |
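A tentative sketch of `design_matrix` (the frequency and fit times are arbitrary; the exact shape of the returned matrix depends on how the requested times broadcast against the bands):

```python
import numpy as np
from astropy.timeseries import LombScargleMultiband

rng = np.random.default_rng(0)
t = 100 * rng.random(300)
bands = rng.choice(["g", "r"], size=t.size)
y = np.sin(2 * np.pi * t / 3.0) + (bands == "g") + 0.1 * rng.standard_normal(t.size)
ls_mb = LombScargleMultiband(t, y, bands)

X = ls_mb.design_matrix(1.0 / 3.0, t_fit=np.linspace(0, 10, 50))
print(X.shape)   # roughly (len(t_fit) * n_bands, n_parameters)
```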
astropy | 20 | astropy/timeseries/periodograms/lombscargle_multiband/core.py | def model(self, t, frequency, bands_fit=None):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
bands_fit : list or array-like
the unique bands to fit in the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times.
Will have shape (``n_bands``,``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargleMultiband.model.txt | def model(self, t, frequency, bands_fit=None):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
bands_fit : list or array-like
the unique bands to fit in the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times.
Will have shape (``n_bands``,``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
"""
if bands_fit is None:
bands_fit = np.unique(self.bands)
frequency = self._validate_frequency(frequency)
t = self._validate_t(self._as_relative_time("t", t))
y_fit = periodic_fit(
*strip_units(self._trel, self.y, self.dy),
bands=self.bands,
frequency=strip_units(frequency),
t_fit=strip_units(t),
bands_fit=bands_fit,
center_data=self.center_data,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
reg_base=self.reg_base,
reg_band=self.reg_band,
regularize_by_trace=self.regularize_by_trace,
)
return y_fit * get_unit(self.y)
| LombScargleMultiband.model |
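A sketch of the multiband `model` method, which returns one model curve per band (illustrative data and frequency):

```python
import numpy as np
from astropy.timeseries import LombScargleMultiband

rng = np.random.default_rng(0)
t = 100 * rng.random(300)
bands = rng.choice(["g", "r"], size=t.size)
y = np.sin(2 * np.pi * t / 3.0) + (bands == "g") + 0.1 * rng.standard_normal(t.size)
ls_mb = LombScargleMultiband(t, y, bands)

t_fit = np.linspace(0, 10, 500)
y_fit = ls_mb.model(t_fit, 1.0 / 3.0)   # shape (n_bands, len(t_fit))
```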
astropy | 21 | astropy/timeseries/periodograms/lombscargle_multiband/core.py | def offset(self, t_fit=None, bands_fit=None):
"""Return the offset array of the model
The offset array of the model is the (weighted) mean of the y values in each band.
Note that if self.center_data is False, the offset is 0 by definition.
Parameters
----------
t_fit : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
bands_fit : array-like, or str
Bands to use in fitting, must be subset of bands in input data.
Returns
-------
offset : array
See Also
--------
design_matrix
model
model_parameters
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargleMultiband.offset.txt | def offset(self, t_fit=None, bands_fit=None):
"""Return the offset array of the model
The offset array of the model is the (weighted) mean of the y values in each band.
Note that if self.center_data is False, the offset is 0 by definition.
Parameters
----------
t_fit : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
bands_fit : array-like, or str
Bands to use in fitting, must be subset of bands in input data.
Returns
-------
offset : array
See Also
--------
design_matrix
model
model_parameters
"""
if bands_fit is None:
bands_fit = np.unique(self.bands)
if t_fit is None:
on_fit = False
t_fit = self.t
else:
on_fit = True
bands_fit = bands_fit[:, np.newaxis]
unique_bands = np.unique(self.bands)
unique_bands_fit = np.unique(bands_fit)
if not set(unique_bands_fit).issubset(set(unique_bands)):
raise ValueError(
"bands_fit does not match training data: "
f"input: {set(unique_bands_fit)} output: {set(unique_bands)}"
)
y, dy = strip_units(self.y, self.dy)
if np.shape(t_fit) != np.shape(
bands_fit
): # No need to broadcast if bands map to timestamps
t_fit, bands_fit = np.broadcast_arrays(t_fit, bands_fit)
# need to make sure all unique filters are represented
u, i = np.unique(
np.concatenate([bands_fit.ravel(), unique_bands]), return_inverse=True
)
if not self.center_data:
return 0
if dy is None:
dy = 1
dy = np.broadcast_to(dy, y.shape)
# Calculate ymeans -- per band
ymeans = np.zeros(y.shape) # filter specific means
for band in unique_bands:
mask = self.bands == band
ymeans[mask] = np.average(y[mask], weights=1 / dy[mask] ** 2)
ymeans_fit = ymeans[i[: -len(unique_bands)]]
if on_fit:
return ymeans_fit * get_unit(self.y)
else:
return ymeans * get_unit(self.y)
| LombScargleMultiband.offset |
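A brief sketch of `offset`; with `center_data=True` (the default assumed here) it returns the per-band weighted means used to center the data:

```python
import numpy as np
from astropy.timeseries import LombScargleMultiband

rng = np.random.default_rng(0)
t = 100 * rng.random(300)
bands = rng.choice(["g", "r"], size=t.size)
y = np.sin(2 * np.pi * t / 3.0) + (bands == "g") + 0.1 * rng.standard_normal(t.size)
ls_mb = LombScargleMultiband(t, y, bands)

offsets = ls_mb.offset()   # per-sample array of band means when called without t_fit
```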
astropy | 22 | astropy/timeseries/periodograms/lombscargle_multiband/core.py | def power(
self, frequency, method="flexible", sb_method="auto", normalization="standard"
):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the multi-band lomb scargle implementation to use. Options are:
- 'flexible': Constructs a common model, and an offset model per individual
band. Applies regularization to the resulting model to constrain
complexity.
- 'fast': Passes each band individually through LombScargle (single-band),
combines periodograms at the end by weight. Speed depends on single-band
method chosen in 'sb_method'.
sb_method : str, optional
specify the single-band lomb scargle implementation to use, only in
the case of using the 'fast' multiband method. Options can be found
in `~astropy.timeseries.LombScargle`.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
| /usr/src/app/target_test_cases/failed_tests_LombScargleMultiband.power.txt | def power(
self, frequency, method="flexible", sb_method="auto", normalization="standard"
):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the multi-band lomb scargle implementation to use. Options are:
- 'flexible': Constructs a common model, and an offset model per individual
band. Applies regularization to the resulting model to constrain
complexity.
- 'fast': Passes each band individually through LombScargle (single-band),
combines periodograms at the end by weight. Speed depends on single-band
method chosen in 'sb_method'.
sb_method : str, optional
specify the single-band lomb scargle implementation to use, only in
the case of using the 'fast' multiband method. Options can be found
in `~astropy.timeseries.LombScargle`.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if normalization is None:
normalization = self.normalization
frequency = self._validate_frequency(frequency)
f_shape = np.shape(frequency)
power = lombscargle_multiband(
strip_units(self._trel),
strip_units(self.y),
strip_units(self.bands),
dy=strip_units(self.dy),
frequency=strip_units(np.ravel(frequency)),
method=method,
sb_method=sb_method,
normalization=normalization,
nterms_base=self.nterms_base,
nterms_band=self.nterms_band,
reg_base=self.reg_base,
reg_band=self.reg_band,
regularize_by_trace=self.regularize_by_trace,
center_data=self.center_data,
fit_mean=self.fit_mean,
)
return np.reshape(power * self._power_unit(normalization), f_shape)
| LombScargleMultiband.power |
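A sketch of the multiband `power` method on an explicit frequency grid (illustrative grid; the default 'flexible' method is used):

```python
import numpy as np
from astropy.timeseries import LombScargleMultiband

rng = np.random.default_rng(0)
t = 100 * rng.random(300)
bands = rng.choice(["g", "r"], size=t.size)
y = np.sin(2 * np.pi * t / 3.0) + (bands == "g") + 0.1 * rng.standard_normal(t.size)

frequency = np.linspace(0.05, 2.0, 2000)
power = LombScargleMultiband(t, y, bands).power(frequency, method="flexible")
```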
astropy | 23 | astropy/nddata/compat.py | def convert_unit_to(self, unit, equivalencies=[]):
"""
Returns a new `NDData` object whose values have been converted
to a new unit.
Parameters
----------
unit : `astropy.units.UnitBase` instance or str
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Returns
-------
result : `~astropy.nddata.NDData`
The resulting dataset
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
| /usr/src/app/target_test_cases/failed_tests_NDDataArray.convert_unit_to.txt | def convert_unit_to(self, unit, equivalencies=[]):
"""
Returns a new `NDData` object whose values have been converted
to a new unit.
Parameters
----------
unit : `astropy.units.UnitBase` instance or str
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Returns
-------
result : `~astropy.nddata.NDData`
The resulting dataset
Raises
------
`~astropy.units.UnitsError`
If units are inconsistent.
"""
if self.unit is None:
raise ValueError("No unit specified on source data")
data = self.unit.to(unit, self.data, equivalencies=equivalencies)
if self.uncertainty is not None:
uncertainty_values = self.unit.to(
unit, self.uncertainty.array, equivalencies=equivalencies
)
# should work for any uncertainty class
uncertainty = self.uncertainty.__class__(uncertainty_values)
else:
uncertainty = None
if self.mask is not None:
new_mask = self.mask.copy()
else:
new_mask = None
# Call __class__ in case we are dealing with an inherited type
result = self.__class__(
data,
uncertainty=uncertainty,
mask=new_mask,
wcs=self.wcs,
meta=self.meta,
unit=unit,
)
return result
| NDDataArray.convert_unit_to |
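A short, hedged example of `NDDataArray.convert_unit_to` (illustrative data and units):

```python
import astropy.units as u
from astropy.nddata import NDDataArray, StdDevUncertainty

nd = NDDataArray([1.0, 2.0, 3.0], unit=u.m,
                 uncertainty=StdDevUncertainty([0.1, 0.1, 0.2]))
nd_cm = nd.convert_unit_to(u.cm)   # data, uncertainty, and unit are all converted
```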
astropy | 24 | astropy/coordinates/sky_coordinate.py | def apply_space_motion(self, new_obstime=None, dt=None):
"""Compute the position to a new time using the velocities.
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation".
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : |SkyCoord|
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
"""
| /usr/src/app/target_test_cases/failed_tests_SkyCoord.apply_space_motion.txt | def apply_space_motion(self, new_obstime=None, dt=None):
"""Compute the position to a new time using the velocities.
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation".
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : |SkyCoord|
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
"""
from .builtin_frames.icrs import ICRS
if (new_obstime is None) == (dt is None):
raise ValueError(
"You must specify one of `new_obstime` or `dt`, but not both."
)
# Validate that we have velocity info
if "s" not in self.frame.data.differentials:
raise ValueError("SkyCoord requires velocity data to evolve the position.")
if "obstime" in self.frame.frame_attributes:
raise NotImplementedError(
"Updating the coordinates in a frame with explicit time dependence is"
" currently not supported. If you would like this functionality, please"
" open an issue on github:\nhttps://github.com/astropy/astropy"
)
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
# position / velocity were measured initially
raise ValueError(
"This object has no associated `obstime`. apply_space_motion() must"
" receive a time difference, `dt`, and not a new obstime."
)
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time("J2000")
new_obstime = None # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials["s"]
parallax_zero = False
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.0
parallax_zero = True
try:
rv = icrsvel.d_distance.to_value(u.km / u.s)
except u.UnitConversionError: # No RV
rv = 0.0
starpm = erfa.pmsafe(
icrsrep.lon.radian,
icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian / u.yr),
icrsvel.d_lat.to_value(u.radian / u.yr),
plx,
rv,
t1.jd1,
t1.jd2,
t2.jd1,
t2.jd2,
)
if parallax_zero:
new_distance = None
else:
new_distance = Distance(parallax=starpm[4] << u.arcsec)
icrs2 = ICRS(
ra=u.Quantity(starpm[0], u.radian, copy=COPY_IF_NEEDED),
dec=u.Quantity(starpm[1], u.radian, copy=COPY_IF_NEEDED),
pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=COPY_IF_NEEDED),
pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=COPY_IF_NEEDED),
distance=new_distance,
radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=COPY_IF_NEEDED),
differential_type=SphericalDifferential,
)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {
attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
}
frattrs["obstime"] = new_obstime
result = self.__class__(icrs2, **frattrs).transform_to(self.frame)
# Without this the output might not have the right differential type.
# Not sure if this fixes the problem or just hides it. See #11932
result.differential_type = self.differential_type
return result
| SkyCoord.apply_space_motion |
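A minimal usage sketch for ``SkyCoord.apply_space_motion``; the astrometric numbers are illustrative (roughly Barnard's Star)::

    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from astropy.time import Time

    c = SkyCoord(ra=269.45 * u.deg, dec=4.69 * u.deg,
                 distance=1.83 * u.pc,
                 pm_ra_cosdec=-802.8 * u.mas / u.yr,
                 pm_dec=10362.5 * u.mas / u.yr,
                 radial_velocity=-110.5 * u.km / u.s,
                 obstime=Time("J2000"))

    # Evolve to a new epoch, or equivalently by a time offset
    c_2025 = c.apply_space_motion(new_obstime=Time("J2025"))
    c_offset = c.apply_space_motion(dt=25 * u.yr)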
astropy | 25 | astropy/coordinates/spectral_coordinate.py | def replicate(
self,
value=None,
unit=None,
observer=None,
target=None,
radial_velocity=None,
redshift=None,
doppler_convention=None,
doppler_rest=None,
copy=False,
):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
the new `SpectralCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
| /usr/src/app/target_test_cases/failed_tests_SpectralCoord.replicate.txt | def replicate(
self,
value=None,
unit=None,
observer=None,
target=None,
radial_velocity=None,
redshift=None,
doppler_convention=None,
doppler_rest=None,
copy=False,
):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
the new `SpectralCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError(
"Cannot specify value as a Quantity and also specify unit"
)
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
# If value is being taken from self and copy is True
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (
(self.observer is None or self.target is None)
and radial_velocity is None
and redshift is None
):
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter("ignore", NoVelocityWarning)
return self.__class__(
value=value,
unit=unit,
observer=observer,
target=target,
radial_velocity=radial_velocity,
redshift=redshift,
doppler_convention=doppler_convention,
doppler_rest=doppler_rest,
copy=COPY_IF_NEEDED,
)
| SpectralCoord.replicate |
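A minimal usage sketch for ``SpectralCoord.replicate``; the values are illustrative::

    import astropy.units as u
    from astropy.coordinates import SpectralCoord

    sc = SpectralCoord([4861, 6563] * u.AA, radial_velocity=20 * u.km / u.s)

    # Same values and attributes, but with a fresh copy of the array
    sc_copy = sc.replicate(copy=True)

    # Same spectral values with a different radial velocity (no conversion)
    sc_rv = sc.replicate(radial_velocity=150 * u.km / u.s)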
astropy | 26 | astropy/coordinates/spectral_coordinate.py | def with_observer_stationary_relative_to(
self, frame, velocity=None, preserve_observer_frame=False
):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
| /usr/src/app/target_test_cases/failed_tests_SpectralCoord.with_observer_stationary_relative_to.txt | def with_observer_stationary_relative_to(
self, frame, velocity=None, preserve_observer_frame=False
):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
if self.observer is None or self.target is None:
raise ValueError(
"This method can only be used if both observer "
"and target are defined on the SpectralCoord."
)
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(
CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)
)
if frame.data.differentials:
if velocity is not None:
raise ValueError(
"frame already has differentials, cannot also specify velocity"
)
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(
frame.data.with_differentials(differentials)
)
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError("velocity should be a Quantity vector with 3 elements")
frame = frame_cls(
0 * u.m,
0 * u.m,
0 * u.m,
*velocity,
representation_type="cartesian",
differential_type="cartesian",
)
observer = update_differentials_to_match(
self.observer, frame, preserve_observer_frame=preserve_observer_frame
)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(
self.observer, self.target, as_scalar=True
)
fin_obs_vel = self._calculate_radial_velocity(
observer, self.target, as_scalar=True
)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
new_coord = self.replicate(value=new_data, observer=observer)
return new_coord
| SpectralCoord.with_observer_stationary_relative_to |
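A minimal usage sketch, assuming a simple ICRS observer and target with zero velocities (astropy will warn about the missing velocity data); the coordinates are illustrative::

    import astropy.units as u
    from astropy.coordinates import SkyCoord, SpectralCoord

    observer = SkyCoord(0 * u.deg, 0 * u.deg, distance=1 * u.au, frame="icrs")
    target = SkyCoord(10.68 * u.deg, 41.27 * u.deg, distance=780 * u.kpc)

    sc = SpectralCoord(230 * u.GHz, observer=observer, target=target)

    # Re-express the spectral values with the observer stationary in LSRK
    sc_lsrk = sc.with_observer_stationary_relative_to("lsrk")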
astropy | 27 | astropy/coordinates/spectral_coordinate.py | def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
| /usr/src/app/target_test_cases/failed_tests_SpectralCoord.with_radial_velocity_shift.txt | def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
if observer_shift is not None and (
self.target is None or self.observer is None
):
raise ValueError(
"Both an observer and target must be defined "
"before applying a velocity shift."
)
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError(
"Argument must have unit physical type 'speed' for radial velocty"
" or 'dimensionless' for redshift."
)
# If the target or observer shift is given but is not a Quantity object,
# assume it is a redshift float value and convert it to a velocity
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == "dimensionless":
target_shift = target_shift.to(u.km / u.s, u.doppler_redshift())
if self._observer is None or self._target is None:
return self.replicate(
value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift,
)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == "dimensionless":
observer_shift = observer_shift.to(u.km / u.s, u.doppler_redshift())
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = target_icrs.realize_frame(
target_icrs.cartesian.with_differentials(target_velocity)
).transform_to(self._target)
new_observer = observer_icrs.realize_frame(
observer_icrs.cartesian.with_differentials(observer_velocity)
).transform_to(self._observer)
init_obs_vel = self._calculate_radial_velocity(
observer_icrs, target_icrs, as_scalar=True
)
fin_obs_vel = self._calculate_radial_velocity(
new_observer, new_target, as_scalar=True
)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data, observer=new_observer, target=new_target)
| SpectralCoord.with_radial_velocity_shift |
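A minimal usage sketch for ``SpectralCoord.with_radial_velocity_shift`` with no observer or target attached; the values are illustrative::

    import astropy.units as u
    from astropy.coordinates import SpectralCoord

    sc = SpectralCoord(6562.8 * u.AA)

    # Shift expressed as a radial velocity ...
    sc_v = sc.with_radial_velocity_shift(target_shift=300 * u.km / u.s)

    # ... or as a dimensionless redshift
    sc_z = sc.with_radial_velocity_shift(target_shift=0.01)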
astropy | 28 | astropy/coordinates/spectral_quantity.py | def to(self, unit, equivalencies=[], doppler_rest=None, doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value used when converting to/from velocities. This will
also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
This will also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
"""
| /usr/src/app/target_test_cases/failed_tests_SpectralQuantity.to.txt | def to(self, unit, equivalencies=[], doppler_rest=None, doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value used when converting to/from velocities. This will
also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
This will also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
"""
# Make sure units can be passed as strings
unit = Unit(unit)
# If equivalencies is explicitly set to None, we should just use the
# default Quantity.to with equivalencies also set to None
if equivalencies is None:
result = super().to(unit, equivalencies=None)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
# FIXME: need to consider case where doppler equivalency is passed in
# the equivalencies list, or if the u.spectral equivalency is already passed
if doppler_rest is None:
doppler_rest = self._doppler_rest
if doppler_convention is None:
doppler_convention = self._doppler_convention
elif doppler_convention not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
if self.unit.is_equivalent(KMS) and unit.is_equivalent(KMS):
# Special case: if the current and final units are both velocity,
# and either the rest value or the convention are different, we
# need to convert back to frequency temporarily.
if doppler_convention is not None and self._doppler_convention is None:
raise ValueError("Original doppler_convention not set")
if doppler_rest is not None and self._doppler_rest is None:
raise ValueError("Original doppler_rest not set")
if doppler_rest is None and doppler_convention is None:
result = super().to(unit, equivalencies=equivalencies)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
elif (doppler_rest is None) is not (doppler_convention is None):
raise ValueError(
"Either both or neither doppler_rest and doppler_convention should"
" be defined for velocity conversions"
)
vel_equiv1 = DOPPLER_CONVENTIONS[self._doppler_convention](
self._doppler_rest
)
freq = super().to(si.Hz, equivalencies=equivalencies + vel_equiv1)
vel_equiv2 = DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
result = freq.to(unit, equivalencies=equivalencies + vel_equiv2)
else:
additional_equivalencies = eq.spectral()
if self.unit.is_equivalent(KMS) or unit.is_equivalent(KMS):
if doppler_convention is None:
raise ValueError(
"doppler_convention not set, cannot convert to/from velocities"
)
if doppler_rest is None:
raise ValueError(
"doppler_rest not set, cannot convert to/from velocities"
)
additional_equivalencies = (
additional_equivalencies
+ DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
)
result = super().to(
unit, equivalencies=equivalencies + additional_equivalencies
)
# Since we have to explicitly specify when we want to keep this as a
# SpectralQuantity, we need to convert it back from a Quantity to
# a SpectralQuantity here. Note that we don't use __array_finalize__
# here since we might need to set the output doppler convention and
# rest based on the parameters passed to 'to'
result = result.view(self.__class__)
result.__array_finalize__(self)
result._doppler_convention = doppler_convention
result._doppler_rest = doppler_rest
return result
| SpectralQuantity.to |
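A minimal usage sketch for ``SpectralQuantity.to``; the rest wavelength is illustrative (H-alpha)::

    import astropy.units as u
    from astropy.coordinates import SpectralQuantity

    sq = SpectralQuantity(6562.8 * u.AA)

    # Plain spectral conversion needs no extra information
    freq = sq.to(u.THz)

    # Converting to a velocity requires a rest value and a Doppler convention
    vel = sq.to(u.km / u.s, doppler_rest=6562.8 * u.AA,
                doppler_convention="optical")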
astropy | 29 | astropy/table/table.py | def add_index(self, colnames, engine=None, unique=False):
"""
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
"""
| /usr/src/app/target_test_cases/failed_tests_Table.add_index.txt | def add_index(self, colnames, engine=None, unique=False):
"""
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
"""
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, "_supports_indexing", False):
raise ValueError(
f'Cannot create an index on column "{col.info.name}", '
f'of type "{type(col)}"'
)
if col.ndim > 1:
raise ValueError(
f"Multi-dimensional column {col.info.name!r} "
"cannot be used as an index."
)
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
| Table.add_index |
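A minimal usage sketch for ``Table.add_index``; the column names and values are illustrative::

    from astropy.table import Table

    t = Table({"name": ["b", "a", "c"], "flux": [2.0, 1.0, 3.0]})

    t.add_index("name")            # the first index becomes the primary key
    row = t.loc["a"]               # indexed retrieval by key value

    t.add_index(["name", "flux"])  # a second, composite index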
astropy | 30 | astropy/table/table.py | def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
| /usr/src/app/target_test_cases/failed_tests_Table.argsort.txt | def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs["order"] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs["kind"] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
| Table.argsort |
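A minimal usage sketch for ``Table.argsort``; the values are illustrative::

    from astropy.table import Table

    t = Table({"a": [3, 1, 2], "b": [6, 5, 4]})

    idx = t.argsort("a")          # array([1, 2, 0])
    t_sorted = t[idx]             # new table ordered by column 'a'

    idx_rev = t.argsort(["a", "b"], reverse=True)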
astropy | 31 | astropy/table/table.py | def reverse(self):
"""
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
"""
| /usr/src/app/target_test_cases/failed_tests_Table.reverse.txt | def reverse(self):
"""
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
"""
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
| Table.reverse |
astropy | 32 | astropy/table/table.py | def round(self, decimals=0):
"""
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
decimals : int or dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
If a certain column is not in the dict given, it will remain the
same.
"""
| /usr/src/app/target_test_cases/failed_tests_Table.round.txt | def round(self, decimals=0):
"""
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
decimals : int or dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
If a certain column is not in the dict given, it will remain the
same.
"""
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
| Table.round |
astropy | 33 | astropy/table/table.py | def sort(self, keys=None, *, kind=None, reverse=False):
"""
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
"""
| /usr/src/app/target_test_cases/failed_tests_Table.sort.txt | def sort(self, keys=None, *, kind=None, reverse=False):
"""
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
"""
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode("freeze"):
for col in self.columns.values():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
| Table.sort |
astropy | 34 | astropy/time/core.py | def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion (except when ``longitude='tio'``).
"""
| /usr/src/app/target_test_cases/failed_tests_Time.earth_rotation_angle.txt | def earth_rotation_angle(self, longitude=None):
"""Calculate local Earth rotation angle.
Parameters
----------
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'tio', the result will be relative to the Terrestrial
Intermediate Origin (TIO) (i.e., the output of `~erfa.era00`).
Returns
-------
`~astropy.coordinates.Longitude`
Local Earth rotation angle with units of hourangle.
See Also
--------
astropy.time.Time.sidereal_time
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
The result includes the TIO locator (s'), which positions the Terrestrial
Intermediate Origin on the equator of the Celestial Intermediate Pole (CIP)
and is rigorously corrected for polar motion (except when ``longitude='tio'``).
"""
if isinstance(longitude, str) and longitude == "tio":
longitude = 0
include_tio = False
else:
include_tio = True
return self._sid_time_or_earth_rot_ang(
longitude=longitude,
function=erfa.era00,
scales=("ut1",),
include_tio=include_tio,
)
| Time.earth_rotation_angle |
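A minimal usage sketch for ``Time.earth_rotation_angle`` (the UT1 conversion needs IERS data, which astropy fetches automatically); the site coordinates are illustrative::

    import astropy.units as u
    from astropy.coordinates import EarthLocation
    from astropy.time import Time

    loc = EarthLocation(lat=19.82 * u.deg, lon=-155.47 * u.deg, height=4200 * u.m)
    t = Time("2021-01-01T00:00:00", scale="utc", location=loc)

    era_local = t.earth_rotation_angle()     # longitude taken from t.location
    era_tio = t.earth_rotation_angle("tio")  # relative to the TIO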
astropy | 35 | astropy/time/core.py | def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
""" # (docstring is formatted below)
| /usr/src/app/target_test_cases/failed_tests_Time.sidereal_time.txt | def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `~astropy.coordinates.EarthLocation`, str, or None; optional
The longitude on the Earth at which to compute the Earth rotation
angle (taken from a location as needed). If `None` (default), taken
from the ``location`` attribute of the Time instance. If the special
string 'greenwich' or 'tio', the result will be relative to longitude
0 for models before 2000, and relative to the Terrestrial Intermediate
Origin (TIO) for later ones (i.e., the output of the relevant ERFA
function that calculates greenwich sidereal time).
model : str or None; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
`~astropy.coordinates.Longitude`
Local sidereal time, with units of hourangle.
See Also
--------
astropy.time.Time.earth_rotation_angle
References
----------
IAU 2006 NFA Glossary
(currently located at: https://syrte.obspm.fr/iauWGnfa/NFA_Glossary.html)
Notes
-----
The difference between apparent sidereal time and Earth rotation angle
is the equation of the origins, which is the angle between the Celestial
Intermediate Origin (CIO) and the equinox. Applying apparent sidereal
time to the hour angle yields the true apparent Right Ascension with
respect to the equinox, while applying the Earth rotation angle yields
the intermediate (CIRS) Right Ascension with respect to the CIO.
For the IAU precession models from 2000 onwards, the result includes the
TIO locator (s'), which positions the Terrestrial Intermediate Origin on
the equator of the Celestial Intermediate Pole (CIP) and is rigorously
corrected for polar motion (except when ``longitude='tio'`` or ``'greenwich'``).
""" # (docstring is formatted below)
if kind.lower() not in SIDEREAL_TIME_MODELS:
raise ValueError(
"The kind of sidereal time has to be "
+ " or ".join(sorted(SIDEREAL_TIME_MODELS))
)
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models)[-1]
elif model.upper() not in available_models:
raise ValueError(
f"Model {model} not implemented for {kind} sidereal time; "
f"available models are {sorted(available_models)}"
)
model_kwargs = available_models[model.upper()]
if isinstance(longitude, str) and longitude in ("tio", "greenwich"):
longitude = 0
model_kwargs = model_kwargs.copy()
model_kwargs["include_tio"] = False
return self._sid_time_or_earth_rot_ang(longitude=longitude, **model_kwargs)
| Time.sidereal_time |
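A minimal usage sketch for ``Time.sidereal_time``; the longitude is illustrative::

    import astropy.units as u
    from astropy.time import Time

    t = Time("2021-01-01T00:00:00", scale="utc")

    gmst = t.sidereal_time("mean", longitude="greenwich")
    gast = t.sidereal_time("apparent", longitude="greenwich")

    # Local apparent sidereal time with an explicit model
    last = t.sidereal_time("apparent", longitude=-155.47 * u.deg, model="IAU2006A")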
astropy | 36 | astropy/timeseries/sampled.py | def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
"""
| /usr/src/app/target_test_cases/failed_tests_TimeSeries.fold.txt | def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
"""
if not isinstance(period, Quantity) or period.unit.physical_type != "time":
raise UnitsError("period should be a Quantity in units of time")
folded = self.copy()
if epoch_time is None:
epoch_time = self.time[0]
else:
epoch_time = Time(epoch_time)
period_sec = period.to_value(u.s)
if normalize_phase:
if (
isinstance(epoch_phase, Quantity)
and epoch_phase.unit.physical_type != "dimensionless"
):
raise UnitsError(
"epoch_phase should be a dimensionless Quantity "
"or a float when normalize_phase=True"
)
epoch_phase_sec = epoch_phase * period_sec
else:
if epoch_phase == 0:
epoch_phase_sec = 0.0
else:
if (
not isinstance(epoch_phase, Quantity)
or epoch_phase.unit.physical_type != "time"
):
raise UnitsError(
"epoch_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
epoch_phase_sec = epoch_phase.to_value(u.s)
if wrap_phase is None:
wrap_phase = period_sec / 2
else:
if normalize_phase:
if isinstance(
wrap_phase, Quantity
) and not wrap_phase.unit.is_equivalent(u.one):
raise UnitsError(
"wrap_phase should be dimensionless when normalize_phase=True"
)
else:
if wrap_phase < 0 or wrap_phase > 1:
raise ValueError("wrap_phase should be between 0 and 1")
else:
wrap_phase = wrap_phase * period_sec
else:
if (
isinstance(wrap_phase, Quantity)
and wrap_phase.unit.physical_type == "time"
):
if wrap_phase < 0 or wrap_phase > period:
raise ValueError(
"wrap_phase should be between 0 and the period"
)
else:
wrap_phase = wrap_phase.to_value(u.s)
else:
raise UnitsError(
"wrap_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
relative_time_sec = (
(self.time - epoch_time).sec + epoch_phase_sec + (period_sec - wrap_phase)
) % period_sec - (period_sec - wrap_phase)
folded_time = TimeDelta(relative_time_sec * u.s)
if normalize_phase:
folded_time = (folded_time / period).decompose()
period = period_sec = 1
with folded._delay_required_column_checks():
folded.remove_column("time")
folded.add_column(folded_time, name="time", index=0)
return folded
| TimeSeries.fold |
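A minimal usage sketch for ``TimeSeries.fold`` on a synthetic sinusoidal signal; the period and sampling are illustrative::

    import numpy as np
    import astropy.units as u
    from astropy.timeseries import TimeSeries

    ts = TimeSeries(time_start="2021-01-01T00:00:00", time_delta=3 * u.s,
                    n_samples=100)
    ts["flux"] = 1.0 + np.sin(2 * np.pi * np.arange(100) / 10.0)  # 30 s period

    folded = ts.fold(period=30 * u.s)                             # time in [-15 s, 15 s)
    folded_norm = ts.fold(period=30 * u.s, normalize_phase=True)  # phase in [-0.5, 0.5)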
astropy | 37 | astropy/wcs/wcs.py | def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` element, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS axis
"""
| /usr/src/app/target_test_cases/failed_tests_WCS.slice.txt | def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` element, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
A new resampled WCS axis
"""
if hasattr(view, "__len__") and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, "__len__"): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
# Group the distortion tables by which axis (x or y) they correspond to
x_tables = [t for t in (wcs_new.cpdis1, wcs_new.det2im1) if t is not None]
y_tables = [t for t in (wcs_new.cpdis2, wcs_new.det2im2) if t is not None]
distortion_tables = [*x_tables, *y_tables]
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if wcs_index < 2:
itables = [x_tables, y_tables][wcs_index]
else:
itables = []
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
scale_pixel = lambda px: (
(px - iview.start - 1.0) / iview.step
+ 0.5
+ 1.0 / iview.step / 2.0
)
crp = scale_pixel(crpix)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
for table in distortion_tables:
# The table's crval (which is an image pixel location)
# should be adjusted to the corresponding location in
# the sliced array
table.crval[wcs_index] = scale_pixel(table.crval[wcs_index])
# And its cdelt (with units image pixels / distortion
# table pixel) should reflect the stride
table.cdelt[wcs_index] /= iview.step
for table in itables:
# If we stride an x axis, for example, x distortions
# should be adjusted in magnitude
table.data /= iview.step
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
for table in distortion_tables:
table.crval[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if "indices must be integers" not in str(exc):
raise
warnings.warn(
f"NAXIS{wcs_index} attribute is not updated because at "
f"least one index ('{iview}') is no integer.",
AstropyUserWarning,
)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(
self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix
)
return wcs_new
| WCS.slice |
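A minimal usage sketch for ``WCS.slice`` on a simple 2-dimensional celestial WCS; the header values are illustrative::

    from astropy.wcs import WCS

    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcs.wcs.crval = [83.0, -5.0]
    wcs.wcs.crpix = [50.5, 60.5]
    wcs.wcs.cdelt = [-0.001, 0.001]
    wcs.pixel_shape = (100, 120)   # (nx, ny) in FITS order

    # The same (y, x) slice that would be applied to a (ny, nx) numpy array
    wcs_cut = wcs.slice((slice(10, 60), slice(20, 80)))
    print(wcs_cut.wcs.crpix)       # crpix shifted by the slice starts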
astropy | 38 | astropy/io/fits/column.py | def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
| /usr/src/app/target_test_cases/failed_tests__parse_tdisp_format.txt | def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
for ASCII and binary are identical except for 'Lw',
which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = (
tdisp[0]
if tdisp[0] != "E" or (len(tdisp) > 1 and tdisp[1] not in "NS")
else tdisp[:2]
)
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f"Format {tdisp} is not recognized.")
match = tdisp_re.match(tdisp.strip())
if not match or match.group("formatc") is None:
raise VerifyError(f"Format {tdisp} is not recognized.")
formatc = match.group("formatc")
width = match.group("width")
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ("I", "B", "O", "Z", "F", "E", "G", "D"):
precision = match.group("precision")
if precision is None:
precision = 1
if tdisp[0] in ("E", "D", "G") and tdisp[1] not in ("N", "S"):
exponential = match.group("exponential")
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
| _parse_tdisp_format |
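A minimal usage sketch for the private helper ``_parse_tdisp_format``; the TDISP string is illustrative::

    from astropy.io.fits.column import _parse_tdisp_format

    # Fixed-point float display: width 10, 4 decimal places
    formatc, width, precision, exponential = _parse_tdisp_format("F10.4")
    print(formatc, width, precision, exponential)   # F 10 4 None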
astropy | 39 | astropy/io/ascii/qdp.py | def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
"""Read a table from a QDP file.
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
table_id : int, default 0
Number of the table to be read from the QDP file. This is useful
        when multiple tables are present in the file. By default, the first is read.
delimiter : str
Any delimiter accepted by the `sep` argument of str.split()
Returns
-------
tables : list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
| /usr/src/app/target_test_cases/failed_tests__read_table_qdp.txt | def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
"""Read a table from a QDP file.
Parameters
----------
qdp_file : str
Input QDP file name
Other Parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
table_id : int, default 0
Number of the table to be read from the QDP file. This is useful
        when multiple tables are present in the file. By default, the first is read.
delimiter : str
Any delimiter accepted by the `sep` argument of str.split()
Returns
-------
tables : list of `~astropy.table.Table`
List containing all the tables present inside the QDP file
"""
if table_id is None:
warnings.warn(
"table_id not specified. Reading the first available table",
AstropyUserWarning,
)
table_id = 0
tables = _get_tables_from_qdp_file(
qdp_file, input_colnames=names, delimiter=delimiter
)
return tables[table_id]
| _read_table_qdp |
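``_read_table_qdp`` is normally reached through the public reader. A minimal sketch using ``Table.read`` with the ``ascii.qdp`` format; the QDP content below is made up for illustration (``READ TERR 1`` gives column 1 a two-sided error, ``READ SERR 3`` gives column 3 a symmetric error)::

    from astropy.table import Table

    example_qdp = (
        "READ TERR 1\n"
        "READ SERR 3\n"
        "!a a(pos) a(neg) b c ce d\n"
        "53000.5 0.25 -0.5 1 1.5 3.5 2\n"
        "54000.5 1.25 -1.5 2 2.5 4.5 3\n"
    )
    t = Table.read(example_qdp, format="ascii.qdp", table_id=0,
                   names=["a", "b", "c", "d"])
    print(t.colnames)   # ['a', 'a_perr', 'a_nerr', 'b', 'c', 'c_err', 'd']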
astropy | 40 | astropy/timeseries/downsample.py | def aggregate_downsample(
time_series,
*,
time_bin_size=None,
time_bin_start=None,
time_bin_end=None,
n_bins=None,
aggregate_func=None,
):
"""
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
        duration) or an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` or ``time_bin_end`` is provided.
Defaults to the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is provided or its default is used. If ``time_bin_end`` is scalar and
``time_bin_start`` is an array, time bins are assumed to be contiguous;
the end of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array and
``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end``
and ``time_bin_start`` are arrays, bins do not need to be contiguous.
If this argument is provided, ``time_bin_size`` should not be provided.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. If both ``time_bin_start`` and ``time_bin_size``
are provided and are scalar values, this determines the total bins
within that interval. If ``time_bin_start`` is an iterable, this
parameter will be ignored.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
"""
| /usr/src/app/target_test_cases/failed_tests_aggregate_downsample.txt | def aggregate_downsample(
time_series,
*,
time_bin_size=None,
time_bin_start=None,
time_bin_end=None,
n_bins=None,
aggregate_func=None,
):
"""
Downsample a time series by binning values into bins with a fixed size or
custom sizes, using a single function to combine the values in the bin.
Parameters
----------
time_series : :class:`~astropy.timeseries.TimeSeries`
The time series to downsample.
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta` ['time'], optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
        duration) or an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` or ``time_bin_end`` is provided.
Defaults to the first time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is provided or its default is used. If ``time_bin_end`` is scalar and
``time_bin_start`` is an array, time bins are assumed to be contiguous;
the end of each bin is the start of the next one, and ``time_bin_end`` gives
the end time for the last bin. If ``time_bin_end`` is an array and
``time_bin_start`` is scalar, bins will be contiguous. If both ``time_bin_end``
and ``time_bin_start`` are arrays, bins do not need to be contiguous.
If this argument is provided, ``time_bin_size`` should not be provided.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. If both ``time_bin_start`` and ``time_bin_size``
are provided and are scalar values, this determines the total bins
within that interval. If ``time_bin_start`` is an iterable, this
parameter will be ignored.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
Returns
-------
binned_time_series : :class:`~astropy.timeseries.BinnedTimeSeries`
The downsampled time series.
"""
if not isinstance(time_series, TimeSeries):
raise TypeError("time_series should be a TimeSeries")
if time_bin_size is not None and not isinstance(
time_bin_size, (u.Quantity, TimeDelta)
):
raise TypeError("'time_bin_size' should be a Quantity or a TimeDelta")
if time_bin_start is not None and not isinstance(time_bin_start, (Time, TimeDelta)):
time_bin_start = Time(time_bin_start)
if time_bin_end is not None and not isinstance(time_bin_end, (Time, TimeDelta)):
time_bin_end = Time(time_bin_end)
# Use the table sorted by time
ts_sorted = time_series.iloc[:]
# If start time is not provided, it is assumed to be the start of the timeseries
if time_bin_start is None:
time_bin_start = ts_sorted.time[0]
# Total duration of the timeseries is needed for determining either
# `time_bin_size` or `nbins` in the case of scalar `time_bin_start`
if time_bin_start.isscalar:
time_duration = (ts_sorted.time[-1] - time_bin_start).sec
if time_bin_size is None and time_bin_end is None:
if time_bin_start.isscalar:
if n_bins is None:
raise TypeError(
"With single 'time_bin_start' either 'n_bins', "
"'time_bin_size' or time_bin_end' must be provided"
)
else:
# `nbins` defaults to the number needed to fit all points
time_bin_size = time_duration / n_bins * u.s
else:
time_bin_end = np.maximum(ts_sorted.time[-1], time_bin_start[-1])
if time_bin_start.isscalar:
if time_bin_size is not None:
if time_bin_size.isscalar:
# Determine the number of bins
if n_bins is None:
bin_size_sec = time_bin_size.to_value(u.s)
n_bins = int(np.ceil(time_duration / bin_size_sec))
elif time_bin_end is not None:
if not time_bin_end.isscalar:
# Convert start time to an array and populate using `time_bin_end`
scalar_start_time = time_bin_start
time_bin_start = time_bin_end.replicate(copy=True)
time_bin_start[0] = scalar_start_time
time_bin_start[1:] = time_bin_end[:-1]
# Check for overlapping bins, and warn if they are present
if time_bin_end is not None:
if (
not time_bin_end.isscalar
and not time_bin_start.isscalar
and np.any(time_bin_start[1:] < time_bin_end[:-1])
):
warnings.warn(
"Overlapping bins should be avoided since they "
"can lead to double-counting of data during binning.",
AstropyUserWarning,
)
binned = BinnedTimeSeries(
time_bin_size=time_bin_size,
time_bin_start=time_bin_start,
time_bin_end=time_bin_end,
n_bins=n_bins,
)
if aggregate_func is None:
aggregate_func = np.nanmean
# Start and end times of the binned timeseries
bin_start = binned.time_bin_start
bin_end = binned.time_bin_end
# Set `n_bins` to match the length of `time_bin_start` if
# `n_bins` is unspecified or if `time_bin_start` is an iterable
if n_bins is None or not time_bin_start.isscalar:
n_bins = len(bin_start)
# Find the subset of the table that is inside the union of all bins
# - output: `keep` a mask to create the subset
# - use relative time in seconds `np.longdouble`` in in creating `keep` to speed up
# (`Time` object comparison is rather slow)
# - tiny sacrifice on precision (< 0.01ns on 64 bit platform)
rel_base = ts_sorted.time[0]
rel_bin_start = _to_relative_longdouble(bin_start, rel_base)
rel_bin_end = _to_relative_longdouble(bin_end, rel_base)
rel_ts_sorted_time = _to_relative_longdouble(ts_sorted.time, rel_base)
keep = (rel_ts_sorted_time >= rel_bin_start[0]) & (
rel_ts_sorted_time <= rel_bin_end[-1]
)
# Find out indices to be removed because of noncontiguous bins
#
# Only need to check when adjacent bins have gaps, i.e.,
# bin_start[ind + 1] > bin_end[ind]
# - see: https://github.com/astropy/astropy/issues/13058#issuecomment-1090846697
# on thoughts on how to reduce the number of times to loop
noncontiguous_bins_indices = np.where(rel_bin_start[1:] > rel_bin_end[:-1])[0]
for ind in noncontiguous_bins_indices:
delete_indices = np.where(
np.logical_and(
rel_ts_sorted_time > rel_bin_end[ind],
rel_ts_sorted_time < rel_bin_start[ind + 1],
)
)
keep[delete_indices] = False
rel_subset_time = rel_ts_sorted_time[keep]
# Figure out which bin each row falls in by sorting with respect
# to the bin end times
indices = np.searchsorted(rel_bin_end, rel_subset_time)
# For time == bin_start[i+1] == bin_end[i], let bin_start takes precedence
if len(indices) and np.all(rel_bin_start[1:] >= rel_bin_end[:-1]):
indices_start = np.searchsorted(
rel_subset_time, rel_bin_start[rel_bin_start <= rel_ts_sorted_time[-1]]
)
indices[indices_start] = np.arange(len(indices_start))
# Determine rows where values are defined
if len(indices):
groups = np.hstack([0, np.nonzero(np.diff(indices))[0] + 1])
else:
groups = np.array([])
# Find unique indices to determine which rows in the final time series
# will not be empty.
unique_indices = np.unique(indices)
# Add back columns
subset = ts_sorted[keep]
for colname in subset.colnames:
if colname == "time":
continue
values = subset[colname]
# FIXME: figure out how to avoid the following, if possible
if not isinstance(values, (np.ndarray, u.Quantity)):
warnings.warn(
"Skipping column {0} since it has a mix-in type", AstropyUserWarning
)
continue
if isinstance(values, u.Quantity):
data = u.Quantity(np.repeat(np.nan, n_bins), unit=values.unit)
data[unique_indices] = u.Quantity(
reduceat(values.value, groups, aggregate_func), values.unit, copy=False
)
else:
data = np.ma.zeros(n_bins, dtype=values.dtype)
data.mask = 1
data[unique_indices] = reduceat(values, groups, aggregate_func)
data.mask[unique_indices] = 0
binned[colname] = data
return binned
| aggregate_downsample |
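A short usage sketch with the public API; the sample values are made up::

    import numpy as np
    import astropy.units as u
    from astropy.timeseries import TimeSeries, aggregate_downsample

    ts = TimeSeries(time_start="2016-03-22T12:30:31", time_delta=3 * u.s,
                    data={"flux": [1., 4., 5., 6., 4., 5., 4., 3.] * u.mJy})

    # Bin into fixed 6-second bins, combining samples with np.nanmean (the default)
    ds = aggregate_downsample(ts, time_bin_size=6 * u.s)
    print(ds["time_bin_start", "flux"])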
astropy | 41 | astropy/coordinates/builtin_frames/utils.py | def aticq(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAticq``.
``eraAticq`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of aticq in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAtciqz`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's aticq assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
    few arcseconds in the worst case (e.g. a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric GCRS or CIRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
rc : float or `~numpy.ndarray`
Right Ascension in radians
dc : float or `~numpy.ndarray`
Declination in radians
"""
| /usr/src/app/target_test_cases/failed_tests_aticq.txt | def aticq(srepr, astrom):
"""
A slightly modified version of the ERFA function ``eraAticq``.
``eraAticq`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
There are two issues with the version of aticq in ERFA. Both are associated
with the handling of light deflection.
The companion function ``eraAtciqz`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
In addition, ERFA's aticq assumes a distant source, so there is no difference between
the object-Sun vector and the observer-Sun vector. This can lead to errors of up to a
    few arcseconds in the worst case (e.g. a Venus transit).
Parameters
----------
srepr : `~astropy.coordinates.SphericalRepresentation`
Astrometric GCRS or CIRS position of object from observer
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
-------
rc : float or `~numpy.ndarray`
Right Ascension in radians
dc : float or `~numpy.ndarray`
Declination in radians
"""
# ignore parallax effects if no distance, or far away
srepr_distance = srepr.distance
ignore_distance = srepr_distance.unit == u.one
# RA, Dec to cartesian unit vectors
pos = erfa.s2c(srepr.lon.radian, srepr.lat.radian)
# Bias-precession-nutation, giving GCRS proper direction.
ppr = erfa.trxp(astrom["bpn"], pos)
# Aberration, giving GCRS natural direction
d = np.zeros_like(ppr)
for j in range(2):
before = norm(ppr - d)
after = erfa.ab(before, astrom["v"], astrom["em"], astrom["bm1"])
d = after - before
pnat = norm(ppr - d)
# Light deflection by the Sun, giving BCRS coordinate direction
d = np.zeros_like(pnat)
for j in range(5):
before = norm(pnat - d)
if ignore_distance:
# No distance to object, assume a long way away
q = before
else:
# Find BCRS direction of Sun to object.
# astrom['eh'] and astrom['em'] contain Sun to observer unit vector,
# and distance, respectively.
eh = astrom["em"][..., np.newaxis] * astrom["eh"]
# unit vector from Sun to object
q = eh + srepr_distance[..., np.newaxis].to_value(u.au) * before
sundist, q = erfa.pn(q)
sundist = sundist[..., np.newaxis]
# calculation above is extremely unstable very close to the sun
# in these situations, default back to ldsun-style behaviour,
# since this is reversible and drops to zero within stellar limb
q = np.where(sundist > 1.0e-10, q, before)
after = erfa.ld(1.0, before, q, astrom["eh"], astrom["em"], 1e-6)
d = after - before
pco = norm(pnat - d)
# ICRS astrometric RA, Dec
rc, dc = erfa.c2s(pco)
return erfa.anp(rc), dc
| aticq |
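A sketch of calling this internal helper directly. The import path follows the module path listed for this entry, the astrometry context comes from pyerfa's ``apci13``, and the date and coordinates are arbitrary illustrative values; a dimensionless distance makes the function skip the near-field light-deflection term, as in the code above::

    import erfa
    import astropy.units as u
    from astropy.coordinates import SphericalRepresentation
    from astropy.coordinates.builtin_frames.utils import aticq  # private helper

    # Astrometry context for an arbitrary TDB date; apci13 returns (astrom, eo)
    astrom, _eo = erfa.apci13(2459000.5, 0.0)

    # Dimensionless distance -> parallax/near-field deflection handling is skipped
    srepr = SphericalRepresentation(lon=10 * u.deg, lat=20 * u.deg, distance=1.0)
    rc, dc = aticq(srepr, astrom)   # ICRS RA and Dec, in radians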
astropy | 42 | astropy/modeling/core.py | def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
| /usr/src/app/target_test_cases/failed_tests_bind_compound_bounding_box.txt | def bind_compound_bounding_box(
modelinstance,
bounding_boxes,
selector_args,
create_selector=None,
ignored=None,
order="C",
):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(
modelinstance,
bounding_boxes,
selector_args,
create_selector=create_selector,
ignored=ignored,
order=order,
)
| bind_compound_bounding_box |
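A sketch of attaching a compound bounding box keyed on the ``x`` input of a two-input model. The dictionary and selector layout below follow the pattern described in :ref:`astropy:bounding-boxes` and should be treated as an assumption rather than the only valid form::

    from astropy.modeling import bind_compound_bounding_box
    from astropy.modeling.models import Gaussian2D

    model = Gaussian2D()
    # One bounding box per selector value of 'x'; with ignore=True the interval
    # constrains the remaining input (y) only.
    bind_compound_bounding_box(
        model,
        {(0,): (-1, 1), (1,): (-2, 2)},
        selector_args=[("x", True)],
    )
    print(model.bounding_box)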
astropy | 43 | astropy/wcs/utils.py | def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass instance for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
| /usr/src/app/target_test_cases/failed_tests_celestial_frame_to_wcs.txt | def celestial_frame_to_wcs(frame, projection="TAN"):
"""
For a given coordinate frame, return the corresponding WCS object.
Note that the returned WCS object has only the elements corresponding to
coordinate frames set (e.g. ctype, equinox, radesys).
Parameters
----------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass instance for which to find the WCS
projection : str
Projection code to use in ctype, if applicable
Returns
-------
wcs : :class:`~astropy.wcs.WCS` instance
The corresponding WCS object
Examples
--------
::
>>> from astropy.wcs.utils import celestial_frame_to_wcs
>>> from astropy.coordinates import FK5
>>> frame = FK5(equinox='J2010')
>>> wcs = celestial_frame_to_wcs(frame)
>>> wcs.to_header()
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / Right ascension, gnomonic projection
CTYPE2 = 'DEC--TAN' / Declination, gnomonic projection
CRVAL1 = 0.0 / [deg] Coordinate value at reference point
CRVAL2 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
RADESYS = 'FK5' / Equatorial coordinate system
EQUINOX = 2010.0 / [yr] Equinox of equatorial coordinates
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a
:class:`~astropy.coordinates.BaseCoordinateFrame` subclass
instance and a projection (given as a string) and should return either a WCS
instance, or `None` if the WCS could not be determined. You can register
this function temporarily with::
>>> from astropy.wcs.utils import celestial_frame_to_wcs, custom_frame_to_wcs_mappings
>>> with custom_frame_to_wcs_mappings(my_function):
... celestial_frame_to_wcs(...)
"""
for mapping_set in FRAME_WCS_MAPPINGS:
for func in mapping_set:
wcs = func(frame, projection=projection)
if wcs is not None:
return wcs
raise ValueError(
"Could not determine WCS corresponding to the specified coordinate frame."
)
| celestial_frame_to_wcs |
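Another quick example beyond the FK5 case in the docstring, this time for a Galactic frame with a plate carrée projection::

    from astropy.coordinates import Galactic
    from astropy.wcs.utils import celestial_frame_to_wcs

    wcs = celestial_frame_to_wcs(Galactic(), projection="CAR")
    print(wcs.wcs.ctype)   # ['GLON-CAR', 'GLAT-CAR']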
astropy | 44 | astropy/io/fits/hdu/compressed/_tiled_compression.py | def compress_image_data(
image_data,
compression_type,
compressed_header,
compressed_coldefs,
):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
    The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
image_data : `~numpy.ndarray`
The image data to compress
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
compressed_coldefs : `~astropy.io.fits.ColDefs`
The ColDefs object for the compressed binary table
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
| /usr/src/app/target_test_cases/failed_tests_compress_image_data.txt | def compress_image_data(
image_data,
compression_type,
compressed_header,
compressed_coldefs,
):
"""
Compress the data in a `~astropy.io.fits.CompImageHDU`.
    The input HDU is expected to have an uncompressed numpy array as its
``.data`` attribute.
Parameters
----------
image_data : `~numpy.ndarray`
The image data to compress
compression_type : str
The compression algorithm
compressed_header : `~astropy.io.fits.Header`
The header of the compressed binary table
compressed_coldefs : `~astropy.io.fits.ColDefs`
The ColDefs object for the compressed binary table
Returns
-------
nbytes : `int`
The number of bytes of the heap.
heap : `bytes`
The bytes of the FITS table heap.
"""
if not isinstance(image_data, np.ndarray):
raise TypeError("Image data must be a numpy.ndarray")
_check_compressed_header(compressed_header)
# TODO: This implementation is memory inefficient as it generates all the
# compressed bytes before forming them into the heap, leading to 2x the
# potential memory usage. Directly storing the compressed bytes into an
# expanding heap would fix this.
tile_shape = _tile_shape(compressed_header)
data_shape = _data_shape(compressed_header)
compressed_bytes = []
gzip_fallback = []
scales = []
zeros = []
zblank = None
noisebit = _get_compression_setting(compressed_header, "noisebit", 0)
settings = _header_to_settings(compressed_header)
for irow, tile_slices in _iter_array_tiles(data_shape, tile_shape):
tile_data = image_data[tile_slices]
settings = _update_tile_settings(settings, compression_type, tile_data.shape)
quantize = "ZSCALE" in compressed_coldefs.dtype.names
if tile_data.dtype.kind == "f" and quantize:
dither_method = DITHER_METHODS[
compressed_header.get("ZQUANTIZ", "NO_DITHER")
]
dither_seed = compressed_header.get("ZDITHER0", 0)
q = Quantize(
row=(irow + dither_seed) if dither_method != -1 else 0,
dither_method=dither_method,
quantize_level=noisebit,
bitpix=compressed_header["ZBITPIX"],
)
original_shape = tile_data.shape
# If there are any NaN values in the data, we should reset them to
# a value that will not affect the quantization (an already existing
# data value in the array) and we can then reset this after quantization
# to ZBLANK and set the appropriate header keyword
nan_mask = np.isnan(tile_data)
any_nan = np.any(nan_mask)
if any_nan:
# Note that we need to copy here to avoid modifying the input array.
tile_data = tile_data.copy()
if np.all(nan_mask):
tile_data[nan_mask] = 0
else:
tile_data[nan_mask] = np.nanmin(tile_data)
try:
tile_data, scale, zero = q.encode_quantized(tile_data)
except QuantizationFailedException:
if any_nan:
# reset NaN values since we will losslessly compress.
tile_data[nan_mask] = np.nan
scales.append(0)
zeros.append(0)
gzip_fallback.append(True)
else:
tile_data = np.asarray(tile_data).reshape(original_shape)
if any_nan:
if not tile_data.flags.writeable:
tile_data = tile_data.copy()
# For now, we just use the default ZBLANK value and assume
# this is the same for all tiles. We could generalize this
# to allow different ZBLANK values (for example if the data
# includes this value by chance) and to allow different values
# per tile, which is allowed by the FITS standard.
tile_data[nan_mask] = DEFAULT_ZBLANK
zblank = DEFAULT_ZBLANK
scales.append(scale)
zeros.append(zero)
gzip_fallback.append(False)
else:
scales.append(0)
zeros.append(0)
gzip_fallback.append(False)
if gzip_fallback[-1]:
cbytes = _compress_tile(tile_data, algorithm="GZIP_1")
else:
cbytes = _compress_tile(tile_data, algorithm=compression_type, **settings)
compressed_bytes.append(cbytes)
if zblank is not None:
compressed_header["ZBLANK"] = zblank
table = np.zeros(
len(compressed_bytes), dtype=compressed_coldefs.dtype.newbyteorder(">")
)
if "ZSCALE" in table.dtype.names:
table["ZSCALE"] = np.array(scales)
table["ZZERO"] = np.array(zeros)
for irow, cbytes in enumerate(compressed_bytes):
table["COMPRESSED_DATA"][irow, 0] = len(cbytes)
table["COMPRESSED_DATA"][:1, 1] = 0
table["COMPRESSED_DATA"][1:, 1] = np.cumsum(table["COMPRESSED_DATA"][:-1, 0])
for irow in range(len(compressed_bytes)):
if gzip_fallback[irow]:
table["GZIP_COMPRESSED_DATA"][irow] = table["COMPRESSED_DATA"][irow]
table["COMPRESSED_DATA"][irow] = 0
# For PLIO_1, the size of each heap element is a factor of two lower than
    # the real size - not clear if this is deliberate or a bug somewhere.
if compression_type == "PLIO_1":
table["COMPRESSED_DATA"][:, 0] //= 2
# For PLIO_1, it looks like the compressed data is always stored big endian
if compression_type == "PLIO_1":
for irow in range(len(compressed_bytes)):
if not gzip_fallback[irow]:
array = np.frombuffer(compressed_bytes[irow], dtype="i2")
if array.dtype.byteorder == "<" or (
array.dtype.byteorder == "=" and sys.byteorder == "little"
):
compressed_bytes[irow] = array.astype(">i2", copy=False).tobytes()
compressed_bytes = b"".join(compressed_bytes)
table_bytes = table.tobytes()
heap = table.tobytes() + compressed_bytes
return len(compressed_bytes), np.frombuffer(heap, dtype=np.uint8)
| compress_image_data |
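``compress_image_data`` sits behind the public ``CompImageHDU`` interface; a minimal round trip, with an illustrative file name::

    import numpy as np
    from astropy.io import fits

    data = np.arange(100.0).reshape(10, 10)
    hdu = fits.CompImageHDU(data=data, compression_type="RICE_1")
    hdu.writeto("compressed_example.fits", overwrite=True)  # heap built by compress_image_data

    with fits.open("compressed_example.fits") as hdul:
        print(hdul[1].data.shape)   # (10, 10) after on-the-fly decompression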
astropy | 45 | astropy/timeseries/periodograms/lombscargle/utils.py | def compute_chi2_ref(y, dy=None, center_data=True, fit_mean=True):
"""Compute the reference chi-square for a particular dataset.
    Note: this is not valid for center_data=False and fit_mean=False.
Parameters
----------
y : array-like
data values
dy : float, array, or None, optional
data uncertainties
center_data : bool
specify whether data should be pre-centered
fit_mean : bool
specify whether model should fit the mean of the data
Returns
-------
chi2_ref : float
The reference chi-square for the periodogram of this data
"""
| /usr/src/app/target_test_cases/failed_tests_compute_chi2_ref.txt | def compute_chi2_ref(y, dy=None, center_data=True, fit_mean=True):
"""Compute the reference chi-square for a particular dataset.
    Note: this is not valid for center_data=False and fit_mean=False.
Parameters
----------
y : array-like
data values
dy : float, array, or None, optional
data uncertainties
center_data : bool
specify whether data should be pre-centered
fit_mean : bool
specify whether model should fit the mean of the data
Returns
-------
chi2_ref : float
The reference chi-square for the periodogram of this data
"""
if dy is None:
dy = 1
y, dy = np.broadcast_arrays(y, dy)
w = dy**-2.0
if center_data or fit_mean:
mu = np.dot(w, y) / w.sum()
else:
mu = 0
yw = (y - mu) / dy
return np.dot(yw, yw)
| compute_chi2_ref |
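The reference chi-square is just the weighted sum of squared, mean-subtracted residuals computed in the body above; a pure-numpy check on synthetic data::

    import numpy as np

    rng = np.random.default_rng(0)
    y = rng.normal(size=100)
    dy = 0.5 * np.ones_like(y)

    w = dy ** -2
    mu = np.dot(w, y) / w.sum()            # weighted mean (center_data/fit_mean case)
    chi2_ref = np.sum(((y - mu) / dy) ** 2)
    print(chi2_ref)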
astropy | 46 | astropy/timeseries/periodograms/lombscargle/utils.py | def convert_normalization(Z, N, from_normalization, to_normalization, chi2_ref=None):
"""Convert power from one normalization to another.
This currently only works for standard & floating-mean models.
Parameters
----------
Z : array-like
the periodogram output
N : int
the number of data points
from_normalization, to_normalization : str
the normalization to convert from and to. Options are
['standard', 'model', 'log', 'psd']
chi2_ref : float
The reference chi-square, required for converting to or from the
psd normalization.
Returns
-------
Z_out : ndarray
The periodogram in the new normalization
"""
| /usr/src/app/target_test_cases/failed_tests_convert_normalization.txt | def convert_normalization(Z, N, from_normalization, to_normalization, chi2_ref=None):
"""Convert power from one normalization to another.
This currently only works for standard & floating-mean models.
Parameters
----------
Z : array-like
the periodogram output
N : int
the number of data points
from_normalization, to_normalization : str
the normalization to convert from and to. Options are
['standard', 'model', 'log', 'psd']
chi2_ref : float
The reference chi-square, required for converting to or from the
psd normalization.
Returns
-------
Z_out : ndarray
The periodogram in the new normalization
"""
Z = np.asarray(Z)
from_to = (from_normalization, to_normalization)
for norm in from_to:
if norm not in NORMALIZATIONS:
raise ValueError(f"{from_normalization} is not a valid normalization")
if from_normalization == to_normalization:
return Z
if "psd" in from_to and chi2_ref is None:
raise ValueError(
"must supply reference chi^2 when converting to or from psd normalization"
)
if from_to == ("log", "standard"):
return 1 - np.exp(-Z)
elif from_to == ("standard", "log"):
return -np.log(1 - Z)
elif from_to == ("log", "model"):
return np.exp(Z) - 1
elif from_to == ("model", "log"):
return np.log(Z + 1)
elif from_to == ("model", "standard"):
return Z / (1 + Z)
elif from_to == ("standard", "model"):
return Z / (1 - Z)
elif from_normalization == "psd":
return convert_normalization(
2 / chi2_ref * Z,
N,
from_normalization="standard",
to_normalization=to_normalization,
)
elif to_normalization == "psd":
Z_standard = convert_normalization(
Z, N, from_normalization=from_normalization, to_normalization="standard"
)
return 0.5 * chi2_ref * Z_standard
else:
raise NotImplementedError(
f"conversion from '{from_normalization}' to '{to_normalization}'"
)
| convert_normalization |
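A sketch chaining the two helpers above. Both are internal utilities; the import path is the module path listed for this entry and the data are synthetic::

    import numpy as np
    from astropy.timeseries.periodograms.lombscargle.utils import (
        compute_chi2_ref, convert_normalization)

    rng = np.random.default_rng(1)
    y = rng.normal(size=60)
    dy = 0.1 * np.ones_like(y)

    chi2_ref = compute_chi2_ref(y, dy)
    Z_standard = np.array([0.1, 0.3, 0.5])
    Z_psd = convert_normalization(Z_standard, len(y), "standard", "psd",
                                  chi2_ref=chi2_ref)
    Z_model = convert_normalization(Z_standard, len(y), "standard", "model")
    print(Z_psd, Z_model)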
astropy | 47 | astropy/convolution/convolve.py | def convolve_models(model, kernel, mode="convolve_fft", **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
**kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
"""
| /usr/src/app/target_test_cases/failed_tests_convolve_models.txt | def convolve_models(model, kernel, mode="convolve_fft", **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
**kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : `~astropy.modeling.core.CompoundModel`
Convolved model
"""
if mode == "convolve_fft":
operator = SPECIAL_OPERATORS.add(
"convolve_fft", partial(convolve_fft, **kwargs)
)
elif mode == "convolve":
operator = SPECIAL_OPERATORS.add("convolve", partial(convolve, **kwargs))
else:
raise ValueError(f"Mode {mode} is not supported.")
return CompoundModel(operator, model, kernel)
| convolve_models |
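A short usage sketch convolving two 1D Gaussians and evaluating the resulting compound model on a grid::

    import numpy as np
    from astropy.convolution import convolve_models
    from astropy.modeling.models import Gaussian1D

    g1 = Gaussian1D(amplitude=1.0, mean=-1.0, stddev=1.0)
    g2 = Gaussian1D(amplitude=1.0, mean=1.0, stddev=1.0)

    conv = convolve_models(g1, g2)      # default mode is 'convolve_fft'
    x = np.linspace(-5, 5, 101)
    y = conv(x)                         # both models are evaluated on x, then convolved
    print(y.max())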
astropy | 48 | astropy/timeseries/periodograms/lombscargle/implementations/mle.py | def design_matrix(t, frequency, dy=None, bias=True, nterms=1):
"""Compute the Lomb-Scargle design matrix at the given frequency.
This is the matrix X such that the periodic model at the given frequency
can be expressed :math:`\\hat{y} = X \\theta`.
Parameters
----------
t : array-like, shape=(n_times,)
times at which to compute the design matrix
frequency : float
frequency for the design matrix
dy : float or array-like, optional
data uncertainties: should be broadcastable with `t`
bias : bool (default=True)
If true, include a bias column in the matrix
nterms : int (default=1)
Number of Fourier terms to include in the model
Returns
-------
X : ndarray, shape=(n_times, n_parameters)
The design matrix, where n_parameters = bool(bias) + 2 * nterms
"""
| /usr/src/app/target_test_cases/failed_tests_design_matrix.txt | def design_matrix(t, frequency, dy=None, bias=True, nterms=1):
"""Compute the Lomb-Scargle design matrix at the given frequency.
This is the matrix X such that the periodic model at the given frequency
can be expressed :math:`\\hat{y} = X \\theta`.
Parameters
----------
t : array-like, shape=(n_times,)
times at which to compute the design matrix
frequency : float
frequency for the design matrix
dy : float or array-like, optional
data uncertainties: should be broadcastable with `t`
bias : bool (default=True)
If true, include a bias column in the matrix
nterms : int (default=1)
Number of Fourier terms to include in the model
Returns
-------
X : ndarray, shape=(n_times, n_parameters)
The design matrix, where n_parameters = bool(bias) + 2 * nterms
"""
t = np.asarray(t)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency must be a scalar")
if nterms == 0 and not bias:
raise ValueError("cannot have nterms=0 and no bias")
if bias:
cols = [np.ones_like(t)]
else:
cols = []
for i in range(1, nterms + 1):
cols.append(np.sin(2 * np.pi * i * frequency * t))
cols.append(np.cos(2 * np.pi * i * frequency * t))
XT = np.vstack(cols)
if dy is not None:
XT /= dy
return np.transpose(XT)
| design_matrix |
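The columns are simply a bias term plus sine/cosine pairs, optionally weighted by the uncertainties; a pure-numpy sketch for ``nterms=1`` with synthetic times::

    import numpy as np

    t = np.linspace(0, 10, 50)
    frequency = 0.7
    dy = 0.1 * np.ones_like(t)

    cols = [np.ones_like(t),
            np.sin(2 * np.pi * frequency * t),
            np.cos(2 * np.pi * frequency * t)]
    X = np.transpose(np.vstack(cols) / dy)   # weighted design matrix, shape (50, 3)
    print(X.shape)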
astropy | 49 | astropy/utils/data.py | def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
        Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
| /usr/src/app/target_test_cases/failed_tests_download_file.txt | def download_file(
remote_url,
cache=False,
show_progress=True,
timeout=None,
sources=None,
pkgname="astropy",
http_headers=None,
ssl_context=None,
allow_insecure=False,
):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
        downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
        Returns the local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(
f"Cache value '{cache}' was requested but "
"'update' is the only recognized string; "
"otherwise use a boolean"
)
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure,
)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (
hasattr(e, "reason")
and hasattr(e.reason, "errno")
and e.reason.errno == 8
):
e.reason.strerror = f"{e.reason.strerror}. requested URL: {remote_url}"
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
"Please include primary URL in sources if you want it to be "
"included as a valid source."
)
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}"
) from errors[sources[0]]
if cache:
try:
return import_file_to_cache(
url_key,
f_name,
remove_original=True,
replace=(cache == "update"),
pkgname=pkgname,
)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
"instead."
)
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
| download_file |
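Typical call patterns; the URL below is a placeholder, not a real data file::

    from astropy.utils.data import download_file

    url = "https://example.com/some_remote_table.fits"   # placeholder URL

    # Download once and reuse the cached copy on later calls
    local_path = download_file(url, cache=True)

    # Force a re-download and refresh the cache entry
    local_path = download_file(url, cache="update")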
astropy | 50 | astropy/utils/data_info.py | def dtype_info_name(dtype):
"""Return a human-oriented string name of the ``dtype`` arg.
This can be use by astropy methods that present type information about
a data object.
The output is mostly equivalent to ``dtype.name`` which takes the form
<type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
optional number of bits which gets included only for numeric types.
The output is shown below for ``bytes`` and ``str`` types, with <N> being
the number of characters. This representation corresponds to the Python
type that matches the dtype::
Numpy S<N> U<N>
Python bytes<N> str<N>
Parameters
----------
dtype : str, `~numpy.dtype`, type
Input as an object that can be converted via :class:`numpy.dtype`.
Returns
-------
dtype_info_name : str
String name of ``dtype``
"""
| /usr/src/app/target_test_cases/failed_tests_dtype_info_name.txt | def dtype_info_name(dtype):
"""Return a human-oriented string name of the ``dtype`` arg.
This can be use by astropy methods that present type information about
a data object.
The output is mostly equivalent to ``dtype.name`` which takes the form
<type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
optional number of bits which gets included only for numeric types.
The output is shown below for ``bytes`` and ``str`` types, with <N> being
the number of characters. This representation corresponds to the Python
type that matches the dtype::
Numpy S<N> U<N>
Python bytes<N> str<N>
Parameters
----------
dtype : str, `~numpy.dtype`, type
Input as an object that can be converted via :class:`numpy.dtype`.
Returns
-------
dtype_info_name : str
String name of ``dtype``
"""
dtype = np.dtype(dtype)
if dtype.names is not None:
info_names = ", ".join(dtype_info_name(dt[0]) for dt in dtype.fields.values())
return f"({info_names})"
if dtype.subdtype is not None:
dtype, shape = dtype.subdtype
else:
shape = ()
if dtype.kind in ("S", "U"):
type_name = "bytes" if dtype.kind == "S" else "str"
length = re.search(r"(\d+)", dtype.str).group(1)
out = type_name + length
else:
out = dtype.name
if shape:
out += f"[{','.join(str(n) for n in shape)}]"
return out
| dtype_info_name |
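A couple of quick calls, with the import path taken from the module path listed above; expected outputs follow from the branches in the function body::

    import numpy as np
    from astropy.utils.data_info import dtype_info_name

    print(dtype_info_name(np.dtype("float64")))   # 'float64'
    print(dtype_info_name("U10"))                 # 'str10'
    print(dtype_info_name(np.dtype("S4")))        # 'bytes4'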
astropy | 51 | astropy/timeseries/periodograms/lombscargle/_statistics.py | def fap_single(z, N, normalization, dH=1, dK=3):
"""Single-frequency false alarm probability for the Lomb-Scargle periodogram.
This is equal to 1 - cdf, where cdf is the cumulative distribution.
The single-frequency false alarm probability should not be confused with
the false alarm probability for the largest peak.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
false_alarm_probability : np.ndarray
The single-frequency false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
| /usr/src/app/target_test_cases/failed_tests_fap_single.txt | def fap_single(z, N, normalization, dH=1, dK=3):
"""Single-frequency false alarm probability for the Lomb-Scargle periodogram.
This is equal to 1 - cdf, where cdf is the cumulative distribution.
The single-frequency false alarm probability should not be confused with
the false alarm probability for the largest peak.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
false_alarm_probability : np.ndarray
The single-frequency false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
z = np.asarray(z)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == "psd":
return np.exp(-z)
elif normalization == "standard":
return (1 - z) ** (0.5 * Nk)
elif normalization == "model":
return (1 + z) ** (-0.5 * Nk)
elif normalization == "log":
return np.exp(-0.5 * Nk * z)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
| fap_single |
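For the 'standard' normalization the expression reduces to ``(1 - z) ** ((N - 3) / 2)``, and for 'psd' to ``exp(-z)``; a small numpy check with synthetic numbers::

    import numpy as np

    z = np.array([0.1, 0.3, 0.5])
    N = 40            # number of data points
    Nk = N - 3        # dK = 3

    fap_standard = (1 - z) ** (0.5 * Nk)
    fap_psd = np.exp(-z)          # normalization='psd'
    print(fap_standard, fap_psd)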
astropy | 52 | astropy/coordinates/funcs.py | def get_constellation(coord, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) a given coordinate object contains.
Parameters
----------
coord : coordinate-like
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If ``coords`` contains a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/cat/VI/42>`_.
"""
| /usr/src/app/target_test_cases/failed_tests_get_constellation.txt | def get_constellation(coord, short_name=False, constellation_list="iau"):
"""
Determines the constellation(s) a given coordinate object contains.
Parameters
----------
coord : coordinate-like
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If ``coords`` contains a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/cat/VI/42>`_.
"""
if constellation_list != "iau":
raise ValueError("only 'iau' us currently supported for constellation_list")
# read the data files and cache them if they haven't been already
if not _constellation_data:
cdata = data.get_pkg_data_contents("data/constellation_data_roman87.dat")
ctable = ascii.read(cdata, names=["ral", "rau", "decl", "name"])
cnames = data.get_pkg_data_contents(
"data/constellation_names.dat", encoding="UTF8"
)
cnames_short_to_long = {
l[:3]: l[4:] for l in cnames.split("\n") if not l.startswith("#")
}
cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable["name"]])
_constellation_data["ctable"] = ctable
_constellation_data["cnames_long"] = cnames_long
else:
ctable = _constellation_data["ctable"]
cnames_long = _constellation_data["cnames_long"]
isscalar = coord.isscalar
# if it is geocentric, we reproduce the frame but with the 1875 equinox,
# which is where the constellations are defined
# this yields a "dubious year" warning because ERFA considers the year 1875
# "dubious", probably because UTC isn't well-defined then and precession
# models aren't precisely calibrated back to then. But it's plenty
# sufficient for constellations
with warnings.catch_warnings():
warnings.simplefilter("ignore", erfa.ErfaWarning)
constel_coord = coord.transform_to(PrecessedGeocentric(equinox="B1875"))
if isscalar:
rah = constel_coord.ra.ravel().hour
decd = constel_coord.dec.ravel().deg
else:
rah = constel_coord.ra.hour
decd = constel_coord.dec.deg
constellidx = -np.ones(len(rah), dtype=int)
notided = constellidx == -1 # should be all
for i, row in enumerate(ctable):
msk = (row["ral"] < rah) & (rah < row["rau"]) & (decd > row["decl"])
constellidx[notided & msk] = i
notided = constellidx == -1
if np.sum(notided) == 0:
break
else:
raise ValueError(
f"Could not find constellation for coordinates {constel_coord[notided]}"
)
if short_name:
names = ctable["name"][constellidx]
else:
names = cnames_long[constellidx]
if isscalar:
return names[0]
else:
return names
| get_constellation |
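A minimal usage sketch for the get_constellation record above (not part of the dataset record itself); it assumes astropy is installed and uses the public astropy.coordinates API. The example coordinate is arbitrary (near M31).

from astropy import units as u
from astropy.coordinates import SkyCoord, get_constellation

# A scalar coordinate returns a single name; an array coordinate returns an array of names.
coord = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg)
print(get_constellation(coord))                    # 'Andromeda'
print(get_constellation(coord, short_name=True))   # 'And'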
astropy | 53 | astropy/coordinates/name_resolve.py | def get_icrs_coordinates(name, parse=False, cache=False):
"""
Retrieve an ICRS object by using `Sesame <https://cds.unistra.fr/cgi-bin/Sesame>`_
to retrieve coordinates for the specified name. By default, this will
search all available databases (SIMBAD, NED and VizieR) until a match is found.
If you would like to specify the database, use the science state
``astropy.coordinates.name_resolve.sesame_database``. You can also
specify a list of servers to use for querying Sesame using the science
state ``astropy.coordinates.name_resolve.sesame_url``. This will try
each one in order until a valid response is returned. By default, this
list includes the main Sesame host and a mirror at vizier. The
configuration item `astropy.utils.data.Conf.remote_timeout` controls the
number of seconds to wait for a response from the server before giving
up.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
parse : bool
Whether to attempt extracting the coordinates from the name by
parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.
'CRTS SSS100805 J194428-420209', this may be much faster than a
sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, str, optional
Determines whether to cache the results or not. Passed through to
`~astropy.utils.data.download_file`, so pass "update" to update the
cached value.
Returns
-------
coord : `astropy.coordinates.ICRS` object
The object's coordinates in the ICRS frame.
"""
| /usr/src/app/target_test_cases/failed_tests_get_icrs_coordinates.txt | def get_icrs_coordinates(name, parse=False, cache=False):
"""
Retrieve an ICRS object by using `Sesame <https://cds.unistra.fr/cgi-bin/Sesame>`_
to retrieve coordinates for the specified name. By default, this will
search all available databases (SIMBAD, NED and VizieR) until a match is found.
If you would like to specify the database, use the science state
``astropy.coordinates.name_resolve.sesame_database``. You can also
specify a list of servers to use for querying Sesame using the science
state ``astropy.coordinates.name_resolve.sesame_url``. This will try
each one in order until a valid response is returned. By default, this
list includes the main Sesame host and a mirror at vizier. The
configuration item `astropy.utils.data.Conf.remote_timeout` controls the
number of seconds to wait for a response from the server before giving
up.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
parse : bool
Whether to attempt extracting the coordinates from the name by
parsing with a regex. For object catalog names that have
J-coordinates embedded in their names, e.g.
'CRTS SSS100805 J194428-420209', this may be much faster than a
sesame query for the same object name. The coordinates extracted
in this way may differ from the database coordinates by a few
deci-arcseconds, so only use this option if you do not need
sub-arcsecond accuracy for coordinates.
cache : bool, str, optional
Determines whether to cache the results or not. Passed through to
`~astropy.utils.data.download_file`, so pass "update" to update the
cached value.
Returns
-------
coord : `astropy.coordinates.ICRS` object
The object's coordinates in the ICRS frame.
"""
# if requested, first try extract coordinates embedded in the object name.
# Do this first since it may be much faster than doing the sesame query
if parse:
from . import jparser
if jparser.search(name):
return jparser.to_skycoord(name)
else:
# if the parser failed, fall back to sesame query.
pass
# maybe emit a warning instead of silently falling back to sesame?
database = sesame_database.get()
# The web API just takes the first letter of the database name
db = database.upper()[0]
# the A option does not set a preferred order for the database
if db == "A":
# we look into SIMBAD, NED, and then VizieR. This is the default Sesame behavior.
db = "SNV"
# Make sure we don't have duplicates in the url list
urls = []
domains = []
for url in sesame_url.get():
domain = urllib.parse.urlparse(url).netloc
# Check for duplicates
if domain not in domains:
domains.append(domain)
# Add the query to the end of the url, add to url list
fmt_url = os.path.join(url, "{db}?{name}")
fmt_url = fmt_url.format(name=urllib.parse.quote(name), db=db)
urls.append(fmt_url)
exceptions = []
for url in urls:
try:
resp_data = get_file_contents(
download_file(url, cache=cache, show_progress=False)
)
break
except urllib.error.URLError as e:
exceptions.append(e)
continue
except TimeoutError as e:
# There are some cases where urllib2 does not catch socket.timeout
# especially while receiving response data on an already previously
# working request
e.reason = (
"Request took longer than the allowed "
f"{data.conf.remote_timeout:.1f} seconds"
)
exceptions.append(e)
continue
# All Sesame URLs failed...
else:
messages = [f"{url}: {e.reason}" for url, e in zip(urls, exceptions)]
raise NameResolveError(
"All Sesame queries failed. Unable to retrieve coordinates. See errors per"
f" URL below: \n {os.linesep.join(messages)}"
)
ra, dec = _parse_response(resp_data)
if ra is None or dec is None:
if db == "SNV":
err = f"Unable to find coordinates for name '{name}' using {url}"
else:
err = (
f"Unable to find coordinates for name '{name}' in database"
f" {database} using {url}"
)
raise NameResolveError(err)
# Return SkyCoord object
sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame="icrs")
return sc
| get_icrs_coordinates |
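A hedged sketch of calling the resolver above. A full query requires network access to a Sesame server, so results depend on the remote service; the parse=True path shown first works offline for names with embedded J-coordinates (the name used is the one quoted in the docstring).

from astropy.coordinates.name_resolve import get_icrs_coordinates

# Offline parse of a J-coordinate name (approximate position only).
coord = get_icrs_coordinates("CRTS SSS100805 J194428-420209", parse=True)
print(coord)

# Full Sesame query (requires internet); roughly equivalent to SkyCoord.from_name("M42").
# coord = get_icrs_coordinates("M42")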
astropy | 54 | astropy/table/index.py | def get_index(table, table_copy=None, names=None):
"""
Inputs a table and some subset of its columns as table_copy.
List or tuple containing names of columns as names,and returns an index
corresponding to this subset or list or None if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
| /usr/src/app/target_test_cases/failed_tests_get_index.txt | def get_index(table, table_copy=None, names=None):
"""
Inputs a table and some subset of its columns as table_copy.
List or tuple containing names of columns as names,and returns an index
corresponding to this subset or list or None if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError(
'one and only one argument from "table_copy" or "names" is required'
)
if names is None and table_copy is None:
raise ValueError(
'one and only one argument from "table_copy" or "names" is required'
)
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError(f"{names} is not a subset of table columns")
for name in names:
for index in table[name].info.indices:
if {col.info.name for col in index.columns} == names:
return index
return None
| get_index |
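A minimal sketch of the get_index helper above (an internal helper; the import path follows the module path given in the record). Table.add_index creates the index that get_index then looks up; the column names and values are arbitrary.

from astropy.table import Table
from astropy.table.index import get_index

t = Table({"a": [3, 1, 2], "b": [10, 20, 30]})
t.add_index("a")                      # build a sorted index on column 'a'
idx = get_index(t, names=("a",))      # returns the Index object covering exactly {'a'}
print(idx is not None)                # True
print(get_index(t, names=("b",)))     # None -- no index exists on 'b'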
astropy | 55 | astropy/coordinates/builtin_frames/utils.py | def get_offset_sun_from_barycenter(time, include_velocity=False, reverse=False):
"""
Returns the offset of the Sun center from the solar-system barycenter (SSB).
Parameters
----------
time : `~astropy.time.Time`
Time at which to calculate the offset
include_velocity : `bool`
If ``True``, attach the velocity as a differential. Defaults to ``False``.
reverse : `bool`
If ``True``, return the offset of the barycenter from the Sun. Defaults to ``False``.
Returns
-------
`~astropy.coordinates.CartesianRepresentation`
The offset
"""
| /usr/src/app/target_test_cases/failed_tests_get_offset_sun_from_barycenter.txt | def get_offset_sun_from_barycenter(time, include_velocity=False, reverse=False):
"""
Returns the offset of the Sun center from the solar-system barycenter (SSB).
Parameters
----------
time : `~astropy.time.Time`
Time at which to calculate the offset
include_velocity : `bool`
If ``True``, attach the velocity as a differential. Defaults to ``False``.
reverse : `bool`
If ``True``, return the offset of the barycenter from the Sun. Defaults to ``False``.
Returns
-------
`~astropy.coordinates.CartesianRepresentation`
The offset
"""
if include_velocity:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric_posvel
offset_pos, offset_vel = get_body_barycentric_posvel("sun", time)
if reverse:
offset_pos, offset_vel = -offset_pos, -offset_vel
offset_vel = offset_vel.represent_as(CartesianDifferential)
offset_pos = offset_pos.with_differentials(offset_vel)
else:
# Import here to avoid a circular import
from astropy.coordinates.solar_system import get_body_barycentric
offset_pos = get_body_barycentric("sun", time)
if reverse:
offset_pos = -offset_pos
return offset_pos
| get_offset_sun_from_barycenter |
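A small sketch exercising the helper above. It is an internal utility, so the import path may change between astropy versions; the default built-in ephemeris works without network access, and the date is arbitrary.

from astropy.time import Time
from astropy.coordinates.builtin_frames.utils import get_offset_sun_from_barycenter

t = Time("2024-01-01T00:00:00", scale="tdb")
offset = get_offset_sun_from_barycenter(t)                             # CartesianRepresentation, position only
offset_pv = get_offset_sun_from_barycenter(t, include_velocity=True)   # velocity attached as a differential
print(offset.norm().to("km"))   # Sun-SSB distance, of order 1e6 km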
astropy | 56 | astropy/coordinates/funcs.py | def get_sun(time):
"""
Determines the location of the sun at a given time (or times, if the input
is an array `~astropy.time.Time` object), in geocentric coordinates.
Parameters
----------
time : `~astropy.time.Time`
The time(s) at which to compute the location of the sun.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord`
The location of the sun as a `~astropy.coordinates.SkyCoord` in the
`~astropy.coordinates.GCRS` frame.
Notes
-----
The algorithm for determining the sun/earth relative position is based
on the simplified version of VSOP2000 that is part of ERFA. Compared to
JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
250 km over the 1000-3000 span.
"""
| /usr/src/app/target_test_cases/failed_tests_get_sun.txt | def get_sun(time):
"""
Determines the location of the sun at a given time (or times, if the input
is an array `~astropy.time.Time` object), in geocentric coordinates.
Parameters
----------
time : `~astropy.time.Time`
The time(s) at which to compute the location of the sun.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord`
The location of the sun as a `~astropy.coordinates.SkyCoord` in the
`~astropy.coordinates.GCRS` frame.
Notes
-----
The algorithm for determining the sun/earth relative position is based
on the simplified version of VSOP2000 that is part of ERFA. Compared to
JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
250 km over the 1000-3000 span.
"""
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, "tdb"))
# We have to manually do aberration because we're outputting directly into
# GCRS
earth_p = earth_pv_helio["p"]
earth_v = earth_pv_bary["v"]
# convert barycentric velocity to units of c, but keep as array for passing in to erfa
earth_v /= c.to_value(u.au / u.d)
dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
invlorentz = (1 - np.sum(earth_v**2, axis=-1)) ** 0.5
properdir = erfa.ab(
earth_p / dsun.reshape(dsun.shape + (1,)), -earth_v, dsun, invlorentz
)
cartrep = CartesianRepresentation(
x=-dsun * properdir[..., 0] * u.AU,
y=-dsun * properdir[..., 1] * u.AU,
z=-dsun * properdir[..., 2] * u.AU,
)
return SkyCoord(cartrep, frame=GCRS(obstime=time))
| get_sun |
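A minimal usage sketch for get_sun (public API; assumes astropy is installed, date chosen near the June solstice so the expected output is easy to check).

from astropy.time import Time
from astropy.coordinates import get_sun

t = Time("2024-06-21 12:00:00")
sun = get_sun(t)                      # SkyCoord in the GCRS frame
print(sun.ra.deg, sun.dec.deg)        # near RA ~90 deg, Dec ~ +23.4 deg at the solstice
print(sun.distance.to("au"))          # ~1.016 au, close to aphelion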
astropy | 57 | astropy/visualization/hist.py | def hist(x, bins=10, ax=None, max_bins=1e5, **kwargs):
"""Enhanced histogram function.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as matplotlib.pyplot.hist().
This function was ported from astroML: https://www.astroml.org/
Parameters
----------
x : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
ax : `~matplotlib.axes.Axes` instance, optional
Specify the Axes on which to draw the histogram. If not specified,
then the current active axes will be used.
max_bins : int, optional
Maximum number of bins allowed. With more than a few thousand bins
the performance of matplotlib will not be great. If the number of
bins is large *and* the number of input data points is large then
it will take a very long time to compute the histogram.
**kwargs :
other keyword arguments are described in ``plt.hist()``.
Notes
-----
Return values are the same as for ``plt.hist()``
See Also
--------
astropy.stats.histogram
"""
| /usr/src/app/target_test_cases/failed_tests_hist.txt | def hist(x, bins=10, ax=None, max_bins=1e5, **kwargs):
"""Enhanced histogram function.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as matplotlib.pyplot.hist().
This function was ported from astroML: https://www.astroml.org/
Parameters
----------
x : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
ax : `~matplotlib.axes.Axes` instance, optional
Specify the Axes on which to draw the histogram. If not specified,
then the current active axes will be used.
max_bins : int, optional
Maximum number of bins allowed. With more than a few thousand bins
the performance of matplotlib will not be great. If the number of
bins is large *and* the number of input data points is large then
the it will take a very long time to compute the histogram.
**kwargs :
other keyword arguments are described in ``plt.hist()``.
Notes
-----
Return values are the same as for ``plt.hist()``
See Also
--------
astropy.stats.histogram
"""
# Note that we only calculate the bin edges...matplotlib will calculate
# the actual histogram.
range = kwargs.get("range", None)
weights = kwargs.get("weights", None)
bins = calculate_bin_edges(x, bins, range=range, weights=weights)
if len(bins) > max_bins:
raise ValueError(
"Histogram has too many bins: "
f"{len(bins)}. Use max_bins to increase the number "
"of allowed bins or range to restrict "
"the histogram range."
)
if ax is None:
# optional dependency; only import if strictly needed.
import matplotlib.pyplot as plt
ax = plt.gca()
return ax.hist(x, bins, **kwargs)
| hist |
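A hedged sketch of the enhanced hist wrapper above; it assumes matplotlib is installed (the 'knuth' rule would additionally need scipy, which is why the sketch uses 'freedman'). The random data is arbitrary.

import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import hist

rng = np.random.default_rng(42)
x = rng.normal(size=1000)

fig, ax = plt.subplots()
# 'freedman' picks bin widths from the Freedman-Diaconis rule instead of a fixed count.
n, bins, patches = hist(x, bins="freedman", ax=ax, histtype="stepfilled", alpha=0.5)
ax.set_xlabel("x")
ax.set_ylabel("N")
plt.show()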
astropy | 58 | astropy/stats/histogram.py | def histogram(
a: ArrayLike,
bins: int
| list[int | float]
| Literal["blocks", "knuth", "scott", "freedman"]
| None = 10,
range: tuple[int | float, int | float] | None = None,
weights: ArrayLike | None = None,
**kwargs,
) -> tuple[NDArray, NDArray]:
"""Enhanced histogram function, providing adaptive binnings.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as `numpy.histogram`.
Parameters
----------
a : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None, optional
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges.
**kwargs : dict, optional
Extra arguments are described in `numpy.histogram`.
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
| /usr/src/app/target_test_cases/failed_tests_histogram.txt | def histogram(
a: ArrayLike,
bins: int
| list[int | float]
| Literal["blocks", "knuth", "scott", "freedman"]
| None = 10,
range: tuple[int | float, int | float] | None = None,
weights: ArrayLike | None = None,
**kwargs,
) -> tuple[NDArray, NDArray]:
"""Enhanced histogram function, providing adaptive binnings.
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specifying how bins are computed, the parameters are the same
as `numpy.histogram`.
Parameters
----------
a : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None, optional
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array-like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges.
**kwargs : dict, optional
Extra arguments are described in `numpy.histogram`.
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
bins = calculate_bin_edges(a, bins=bins, range=range, weights=weights)
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
| histogram |
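A minimal sketch of astropy.stats.histogram: the call signature matches numpy.histogram apart from the string ``bins`` options. The sample data is arbitrary.

import numpy as np
from astropy.stats import histogram

rng = np.random.default_rng(0)
x = rng.normal(size=500)

counts, edges = histogram(x, bins="scott")    # Scott's rule for the bin width
print(len(edges) - 1, "bins")
counts, edges = histogram(x, bins="blocks")   # Bayesian blocks: variable-width bins
print(len(edges) - 1, "bins")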
astropy | 59 | astropy/timeseries/periodograms/lombscargle/_statistics.py | def inv_fap_single(fap, N, normalization, dH=1, dK=3):
"""Single-frequency inverse false alarm probability.
This function computes the periodogram value associated with the specified
single-frequency false alarm probability. This should not be confused with
the false alarm level of the largest peak.
Parameters
----------
fap : array-like
The false alarm probability.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
z : np.ndarray
The periodogram power corresponding to the single-peak false alarm
probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
| /usr/src/app/target_test_cases/failed_tests_inv_fap_single.txt | def inv_fap_single(fap, N, normalization, dH=1, dK=3):
"""Single-frequency inverse false alarm probability.
This function computes the periodogram value associated with the specified
single-frequency false alarm probability. This should not be confused with
the false alarm level of the largest peak.
Parameters
----------
fap : array-like
The false alarm probability.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
z : np.ndarray
The periodogram power corresponding to the single-peak false alarm
probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
fap = np.asarray(fap)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
# No warnings for fap = 0; rather, just let it give the right infinity.
with np.errstate(divide="ignore"):
if normalization == "psd":
return -np.log(fap)
elif normalization == "standard":
return 1 - fap ** (2 / Nk)
elif normalization == "model":
return -1 + fap ** (-2 / Nk)
elif normalization == "log":
return -2 / Nk * np.log(fap)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
| inv_fap_single |
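A short check (not from the source) that inv_fap_single inverts the single-frequency FAP formulas quoted in the fap_single and inv_fap_single records above. To stay self-contained it re-implements the 'standard' normalization expressions directly rather than importing the private module; the function names here are illustrative only.

import numpy as np

def fap_single_standard(z, N, dK=3):
    # Baluev (2008), 'standard' normalization: FAP(z) = (1 - z)**((N - dK) / 2)
    return (1 - z) ** (0.5 * (N - dK))

def inv_fap_single_standard(fap, N, dK=3):
    # Inverse of the expression above: z(FAP) = 1 - FAP**(2 / (N - dK))
    return 1 - fap ** (2 / (N - dK))

N = 100
for fap in (0.5, 0.1, 0.01):
    z = inv_fap_single_standard(fap, N)
    assert np.isclose(fap_single_standard(z, N), fap)   # round-trips exactly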
astropy | 60 | astropy/coordinates/matrix_utilities.py | def is_O3(matrix, atol=None):
"""Check whether a matrix is in the length-preserving group O(3).
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose`.
atol : float, optional
The allowed absolute difference.
If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating.
.. versionadded:: 5.3
Returns
-------
is_o3 : bool or array of bool
If the matrix has more than two axes, the O(3) check is performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
Notes
-----
The orthogonal group O(3) preserves lengths, but is not guaranteed to keep
orientations. Rotations and reflections are in this group.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
"""
| /usr/src/app/target_test_cases/failed_tests_is_O3.txt | def is_O3(matrix, atol=None):
"""Check whether a matrix is in the length-preserving group O(3).
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose`.
atol : float, optional
The allowed absolute difference.
If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating.
.. versionadded:: 5.3
Returns
-------
is_o3 : bool or array of bool
If the matrix has more than two axes, the O(3) check is performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
Notes
-----
The orthogonal group O(3) preserves lengths, but is not guaranteed to keep
orientations. Rotations and reflections are in this group.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
"""
# matrix is in O(3) (rotations, proper and improper).
I = np.identity(matrix.shape[-1])
if atol is None:
if np.issubdtype(matrix.dtype, np.floating):
atol = np.finfo(matrix.dtype).eps * 5
else:
atol = 1e-15
is_o3 = np.all(
np.isclose(matrix @ matrix.swapaxes(-2, -1), I, atol=atol), axis=(-2, -1)
)
return is_o3
| is_O3 |
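A quick sketch for is_O3 (assumes astropy; rotation_matrix lives in the same astropy.coordinates.matrix_utilities module). The angles and matrices are arbitrary.

import numpy as np
from astropy import units as u
from astropy.coordinates.matrix_utilities import rotation_matrix, is_O3

R = rotation_matrix(35 * u.deg, "x")     # proper rotation about the x axis
F = np.diag([1.0, 1.0, -1.0])            # reflection through the x-y plane
print(is_O3(R))          # True  -- rotations preserve lengths
print(is_O3(F))          # True  -- reflections are also in O(3)
print(is_O3(2.0 * R))    # False -- scaling is not length-preserving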
astropy | 61 | astropy/coordinates/matrix_utilities.py | def is_rotation(matrix, allow_improper=False, atol=None):
"""Check whether a matrix is a rotation, proper or improper.
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose` and `~numpy.linalg.det`.
allow_improper : bool, optional
Whether to restrict check to the SO(3), the group of proper rotations,
or also allow improper rotations (with determinant -1).
The default (False) is only SO(3).
atol : float, optional
The allowed absolute difference.
If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating.
.. versionadded:: 5.3
Returns
-------
isrot : bool or array of bool
If the matrix has more than two axes, the checks are performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
See Also
--------
astropy.coordinates.matrix_utilities.is_O3 :
For the less restrictive check that a matrix is in the group O(3).
Notes
-----
The group SO(3) is the rotation group. It is O(3), with determinant 1.
Rotations with determinant -1 are improper rotations, combining both a
rotation and a reflection.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
"""
| /usr/src/app/target_test_cases/failed_tests_is_rotation.txt | def is_rotation(matrix, allow_improper=False, atol=None):
"""Check whether a matrix is a rotation, proper or improper.
Parameters
----------
matrix : (..., N, N) array-like
Must have attribute ``.shape`` and method ``.swapaxes()`` and not error
when using `~numpy.isclose` and `~numpy.linalg.det`.
allow_improper : bool, optional
Whether to restrict check to the SO(3), the group of proper rotations,
or also allow improper rotations (with determinant -1).
The default (False) is only SO(3).
atol : float, optional
The allowed absolute difference.
If `None` it defaults to 1e-15 or 5 * epsilon of the matrix's dtype, if floating.
.. versionadded:: 5.3
Returns
-------
isrot : bool or array of bool
If the matrix has more than two axes, the checks are performed on
slices along the last two axes -- (M, N, N) => (M, ) bool array.
See Also
--------
astropy.coordinates.matrix_utilities.is_O3 :
For the less restrictive check that a matrix is in the group O(3).
Notes
-----
The group SO(3) is the rotation group. It is O(3), with determinant 1.
Rotations with determinant -1 are improper rotations, combining both a
rotation and a reflection.
For more information, see https://en.wikipedia.org/wiki/Orthogonal_group
"""
if atol is None:
if np.issubdtype(matrix.dtype, np.floating):
atol = np.finfo(matrix.dtype).eps * 5
else:
atol = 1e-15
# matrix is in O(3).
is_o3 = is_O3(matrix, atol=atol)
# determinant checks for rotation (proper and improper)
if allow_improper: # determinant can be +/- 1
is_det1 = np.isclose(np.abs(np.linalg.det(matrix)), 1.0, atol=atol)
else: # restrict to SO(3)
is_det1 = np.isclose(np.linalg.det(matrix), 1.0, atol=atol)
return is_o3 & is_det1
| is_rotation |
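A companion sketch for is_rotation, showing the proper/improper distinction (assumes astropy; matrices are arbitrary).

import numpy as np
from astropy import units as u
from astropy.coordinates.matrix_utilities import rotation_matrix, is_rotation

R = rotation_matrix(120 * u.deg, "z")        # proper rotation, det = +1
F = np.diag([1.0, 1.0, -1.0])                # improper rotation (reflection), det = -1
print(is_rotation(R))                        # True
print(is_rotation(F))                        # False -- not in SO(3)
print(is_rotation(F, allow_improper=True))   # True  -- accepted when improper rotations are allowed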
astropy | 62 | astropy/stats/jackknife.py | def jackknife_resampling(data: NDArray[DT]) -> NDArray[DT]:
"""Performs jackknife resampling on numpy arrays.
Jackknife resampling is a technique to generate 'n' deterministic samples
of size 'n-1' from a measured sample of size 'n'. Basically, the i-th
sample, (1<=i<=n), is generated by means of removing the i-th measurement
of the original sample. Like the bootstrap resampling, this statistical
technique finds applications in estimating variance, bias, and confidence
intervals.
Parameters
----------
data : ndarray
Original sample (1-D array) from which the jackknife resamples will be
generated.
Returns
-------
resamples : ndarray
The i-th row is the i-th jackknife sample, i.e., the original sample
with the i-th measurement deleted.
References
----------
.. [1] McIntosh, Avery. "The Jackknife Estimation Method".
<https://arxiv.org/abs/1606.00497>
.. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
Resampling Plans". Technical Report No. 63, Division of Biostatistics,
Stanford University, December, 1980.
.. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling>
"""
| /usr/src/app/target_test_cases/failed_tests_jackknife_resampling.txt | def jackknife_resampling(data: NDArray[DT]) -> NDArray[DT]:
"""Performs jackknife resampling on numpy arrays.
Jackknife resampling is a technique to generate 'n' deterministic samples
of size 'n-1' from a measured sample of size 'n'. Basically, the i-th
sample, (1<=i<=n), is generated by means of removing the i-th measurement
of the original sample. Like the bootstrap resampling, this statistical
technique finds applications in estimating variance, bias, and confidence
intervals.
Parameters
----------
data : ndarray
Original sample (1-D array) from which the jackknife resamples will be
generated.
Returns
-------
resamples : ndarray
The i-th row is the i-th jackknife sample, i.e., the original sample
with the i-th measurement deleted.
References
----------
.. [1] McIntosh, Avery. "The Jackknife Estimation Method".
<https://arxiv.org/abs/1606.00497>
.. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
Resampling Plans". Technical Report No. 63, Division of Biostatistics,
Stanford University, December, 1980.
.. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling>
"""
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
resamples = np.empty([n, n - 1])
for i in range(n):
resamples[i] = np.delete(data, i)
return resamples
| jackknife_resampling |
astropy | 63 | astropy/stats/jackknife.py | def jackknife_stats(
data: NDArray,
statistic: Callable,
confidence_level: float | None = 0.95,
) -> tuple[float | NDArray, float | NDArray, float | NDArray, NDArray]:
"""Performs jackknife estimation on the basis of jackknife resamples.
This function requires `SciPy <https://www.scipy.org/>`_ to be installed.
Parameters
----------
data : ndarray
Original sample (1-D array).
statistic : function
Any function (or vector of functions) on the basis of the measured
data, e.g., sample mean, sample variance, etc. The jackknife estimate of
this statistic will be returned.
confidence_level : float, optional
Confidence level for the confidence interval of the Jackknife estimate.
Must be a real-valued number in (0,1). Default value is 0.95.
Returns
-------
estimate : float or `~numpy.ndarray`
The i-th element is the bias-corrected "jackknifed" estimate.
bias : float or `~numpy.ndarray`
The i-th element is the jackknife bias.
std_err : float or `~numpy.ndarray`
The i-th element is the jackknife standard error.
conf_interval : ndarray
If ``statistic`` is single-valued, the first and second elements are
the lower and upper bounds, respectively. If ``statistic`` is
vector-valued, each column corresponds to the confidence interval for
each component of ``statistic``. The first and second rows contain the
lower and upper bounds, respectively.
Examples
--------
1. Obtain Jackknife resamples:
>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> from astropy.stats import jackknife_stats
>>> data = np.array([1,2,3,4,5,6,7,8,9,0])
>>> resamples = jackknife_resampling(data)
>>> resamples
array([[2., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 9.]])
>>> resamples.shape
(10, 9)
2. Obtain Jackknife estimate for the mean, its bias, its standard error,
and its 95% confidence interval:
>>> test_statistic = np.mean
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
np.float64(4.5)
>>> bias
np.float64(0.0)
>>> stderr # doctest: +FLOAT_CMP
np.float64(0.95742710775633832)
>>> conf_interval
array([2.62347735, 6.37652265])
3. Example for two estimates
>>> test_statistic = lambda x: (np.mean(x), np.var(x))
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
array([4.5 , 9.16666667])
>>> bias
array([ 0. , -0.91666667])
>>> stderr
array([0.95742711, 2.69124476])
>>> conf_interval
array([[ 2.62347735, 3.89192387],
[ 6.37652265, 14.44140947]])
IMPORTANT: Note that confidence intervals are given as columns
"""
| /usr/src/app/target_test_cases/failed_tests_jackknife_stats.txt | def jackknife_stats(
data: NDArray,
statistic: Callable,
confidence_level: float | None = 0.95,
) -> tuple[float | NDArray, float | NDArray, float | NDArray, NDArray]:
"""Performs jackknife estimation on the basis of jackknife resamples.
This function requires `SciPy <https://www.scipy.org/>`_ to be installed.
Parameters
----------
data : ndarray
Original sample (1-D array).
statistic : function
Any function (or vector of functions) on the basis of the measured
data, e.g., sample mean, sample variance, etc. The jackknife estimate of
this statistic will be returned.
confidence_level : float, optional
Confidence level for the confidence interval of the Jackknife estimate.
Must be a real-valued number in (0,1). Default value is 0.95.
Returns
-------
estimate : float or `~numpy.ndarray`
The i-th element is the bias-corrected "jackknifed" estimate.
bias : float or `~numpy.ndarray`
The i-th element is the jackknife bias.
std_err : float or `~numpy.ndarray`
The i-th element is the jackknife standard error.
conf_interval : ndarray
If ``statistic`` is single-valued, the first and second elements are
the lower and upper bounds, respectively. If ``statistic`` is
vector-valued, each column corresponds to the confidence interval for
each component of ``statistic``. The first and second rows contain the
lower and upper bounds, respectively.
Examples
--------
1. Obtain Jackknife resamples:
>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> from astropy.stats import jackknife_stats
>>> data = np.array([1,2,3,4,5,6,7,8,9,0])
>>> resamples = jackknife_resampling(data)
>>> resamples
array([[2., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 3., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 4., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 5., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 6., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 7., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 8., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 9., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 0.],
[1., 2., 3., 4., 5., 6., 7., 8., 9.]])
>>> resamples.shape
(10, 9)
2. Obtain Jackknife estimate for the mean, its bias, its standard error,
and its 95% confidence interval:
>>> test_statistic = np.mean
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
np.float64(4.5)
>>> bias
np.float64(0.0)
>>> stderr # doctest: +FLOAT_CMP
np.float64(0.95742710775633832)
>>> conf_interval
array([2.62347735, 6.37652265])
3. Example for two estimates
>>> test_statistic = lambda x: (np.mean(x), np.var(x))
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
array([4.5 , 9.16666667])
>>> bias
array([ 0. , -0.91666667])
>>> stderr
array([0.95742711, 2.69124476])
>>> conf_interval
array([[ 2.62347735, 3.89192387],
[ 6.37652265, 14.44140947]])
IMPORTANT: Note that confidence intervals are given as columns
"""
# jackknife confidence interval
if not (0 < confidence_level < 1):
raise ValueError("confidence level must be in (0, 1).")
# make sure original data is proper
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
# Only import scipy if inputs are valid
from scipy.special import erfinv
resamples = jackknife_resampling(data)
stat_data = statistic(data)
jack_stat = np.apply_along_axis(statistic, 1, resamples)
mean_jack_stat = np.mean(jack_stat, axis=0)
# jackknife bias
bias = (n - 1) * (mean_jack_stat - stat_data)
# jackknife standard error
std_err = np.sqrt(
(n - 1)
* np.mean((jack_stat - mean_jack_stat) * (jack_stat - mean_jack_stat), axis=0)
)
# bias-corrected "jackknifed estimate"
estimate = stat_data - bias
z_score = np.sqrt(2.0) * erfinv(confidence_level)
conf_interval = estimate + z_score * np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval
| jackknife_stats |
astropy | 64 | astropy/table/operations.py | def join(
left,
right,
keys=None,
join_type="inner",
*,
keys_left=None,
keys_right=None,
keep_order=False,
uniq_col_name="{col_name}_{table_name}",
table_names=["1", "2"],
metadata_conflicts="warn",
join_funcs=None,
):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : `~astropy.table.Table`-like object
Left side table in the join. If not a Table, will call ``Table(left)``
right : `~astropy.table.Table`-like object
Right side table in the join. If not a Table, will call ``Table(right)``
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
keys_left : str or list of str or list of column-like, optional
Left column(s) used to match rows instead of ``keys`` arg. This can be
be a single left table column name or list of column names, or a list of
column-like values with the same lengths as the left table.
keys_right : str or list of str or list of column-like, optional
Same as ``keys_left``, but for the right side of the join.
keep_order : bool, optional
By default, rows are sorted by the join keys. If True, preserve the order of
rows from the left table for "inner" or "left" joins, or from the right table
for "right" joins. For other join types this argument is ignored except that a
warning is issued if ``keep_order=True``.
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
| /usr/src/app/target_test_cases/failed_tests_join.txt | def join(
left,
right,
keys=None,
join_type="inner",
*,
keys_left=None,
keys_right=None,
keep_order=False,
uniq_col_name="{col_name}_{table_name}",
table_names=["1", "2"],
metadata_conflicts="warn",
join_funcs=None,
):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : `~astropy.table.Table`-like object
Left side table in the join. If not a Table, will call ``Table(left)``
right : `~astropy.table.Table`-like object
Right side table in the join. If not a Table, will call ``Table(right)``
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
keys_left : str or list of str or list of column-like, optional
Left column(s) used to match rows instead of ``keys`` arg. This can be
a single left table column name or list of column names, or a list of
column-like values with the same lengths as the left table.
keys_right : str or list of str or list of column-like, optional
Same as ``keys_left``, but for the right side of the join.
keep_order : bool, optional
By default, rows are sorted by the join keys. If True, preserve the order of
rows from the left table for "inner" or "left" joins, or from the right table
for "right" joins. For other join types this argument is ignored except that a
warning is issued if ``keep_order=True``.
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
# Define a magic key that won't conflict with any user column name. This is to
# support the keep_order argument. In this case a temporary column is added to the
# left or right table to keep track of the original row order. After joining, the
# order is restored and the temporary column is removed.
sort_table_index_key = "__astropy_table_keep_order_sort_index__"
sort_table = None
if keep_order:
if join_type not in ["left", "right", "inner"]:
# Keep order is not meaningful for an outer join and cartesian join is
# already ordered by left (primary) then right (secondary).
warnings.warn(
"keep_order=True is only supported for left, right, and inner joins",
UserWarning,
stacklevel=2,
)
else:
sort_table = right if join_type == "right" else left
sort_table[sort_table_index_key] = np.arange(len(sort_table))
# In case keep_order=True we need try/finally to ensure that the temporary column
# is removed even if an exception is raised.
try:
out = _join(
left,
right,
keys,
join_type,
uniq_col_name,
table_names,
metadata_conflicts,
join_funcs,
keys_left=keys_left,
keys_right=keys_right,
)
if sort_table is not None:
# Sort joined table to the original order and remove the temporary column.
out.sort(sort_table_index_key)
del out[sort_table_index_key]
finally:
if sort_table is not None:
# If sort_table is not None that implies keep_order=True.
del sort_table[sort_table_index_key]
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
| join |
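A short sketch of the table join above using the public astropy.table API; the column names and values are arbitrary.

from astropy.table import Table, join

left = Table({"name": ["a", "b", "c"], "flux": [1.0, 2.0, 3.0]})
right = Table({"name": ["b", "c", "d"], "err": [0.1, 0.2, 0.3]})

inner = join(left, right, keys="name")                     # rows present in both tables
outer = join(left, right, keys="name", join_type="outer")  # all rows, missing values masked
print(len(inner), len(outer))                              # 2 4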
astropy | 65 | astropy/modeling/statistic.py | def leastsquare(measured_vals, updated_model, weights, *x):
"""Least square statistic, with optional weights, in N-dimensions.
Parameters
----------
measured_vals : ndarray or sequence
Measured data values. Will be cast to array whose
shape must match the array-cast of the evaluated model.
updated_model : :class:`~astropy.modeling.Model` instance
Model with parameters set by the current iteration of the optimizer.
when evaluated on "x", must return array of shape "measured_vals"
weights : ndarray or None
Array of weights to apply to each residual.
*x : ndarray
Independent variables on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare_1d`
:func:`~astropy.modeling.statistic.leastsquare_2d`
:func:`~astropy.modeling.statistic.leastsquare_3d`
Notes
-----
Models in :mod:`~astropy.modeling` have broadcasting rules that try to
match inputs and outputs to model shapes. Numpy arrays have flexible
broadcasting rules, so mismatched shapes can often be made compatible. To
ensure data matches the model we must perform shape comparison and leverage
the Numpy arithmetic functions. This can obfuscate arithmetic computation
overrides, like with Quantities. Implement a custom statistic for more
direct control.
"""
| /usr/src/app/target_test_cases/failed_tests_leastsquare.txt | def leastsquare(measured_vals, updated_model, weights, *x):
"""Least square statistic, with optional weights, in N-dimensions.
Parameters
----------
measured_vals : ndarray or sequence
Measured data values. Will be cast to array whose
shape must match the array-cast of the evaluated model.
updated_model : :class:`~astropy.modeling.Model` instance
Model with parameters set by the current iteration of the optimizer.
when evaluated on "x", must return array of shape "measured_vals"
weights : ndarray or None
Array of weights to apply to each residual.
*x : ndarray
Independent variables on which to evaluate the model.
Returns
-------
res : float
The sum of least squares.
See Also
--------
:func:`~astropy.modeling.statistic.leastsquare_1d`
:func:`~astropy.modeling.statistic.leastsquare_2d`
:func:`~astropy.modeling.statistic.leastsquare_3d`
Notes
-----
Models in :mod:`~astropy.modeling` have broadcasting rules that try to
match inputs and outputs to model shapes. Numpy arrays have flexible
broadcasting rules, so mismatched shapes can often be made compatible. To
ensure data matches the model we must perform shape comparison and leverage
the Numpy arithmetic functions. This can obfuscate arithmetic computation
overrides, like with Quantities. Implement a custom statistic for more
direct control.
"""
model_vals = updated_model(*x)
if np.shape(model_vals) != np.shape(measured_vals):
raise ValueError(
f"Shape mismatch between model ({np.shape(model_vals)}) "
f"and measured ({np.shape(measured_vals)})"
)
if weights is None:
weights = 1.0
return np.sum(np.square(weights * np.subtract(model_vals, measured_vals)))
| leastsquare |
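A minimal sketch of the leastsquare statistic with a 1-D model (assumes astropy; the Gaussian1D parameters and noise level are arbitrary).

import numpy as np
from astropy.modeling import models
from astropy.modeling.statistic import leastsquare

x = np.linspace(-5, 5, 50)
true = models.Gaussian1D(amplitude=2.0, mean=0.0, stddev=1.0)
rng = np.random.default_rng(1)
y = true(x) + rng.normal(scale=0.05, size=x.size)

trial = models.Gaussian1D(amplitude=1.5, mean=0.2, stddev=1.1)
print(leastsquare(y, trial, None, x))                             # unweighted sum of squared residuals
print(leastsquare(y, trial, np.full_like(y, 1.0 / 0.05), x))      # residuals weighted by 1/sigma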
astropy | 66 | astropy/wcs/utils.py | def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an ITRS coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time associated with the coordinate, will be passed to
`~astropy.coordinates.ITRS` as the obstime keyword.
Returns
-------
~astropy.coordinates.ITRS
An `~astropy.coordinates.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
This function prioritizes reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
| /usr/src/app/target_test_cases/failed_tests_obsgeo_to_frame.txt | def obsgeo_to_frame(obsgeo, obstime):
"""
Convert a WCS obsgeo property into an ITRS coordinate frame.
Parameters
----------
obsgeo : array-like
A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
returned by ``WCS.wcs.obsgeo``.
obstime : time-like
The time associated with the coordinate, will be passed to
`~astropy.coordinates.ITRS` as the obstime keyword.
Returns
-------
~astropy.coordinates.ITRS
An `~astropy.coordinates.ITRS` coordinate frame
representing the coordinates.
Notes
-----
The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
where the first three elements are the coordinate in a cartesian
representation and the second 3 are the coordinate in a spherical
representation.
This function prioritizes reading the cartesian coordinates, and will only
read the spherical coordinates if the cartesian coordinates are either all
zero or any of the cartesian coordinates are non-finite.
In the case where both the spherical and cartesian coordinates have some
non-finite values the spherical coordinates will be returned with the
non-finite values included.
"""
if (
obsgeo is None
or len(obsgeo) != 6
or np.all(np.array(obsgeo) == 0)
or np.all(~np.isfinite(obsgeo))
):
raise ValueError(
f"Can not parse the 'obsgeo' location ({obsgeo}). "
"obsgeo should be a length 6 non-zero, finite numpy array"
)
# If the cartesian coords are zero or have NaNs in them use the spherical ones
if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))
# Otherwise we assume the cartesian ones are valid
else:
data = CartesianRepresentation(*obsgeo[:3] * u.m)
return ITRS(data, obstime=obstime)
| obsgeo_to_frame |
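A hedged sketch of obsgeo_to_frame; the obsgeo array here is built by hand (cartesian metres first, the spherical half left as zeros) for an arbitrary point on the Earth's surface, and the obstime is arbitrary.

import numpy as np
from astropy.time import Time
from astropy.wcs.utils import obsgeo_to_frame

# OBSGEO-[XYZ] in metres followed by OBSGEO-[BLH]; only the cartesian part is filled in here.
obsgeo = np.array([5327448.0, -1718665.0, 3051567.0, 0.0, 0.0, 0.0])
itrs = obsgeo_to_frame(obsgeo, Time("2021-01-01T00:00:00"))
print(itrs.cartesian.xyz.to("km"))   # ITRS cartesian coordinates with the given obstime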
astropy | 67 | astropy/nddata/utils.py | def overlap_slices(large_array_shape, small_array_shape, position, mode="partial"):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
"""
| /usr/src/app/target_test_cases/failed_tests_overlap_slices.txt | def overlap_slices(large_array_shape, small_array_shape, position, mode="partial"):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
"""
if mode not in ["partial", "trim", "strict"]:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape,)
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape,)
if np.isscalar(position):
position = (position,)
if any(~np.isfinite(position)):
raise ValueError("Input position contains invalid values (NaNs or infs).")
if len(small_array_shape) != len(large_array_shape):
raise ValueError(
'"large_array_shape" and "small_array_shape" must '
"have the same number of dimensions."
)
if len(small_array_shape) != len(position):
raise ValueError(
'"position" must have the same number of dimensions as "small_array_shape".'
)
# define the min/max pixel indices
indices_min = [
int(np.ceil(pos - (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
indices_max = [
int(np.ceil(pos + (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
for e_max in indices_max:
if e_max < 0 or (e_max == 0 and small_array_shape != (0, 0)):
raise NoOverlapError("Arrays do not overlap.")
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError("Arrays do not overlap.")
if mode == "strict":
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError("Arrays overlap only partially.")
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max > large_shape:
raise PartialOverlapError("Arrays overlap only partially.")
# Set up slices
slices_large = tuple(
slice(max(0, indices_min), min(large_shape, indices_max))
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
if mode == "trim":
slices_small = tuple(slice(0, slc.stop - slc.start) for slc in slices_large)
else:
slices_small = tuple(
slice(
max(0, -indices_min),
min(large_shape - indices_min, indices_max - indices_min),
)
for (indices_min, indices_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
return slices_large, slices_small
| overlap_slices |
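A minimal sketch of overlap_slices showing the 'partial' and 'trim' modes (assumes astropy; array sizes are arbitrary).

import numpy as np
from astropy.nddata.utils import overlap_slices

large = np.arange(100).reshape(10, 10)
small_shape = (3, 3)

# Cutout centred on the corner pixel: in 'partial' mode the small-array slices keep
# the full (3, 3) footprint indexing, with the out-of-bounds part simply not covered.
slc_lg, slc_sm = overlap_slices(large.shape, small_shape, position=(0, 0))
print(slc_lg)   # (slice(0, 2, None), slice(0, 2, None))
print(slc_sm)   # (slice(1, 3, None), slice(1, 3, None))

# 'trim' re-zeroes the small-array slices to just the overlapping region.
slc_lg, slc_sm = overlap_slices(large.shape, small_shape, position=(0, 0), mode="trim")
print(slc_sm)   # (slice(0, 2, None), slice(0, 2, None))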
astropy | 68 | astropy/io/votable/table.py | def parse(
source,
columns=None,
invalid="exception",
verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE,
table_number=None,
table_id=None,
filename=None,
unit_format=None,
datatype_mapping=None,
_debug_python_based_parser=False,
):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
.. versionchanged:: 6.0
The ``pedantic`` argument is removed.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base subclass or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` subclass. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
| /usr/src/app/target_test_cases/failed_tests_parse.txt | def parse(
source,
columns=None,
invalid="exception",
verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE,
table_number=None,
table_id=None,
filename=None,
unit_format=None,
datatype_mapping=None,
_debug_python_based_parser=False,
):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
.. versionchanged:: 6.0
The ``pedantic`` argument is removed.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base subclass or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` subclass. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
from . import VERIFY_OPTIONS, conf
invalid = invalid.lower()
if invalid not in ("exception", "mask"):
raise ValueError(
"accepted values of ``invalid`` are: ``'exception'`` or ``'mask'``."
)
if verify is None:
verify = conf.verify
elif verify not in VERIFY_OPTIONS:
raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")
if datatype_mapping is None:
datatype_mapping = {}
config = {
"columns": columns,
"invalid": invalid,
"verify": verify,
"chunk_size": chunk_size,
"table_number": table_number,
"filename": filename,
"unit_format": unit_format,
"datatype_mapping": datatype_mapping,
}
if isinstance(source, str):
source = os.path.expanduser(source)
if filename is None and isinstance(source, str):
config["filename"] = source
with iterparser.get_xml_iterator(
source, _debug_python_based_parser=_debug_python_based_parser
) as iterator:
return tree.VOTableFile(config=config, pos=(1, 1)).parse(iterator, config)
| parse |
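A brief usage sketch for the parse row above (not from the astropy source); the filename is illustrative and must point at an existing VOTable file.
from astropy.io.votable import parse

votable = parse("example.vot", verify="warn")   # hypothetical input file
table = votable.get_first_table().to_table()    # convert the first TABLE to an astropy Table
print(table.colnames)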
astropy | 69 | astropy/timeseries/periodograms/lombscargle/implementations/mle.py | def periodic_fit(t, y, dy, frequency, t_fit, center_data=True, fit_mean=True, nterms=1):
"""Compute the Lomb-Scargle model fit at a given frequency.
Parameters
----------
t, y, dy : float or array-like
The times, observations, and uncertainties to fit
frequency : float
The frequency at which to compute the model
t_fit : float or array-like
The times at which the fit should be computed
center_data : bool (default=True)
If True, center the input data before applying the fit
fit_mean : bool (default=True)
If True, include the bias as part of the model
nterms : int (default=1)
The number of Fourier terms to include in the fit
Returns
-------
y_fit : ndarray
The model fit evaluated at each value of t_fit
"""
| /usr/src/app/target_test_cases/failed_tests_periodic_fit.txt | def periodic_fit(t, y, dy, frequency, t_fit, center_data=True, fit_mean=True, nterms=1):
"""Compute the Lomb-Scargle model fit at a given frequency.
Parameters
----------
t, y, dy : float or array-like
The times, observations, and uncertainties to fit
frequency : float
The frequency at which to compute the model
t_fit : float or array-like
The times at which the fit should be computed
center_data : bool (default=True)
If True, center the input data before applying the fit
fit_mean : bool (default=True)
If True, include the bias as part of the model
nterms : int (default=1)
The number of Fourier terms to include in the fit
Returns
-------
y_fit : ndarray
The model fit evaluated at each value of t_fit
"""
t, y, frequency = map(np.asarray, (t, y, frequency))
if dy is None:
dy = np.ones_like(y)
else:
dy = np.asarray(dy)
t_fit = np.asarray(t_fit)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if t_fit.ndim != 1:
raise ValueError("t_fit should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency should be a scalar")
if center_data:
w = dy**-2.0
y_mean = np.dot(y, w) / w.sum()
y = y - y_mean
else:
y_mean = 0
X = design_matrix(t, frequency, dy=dy, bias=fit_mean, nterms=nterms)
theta_MLE = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy))
X_fit = design_matrix(t_fit, frequency, bias=fit_mean, nterms=nterms)
return y_mean + np.dot(X_fit, theta_MLE)
| periodic_fit |
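A minimal sketch of calling periodic_fit directly with synthetic data (in practice this helper is reached through the public LombScargle.model interface); the import path is taken from the row above.
import numpy as np
from astropy.timeseries.periodograms.lombscargle.implementations.mle import periodic_fit

rng = np.random.default_rng(42)
t = np.sort(100 * rng.random(60))
y = np.sin(2 * np.pi * 0.25 * t) + 0.1 * rng.standard_normal(60)
t_fit = np.linspace(0, 100, 500)
# dy=None means unit weights; frequency must be a scalar.
y_fit = periodic_fit(t, y, None, 0.25, t_fit, center_data=True, fit_mean=True)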
astropy | 70 | astropy/wcs/utils.py | def pixel_to_pixel(wcs_in, wcs_out, *inputs):
"""
Transform pixel coordinates in a dataset with a WCS to pixel coordinates
in another dataset with a different WCS.
This function is designed to efficiently deal with input pixel arrays that
are broadcasted views of smaller arrays, and is compatible with any
APE14-compliant WCS.
Parameters
----------
wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the original dataset which complies with the
high-level shared APE 14 WCS API.
wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the target dataset which complies with the
high-level shared APE 14 WCS API.
*inputs :
Scalars or arrays giving the pixel coordinates to transform.
"""
| /usr/src/app/target_test_cases/failed_tests_pixel_to_pixel.txt | def pixel_to_pixel(wcs_in, wcs_out, *inputs):
"""
Transform pixel coordinates in a dataset with a WCS to pixel coordinates
in another dataset with a different WCS.
This function is designed to efficiently deal with input pixel arrays that
are broadcasted views of smaller arrays, and is compatible with any
APE14-compliant WCS.
Parameters
----------
wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the original dataset which complies with the
high-level shared APE 14 WCS API.
wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
A WCS object for the target dataset which complies with the
high-level shared APE 14 WCS API.
*inputs :
Scalars or arrays giving the pixel coordinates to transform.
"""
# Shortcut for scalars
if np.isscalar(inputs[0]):
world_outputs = wcs_in.pixel_to_world(*inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
return wcs_out.world_to_pixel(*world_outputs)
# Remember original shape
original_shape = inputs[0].shape
matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
split_info = _split_matrix(matrix)
outputs = [None] * wcs_out.pixel_n_dim
for pixel_in_indices, pixel_out_indices in split_info:
pixel_inputs = []
for ipix in range(wcs_in.pixel_n_dim):
if ipix in pixel_in_indices:
pixel_inputs.append(unbroadcast(inputs[ipix]))
else:
pixel_inputs.append(inputs[ipix].flat[0])
pixel_inputs = np.broadcast_arrays(*pixel_inputs)
world_outputs = wcs_in.pixel_to_world(*pixel_inputs)
if not isinstance(world_outputs, (tuple, list)):
world_outputs = (world_outputs,)
pixel_outputs = wcs_out.world_to_pixel(*world_outputs)
if wcs_out.pixel_n_dim == 1:
pixel_outputs = (pixel_outputs,)
for ipix in range(wcs_out.pixel_n_dim):
if ipix in pixel_out_indices:
outputs[ipix] = np.broadcast_to(pixel_outputs[ipix], original_shape)
return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
| pixel_to_pixel |
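A usage sketch for pixel_to_pixel (not from the astropy source); the two WCS objects below are toy TAN projections with illustrative keyword values.
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_pixel

def make_wcs(crpix):
    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w.wcs.crval = [10.0, 20.0]
    w.wcs.crpix = crpix
    w.wcs.cdelt = [-0.001, 0.001]
    w.wcs.cunit = ["deg", "deg"]
    return w

wcs_in = make_wcs([50.0, 50.0])
wcs_out = make_wcs([100.0, 100.0])   # same sky, shifted reference pixel
x_out, y_out = pixel_to_pixel(wcs_in, wcs_out, 45.0, 55.0)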
astropy | 71 | astropy/wcs/utils.py | def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
The celestial coordinates. Whatever ``cls`` type is.
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
| /usr/src/app/target_test_cases/failed_tests_pixel_to_skycoord.txt | def pixel_to_skycoord(xp, yp, wcs, origin=0, mode="all", cls=None):
"""
Convert a set of pixel coordinates into a `~astropy.coordinates.SkyCoord`
coordinate.
Parameters
----------
xp, yp : float or ndarray
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
cls : class or None
The class of object to create. Should be a
`~astropy.coordinates.SkyCoord` subclass. If None, defaults to
`~astropy.coordinates.SkyCoord`.
Returns
-------
coords : `~astropy.coordinates.SkyCoord` subclass
The celestial coordinates. Whatever ``cls`` type is.
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
# Import astropy.coordinates here to avoid circular imports
from astropy.coordinates import SkyCoord, UnitSphericalRepresentation
# we have to do this instead of actually setting the default to SkyCoord
# because importing SkyCoord at the module-level leads to circular
# dependencies.
if cls is None:
cls = SkyCoord
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS gives
lon_unit = u.Unit(wcs.wcs.cunit[0])
lat_unit = u.Unit(wcs.wcs.cunit[1])
# Convert pixel coordinates to celestial coordinates
if mode == "all":
lon, lat = wcs.all_pix2world(xp, yp, origin)
elif mode == "wcs":
lon, lat = wcs.wcs_pix2world(xp, yp, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
# Add units to longitude/latitude
lon = lon * lon_unit
lat = lat * lat_unit
# Create a SkyCoord-like object
data = UnitSphericalRepresentation(lon=lon, lat=lat)
coords = cls(frame.realize_frame(data))
return coords
| pixel_to_skycoord |
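A usage sketch for pixel_to_skycoord (not from the astropy source); the WCS keyword values are illustrative.
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord

w = WCS(naxis=2)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [150.0, 2.0]
w.wcs.crpix = [512.0, 512.0]
w.wcs.cdelt = [-0.0002, 0.0002]
w.wcs.cunit = ["deg", "deg"]

coord = pixel_to_skycoord(100.0, 200.0, w, origin=0)   # returns a SkyCoord (ICRS here)
print(coord.ra.deg, coord.dec.deg)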
astropy | 72 | astropy/io/fits/convenience.py | def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
not case sensitive:
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow a quick printout
of a FITS difference report; the report is written to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
| /usr/src/app/target_test_cases/failed_tests_printdiff.txt | def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for HDU specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By HDU number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension HDU
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension HDU
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
not case sensitive:
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
**kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow a quick printout
of a FITS difference report; the report is written to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {
key: kwargs.pop(key) for key in ["ext", "extname", "extver"] if key in kwargs
}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
# Use handy _getext to interpret any ext keywords, but
# will need to close a if fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError(
"Extension specification with HDUList objects not implemented."
)
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
| printdiff |
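A short sketch of printdiff (not from the astropy source); the in-memory comparison runs as-is, while the file-based call is commented out because the filenames are illustrative.
import numpy as np
from astropy.io import fits

hdu_a = fits.PrimaryHDU(data=np.arange(5))
hdu_b = fits.PrimaryHDU(data=np.arange(5) + 1)
fits.printdiff(hdu_a, hdu_b)                  # prints the data differences to stdout
# fits.printdiff("inA.fits", "inB.fits", 0)   # compare the primary HDUs of two files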
astropy | 73 | astropy/io/fits/column.py | def python_to_tdisp(format_string, logical_dtype=False):
"""
Turn the Python format string to a TDISP FITS compliant format string. Not
all formats convert; these will cause a Warning and return None.
Parameters
----------
format_string : str
The Python format string used to specify display formatting.
logical_dtype : bool
True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
tdisp_string : str
The Python format string translated into a TDISPn keyword string.
"""
| /usr/src/app/target_test_cases/failed_tests_python_to_tdisp.txt | def python_to_tdisp(format_string, logical_dtype=False):
"""
Turn the Python format string to a TDISP FITS compliant format string. Not
all formats convert; these will cause a Warning and return None.
Parameters
----------
format_string : str
The Python format string used to specify display formatting.
logical_dtype : bool
True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
tdisp_string : str
The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {
"a": "A",
"s": "A",
"d": "I",
"b": "B",
"o": "O",
"x": "Z",
"X": "Z",
"f": "F",
"F": "F",
"g": "G",
"G": "G",
"e": "E",
"E": "E",
}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == "{" and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip("}")
elif format_string[0] == "%":
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = "", ""
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == ">" and fmt_str[1] != "0":
ftype = fmt_to_tdisp["a"]
width = fmt_str[1:]
elif fmt_str[-1] == "s" and fmt_str != "s":
ftype = fmt_to_tdisp["a"]
width = fmt_str[:-1].lstrip("0")
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != "0":
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if "." in fmt_str:
width, precision = fmt_str.split(".")
sep = "."
if width == "":
key = ftype if ftype != "G" else "F"
width = str(
int(precision)
+ (ASCII_DEFAULT_WIDTHS[key][0] - ASCII_DEFAULT_WIDTHS[key][1])
)
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn(
f"Format {format_string} cannot be mapped to the accepted TDISPn "
"keyword values. Format will not be moved into TDISPn keyword.",
AstropyUserWarning,
)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = "L"
return ftype + width + sep + precision
| python_to_tdisp |
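A quick sketch exercising python_to_tdisp directly (import path taken from the row above); the expected outputs in the comments follow from the mapping table in the function body.
from astropy.io.fits.column import python_to_tdisp

print(python_to_tdisp("{:8.3f}"))   # 'F8.3'
print(python_to_tdisp("%10d"))      # 'I10'
print(python_to_tdisp("s"))         # unmappable: warns and returns None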
astropy | 74 | astropy/stats/sigma_clipping.py | def sigma_clipped_stats(
data: ArrayLike,
mask: NDArray | None = None,
mask_value: float | None = None,
sigma: float | None = 3.0,
sigma_lower: float | None = None,
sigma_upper: float | None = None,
maxiters: int | None = 5,
cenfunc: Literal["median", "mean"] | Callable | None = "median",
stdfunc: Literal["std", "mad_std"] | Callable | None = "std",
std_ddof: int | None = 0,
axis: int | tuple[int, ...] | None = None,
grow: float | Literal[False] | None = False,
) -> tuple[float, float, float]:
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
| /usr/src/app/target_test_cases/failed_tests_sigma_clipped_stats.txt | def sigma_clipped_stats(
data: ArrayLike,
mask: NDArray | None = None,
mask_value: float | None = None,
sigma: float | None = 3.0,
sigma_lower: float | None = None,
sigma_upper: float | None = None,
maxiters: int | None = 5,
cenfunc: Literal["median", "mean"] | Callable | None = "median",
stdfunc: Literal["std", "mad_std"] | Callable | None = "std",
std_ddof: int | None = 0,
axis: int | tuple[int, ...] | None = None,
grow: float | Literal[False] | None = False,
) -> tuple[float, float, float]:
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return np.ma.masked, np.ma.masked, np.ma.masked
sigclip = SigmaClip(
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
maxiters=maxiters,
cenfunc=cenfunc,
stdfunc=stdfunc,
grow=grow,
)
data_clipped = sigclip(
data, axis=axis, masked=False, return_bounds=False, copy=True
)
mean = nanmean(data_clipped, axis=axis)
median = nanmedian(data_clipped, axis=axis)
std = nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
| sigma_clipped_stats |
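A minimal usage sketch for sigma_clipped_stats (not from the astropy source) on synthetic data with injected outliers.
import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(0)
data = rng.normal(5.0, 1.0, 1000)
data[::100] = 100.0                     # a few strong outliers
mean, median, std = sigma_clipped_stats(data, sigma=3.0, maxiters=5)
print(mean, median, std)                # roughly 5.0, 5.0 and 1.0 once the outliers are clipped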
astropy | 75 | astropy/utils/shapes.py | def simplify_basic_index(
basic_index: int | slice | Sequence[int | slice | EllipsisType | None],
*,
shape: Sequence[int],
) -> tuple[int | slice, ...]:
"""
Given a Numpy basic index, return a tuple of integers and slice objects
with no default values (`None`) if possible.
If one of the dimensions has a slice and the step is negative and the stop
value of the slice was originally `None`, the new stop value of the slice
may still be set to `None`.
For more information on valid basic indices, see
https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing
Parameters
----------
basic_index
A valid Numpy basic index
shape
The shape of the array being indexed
"""
| /usr/src/app/target_test_cases/failed_tests_simplify_basic_index.txt | def simplify_basic_index(
basic_index: int | slice | Sequence[int | slice | EllipsisType | None],
*,
shape: Sequence[int],
) -> tuple[int | slice, ...]:
"""
Given a Numpy basic index, return a tuple of integers and slice objects
with no default values (`None`) if possible.
If one of the dimensions has a slice and the step is negative and the stop
value of the slice was originally `None`, the new stop value of the slice
may still be set to `None`.
For more information on valid basic indices, see
https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing
Parameters
----------
basic_index
A valid Numpy basic index
shape
The shape of the array being indexed
"""
ndim = len(shape)
if not isinstance(basic_index, (tuple, list)): # We just have a single int
basic_index = (basic_index,)
new_index = list(basic_index)
if Ellipsis in new_index:
if new_index.count(Ellipsis) > 1:
raise IndexError("an index can only have a single ellipsis ('...')")
# Replace the Ellipsis with the correct number of slice(None)s
e_ind = new_index.index(Ellipsis)
new_index.remove(Ellipsis)
n_e = ndim - len(new_index)
for i in range(n_e):
ind = e_ind + i
new_index.insert(ind, slice(0, shape[ind], 1))
if len(new_index) > ndim:
raise ValueError(
f"The dimensionality of the basic index {basic_index} can not be greater "
f"than the dimensionality ({ndim}) of the data."
)
for i in range(ndim):
if i < len(new_index):
slc = new_index[i]
if isinstance(slc, slice):
indices = list(slc.indices(shape[i]))
# The following case is the only one where slice(*indices) does
# not give the 'correct' answer because it will set stop to -1
# which means the last element in the array.
if indices[1] == -1:
indices[1] = None
new_index[i] = slice(*indices)
elif isinstance(slc, numbers.Integral):
new_index[i] = normalize_axis_index(int(slc), shape[i])
else:
raise ValueError(f"Unexpected index element in basic index: {slc}")
else:
new_index.append(slice(0, shape[i], 1))
return tuple(new_index)
| simplify_basic_index |
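A small sketch of simplify_basic_index (import path taken from the row above); the expected outputs in the comments follow from the slice and ellipsis handling in the body.
from astropy.utils.shapes import simplify_basic_index

shape = (5, 6, 7)
print(simplify_basic_index((Ellipsis, 2), shape=shape))
# (slice(0, 5, 1), slice(0, 6, 1), 2)
print(simplify_basic_index(slice(None), shape=shape))
# (slice(0, 5, 1), slice(0, 6, 1), slice(0, 7, 1))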
astropy | 76 | astropy/wcs/utils.py | def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
| /usr/src/app/target_test_cases/failed_tests_skycoord_to_pixel.txt | def skycoord_to_pixel(coords, wcs, origin=0, mode="all"):
"""
Convert a set of SkyCoord coordinates into pixels.
Parameters
----------
coords : `~astropy.coordinates.SkyCoord`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
Whether to do the transformation including distortions (``'all'``) or
including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.coordinates.SkyCoord.from_pixel
"""
if _has_distortion(wcs) and wcs.naxis != 2:
raise ValueError("Can only handle WCS with distortions for 2-dimensional WCS")
# Keep only the celestial part of the axes, also re-orders lon/lat
wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE])
if wcs.naxis != 2:
raise ValueError("WCS should contain celestial component")
# Check which frame the WCS uses
frame = wcs_to_celestial_frame(wcs)
# Check what unit the WCS needs
xw_unit = u.Unit(wcs.wcs.cunit[0])
yw_unit = u.Unit(wcs.wcs.cunit[1])
# Convert positions to frame
coords = coords.transform_to(frame)
# Extract longitude and latitude. We first try and use lon/lat directly,
# but if the representation is not spherical or unit spherical this will
# fail. We should then force the use of the unit spherical
# representation. We don't do that directly to make sure that we preserve
# custom lon/lat representations if available.
try:
lon = coords.data.lon.to(xw_unit)
lat = coords.data.lat.to(yw_unit)
except AttributeError:
lon = coords.spherical.lon.to(xw_unit)
lat = coords.spherical.lat.to(yw_unit)
# Convert to pixel coordinates
if mode == "all":
xp, yp = wcs.all_world2pix(lon.value, lat.value, origin)
elif mode == "wcs":
xp, yp = wcs.wcs_world2pix(lon.value, lat.value, origin)
else:
raise ValueError("mode should be either 'all' or 'wcs'")
return xp, yp
| skycoord_to_pixel |
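A usage sketch for skycoord_to_pixel (not from the astropy source); the WCS keyword values and the coordinate are illustrative.
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import skycoord_to_pixel

w = WCS(naxis=2)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [83.8, -5.4]
w.wcs.crpix = [256.0, 256.0]
w.wcs.cdelt = [-0.0005, 0.0005]
w.wcs.cunit = ["deg", "deg"]

coord = SkyCoord(83.82, -5.39, unit="deg", frame="icrs")
xp, yp = skycoord_to_pixel(coord, w, origin=0)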
astropy | 77 | astropy/io/fits/convenience.py | def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
| /usr/src/app/target_test_cases/failed_tests_table_to_hdu.txt | def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .column import python_to_tdisp
from .connect import REMOVE_KEYWORDS, is_column_keyword
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names}"
)
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
tarray = table.as_array()
if isinstance(tarray, np.ma.MaskedArray):
# Fill masked values carefully:
# float column's default mask value needs to be Nan and
# string column's default mask should be an empty string.
# Note: getting the fill value for the structured array is
# more reliable than for individual columns for string entries.
# (no 'N/A' for a single-element string, where it should be 'N').
default_fill_value = np.ma.default_fill_value(tarray.dtype)
for colname, (coldtype, _) in tarray.dtype.fields.items():
if np.all(tarray.fill_value[colname] == default_fill_value[colname]):
# Since multi-element columns with dtypes such as '2f8' have
# a subdtype, we should look up the type of column on that.
coltype = (
coldtype.subdtype[0].type if coldtype.subdtype else coldtype.type
)
if issubclass(coltype, np.complexfloating):
tarray.fill_value[colname] = complex(np.nan, np.nan)
elif issubclass(coltype, np.inexact):
tarray.fill_value[colname] = np.nan
elif issubclass(coltype, np.character):
tarray.fill_value[colname] = ""
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(
tarray.filled(), header=hdr, character_as_bytes=character_as_bytes
)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# with non-default fill values in FITS (if at all possible).
# Be careful that we do not set null for columns that were not masked!
int_formats = ("B", "I", "J", "K")
if (
col.format in int_formats or col.format.p_format in int_formats
) and hasattr(table[col.name], "mask"):
fill_value = tarray[col.name].fill_value
col.null = fill_value.astype(int)
else:
table_hdu = BinTableHDU.from_columns(
tarray, header=hdr, character_as_bytes=character_as_bytes
)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(
table[col.name].info.format, logical_dtype=logical
)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format="fits")
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
f"The column '{col.name}' could not be stored in FITS "
f"format because it has a scale '({str(scale)})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units."
)
except ValueError:
# Warn that the unit is lost, but let the details depend on
# whether the column was serialized (because it was a
# quantity), since then the unit can be recovered by astropy.
warning = (
f"The unit '{unit.to_string()}' could not be saved in "
"native FITS format "
)
if any(
"SerializedColumn" in item and "name: " + col.name in item
for item in table.meta.get("comments", [])
):
warning += (
"and hence will be lost to non-astropy fits readers. "
"Within astropy, the unit can roundtrip using QTable, "
"though one has to enable the unit before reading."
)
else:
warning += (
"and cannot be recovered in reading. It can roundtrip "
"within astropy by using QTable both to write and read "
"back, though one has to enable the unit before reading."
)
warnings.warn(warning, AstropyUserWarning)
else:
# Try creating a Unit to issue a warning if the unit is not
# FITS compliant
Unit(col.unit, format="fits", parse_strict="warn")
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop("__coordinate_columns__", {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set these, even if we have no data.
for attr in "coord_type", "coord_unit":
setattr(col, attr, col_info.get(attr, None))
trpos = col_info.get("time_ref_pos", None)
if trpos is not None:
col.time_ref_pos = trpos
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
f"Meta-data keyword {key} will be ignored since it conflicts "
"with a FITS reserved keyword",
AstropyUserWarning,
)
continue
# Convert to FITS format
if key == "comments":
key = "comment"
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
f"Attribute `{key}` of type {type(value)} cannot be "
"added to FITS Header - skipping",
AstropyUserWarning,
)
return table_hdu
| table_to_hdu |
astropy | 78 | astropy/io/fits/fitstime.py | def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
| /usr/src/app/target_test_cases/failed_tests_time_to_fits.txt | def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Make a light copy of table (to the extent possible) and clear any indices along
# the way. Indices are not serialized and cause problems later, but they are not
# needed here so just drop. For Column subclasses take advantage of copy() method,
# but for others it is required to actually copy the data if there are attached
# indices. See #8077 and #9009 for further discussion.
new_cols = []
for col in table.itercols():
if isinstance(col, Column):
new_col = col.copy(copy_data=False) # Also drops any indices
else:
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
newtable = table.__class__(new_cols, copy=False)
newtable.meta = table.meta
# Global time coordinate frame keywords
hdr = Header(
[
Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()
]
)
# Store coordinate column-specific metadata
newtable.meta["__coordinate_columns__"] = defaultdict(OrderedDict)
coord_meta = newtable.meta["__coordinate_columns__"]
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
col_cls = MaskedColumn if col.masked else Column
if col.info.serialize_method["fits"] == "formatted_value":
newtable.replace_column(col.info.name, col_cls(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
# Note: easier would be np.stack([col.jd1, col.jd2], axis=-1), but that
# fails for np.ma.MaskedArray, as it returns just the data, ignoring the mask.
jd12 = np.empty_like(col.jd1, shape=col.jd1.shape + (2,))
jd12[..., 0] = col.jd1
jd12[..., 1] = col.jd2
newtable.replace_column(col.info.name, col_cls(jd12, unit="d"))
# Time column-specific override keywords
coord_meta[col.info.name]["coord_type"] = col.scale.upper()
coord_meta[col.info.name]["coord_unit"] = "d"
# Time column reference position
if col.location is None:
coord_meta[col.info.name]["time_ref_pos"] = None
if location is not None:
warnings.warn(
(
f'Time Column "{col.info.name}" has no specified location, '
"but global Time Position is present, which will be the "
"default for this column in FITS specification."
),
AstropyUserWarning,
)
else:
coord_meta[col.info.name]["time_ref_pos"] = "TOPOCENTER"
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
(
f'Earth Location "TOPOCENTER" for Time Column "{col.info.name}" '
f'is incompatible with scale "{col.scale.upper()}".'
),
AstropyUserWarning,
)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ("x", "y", "z"):
newtable.add_column(
Column(getattr(location, dim).to_value(u.m)),
name=f"OBSGEO-{dim.upper()}",
)
else:
hdr.extend(
[
Card(
keyword=f"OBSGEO-{dim.upper()}",
value=getattr(location, dim).to_value(u.m),
)
for dim in ("x", "y", "z")
]
)
elif np.any(location != col.location):
raise ValueError(
"Multiple Time Columns with different geocentric "
f"observatory locations ({location}, {col.location}) encountered."
"This is not supported by the FITS standard."
)
return newtable, hdr
| time_to_fits |
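A minimal sketch of time_to_fits on a table with one Time column (import path taken from the row above; this helper is normally invoked for you by table_to_hdu).
from astropy.table import Table
from astropy.time import Time
from astropy.io.fits.fitstime import time_to_fits

t = Table()
t["obs_time"] = Time(["2021-01-01T00:00:00", "2021-01-02T00:00:00"], scale="utc")
newtable, hdr = time_to_fits(t)
# newtable["obs_time"] now stores (jd1, jd2) pairs in days; hdr holds the global time keywords.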
astropy | 79 | astropy/timeseries/periodograms/lombscargle/implementations/utils.py | def trig_sum(t, h, df, N, f0=0, freq_factor=1, oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies.
This routine computes weighted sine and cosine sums::
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array-like
array of input times
h : array-like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float, optional
The low frequency to use
freq_factor : float, optional
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the trade-off between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarray
summation arrays for frequencies f = df * np.arange(1, N + 1)
"""
| /usr/src/app/target_test_cases/failed_tests_trig_sum.txt | def trig_sum(t, h, df, N, f0=0, freq_factor=1, oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies.
This routine computes weighted sine and cosine sums::
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array-like
array of input times
h : array-like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float, optional
The low frequency to use
freq_factor : float, optional
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the trade-off between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarray
summation arrays for frequencies f = df * np.arange(1, N + 1)
"""
df *= freq_factor
f0 *= freq_factor
if df <= 0:
raise ValueError("df must be positive")
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
if Mfft <= 0:
raise ValueError("Mfft must be positive")
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)[:N]
if t0 != 0:
f = f0 + df * np.arange(N)
fftgrid *= np.exp(2j * np.pi * t0 * f)
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
| trig_sum |
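A sketch comparing the FFT-based and direct evaluations of trig_sum on synthetic data (import path taken from the row above); the frequency grid values are illustrative.
import numpy as np
from astropy.timeseries.periodograms.lombscargle.implementations.utils import trig_sum

rng = np.random.default_rng(1)
t = np.sort(100 * rng.random(200))
h = np.sin(2 * np.pi * 0.17 * t)
S_fft, C_fft = trig_sum(t, h, df=0.01, N=500, f0=0.01, use_fft=True)
S_dir, C_dir = trig_sum(t, h, df=0.01, N=500, f0=0.01, use_fft=False)
# The approximate FFT sums should agree closely with the O(N^2) direct sums.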
astropy | 80 | astropy/time/core.py | def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses the list defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
| /usr/src/app/target_test_cases/failed_tests_update_leap_seconds.txt | def update_leap_seconds(files=None):
"""If the current ERFA leap second table is out of date, try to update it.
Uses `astropy.utils.iers.LeapSeconds.auto_open` to try to find an
up-to-date table. See that routine for the definition of "out of date".
In order to make it safe to call this any time, all exceptions are turned
into warnings.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses the list defined by
`astropy.utils.iers.LeapSeconds.auto_open`, which includes the table
used by ERFA itself, so if that is up to date, nothing will happen.
Returns
-------
n_update : int
Number of items updated.
"""
try:
from astropy.utils import iers
table = iers.LeapSeconds.auto_open(files)
return erfa.leap_seconds.update(table)
except Exception as exc:
warn(
f"leap-second auto-update failed due to the following exception: {exc!r}",
AstropyWarning,
)
return 0
| update_leap_seconds |
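A one-line usage sketch for update_leap_seconds (imported from astropy.time.core as in the row above); note that it may attempt to download an updated IERS leap-second table.
from astropy.time.core import update_leap_seconds

n_updated = update_leap_seconds()   # 0 if the built-in ERFA table is already current
print(n_updated)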
astropy | 81 | astropy/io/votable/table.py | def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : path-like or file-like
Path to a VOTABLE_ xml file or `~pathlib.Path`
object having Path to a VOTABLE_ xml file.
If file-like object, must be readable.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
"""
| /usr/src/app/target_test_cases/failed_tests_validate.txt | def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : path-like or file-like
Path to a VOTABLE_ xml file or `~pathlib.Path`
object having Path to a VOTABLE_ xml file.
If file-like object, must be readable.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
"""
from astropy.utils.console import color_print, print_code_line
return_as_str = False
if output is None:
output = io.StringIO()
return_as_str = True
lines = []
votable = None
reset_vo_warnings()
if isinstance(source, str):
source = os.path.expanduser(source)
with data.get_readable_fileobj(source, encoding="binary") as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, "name"):
filename = source.name
elif hasattr(source, "url"):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, verify="warn", filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [
str(x.message)
for x in warning_lines
if issubclass(x.category, exceptions.VOWarning)
] + lines
content_buffer.seek(0)
output.write(f"Validation report for {filename}\n\n")
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w["is_something"]:
output.write(w["message"])
output.write("\n\n")
else:
line = xml_lines[w["nline"] - 1]
warning = w["warning"]
if w["is_warning"]:
color = "yellow"
else:
color = "red"
color_print(
f"{w['nline']:d}: ",
"",
warning or "EXC",
color,
": ",
"",
textwrap.fill(
w["message"],
initial_indent=" ",
subsequent_indent=" ",
).lstrip(),
file=output,
)
print_code_line(line, w["nchar"], file=output)
output.write("\n")
else:
output.write("astropy.io.votable found no violations.\n\n")
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(filename, version)
if success != 0:
output.write("xmllint schema violations:\n\n")
output.write(stderr.decode("utf-8"))
else:
output.write("xmllint passed\n")
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
| validate |
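A brief, hedged usage sketch for the validate record above; "catalog.vot" is a placeholder file name.

# Sketch: validate a local VOTable file and capture the report.
import io
from astropy.io.votable import validate

report = validate("catalog.vot", output=None)    # output=None returns the report as a string
print(report)

buf = io.StringIO()
is_clean = validate("catalog.vot", output=buf)   # True only if no warnings were found
print("valid" if is_clean else "has warnings")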
astropy | 82 | astropy/wcs/utils.py | def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
| /usr/src/app/target_test_cases/failed_tests_wcs_to_celestial_frame.txt | def wcs_to_celestial_frame(wcs):
"""
For a given WCS, return the coordinate frame that matches the celestial
component of the WCS.
Parameters
----------
wcs : :class:`~astropy.wcs.WCS` instance
The WCS to find the frame for
Returns
-------
frame : :class:`~astropy.coordinates.BaseCoordinateFrame` subclass instance
An instance of a :class:`~astropy.coordinates.BaseCoordinateFrame`
subclass that best matches the specified WCS.
Notes
-----
To extend this function to frames not defined in astropy.coordinates, you
can write your own function which should take a :class:`~astropy.wcs.WCS`
instance and should return either an instance of a frame, or `None` if no
matching frame was found. You can register this function temporarily with::
>>> from astropy.wcs.utils import wcs_to_celestial_frame, custom_wcs_to_frame_mappings
>>> with custom_wcs_to_frame_mappings(my_function):
... wcs_to_celestial_frame(...)
"""
for mapping_set in WCS_FRAME_MAPPINGS:
for func in mapping_set:
frame = func(wcs)
if frame is not None:
return frame
raise ValueError(
"Could not determine celestial frame corresponding to the specified WCS object"
)
| wcs_to_celestial_frame |
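A brief, hedged usage sketch for the wcs_to_celestial_frame record above; the expectation of an ICRS result for a bare equatorial header is an assumption about astropy's default frame mapping.

# Sketch: build a minimal equatorial WCS and look up its celestial frame.
from astropy.wcs import WCS
from astropy.wcs.utils import wcs_to_celestial_frame

w = WCS(naxis=2)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
frame = wcs_to_celestial_frame(w)   # expected: an ICRS frame instance for this header
print(frame)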
astropy | 83 | astropy/utils/diff.py | def where_not_allclose(a, b, rtol=1e-5, atol=1e-8):
"""
A version of :func:`numpy.allclose` that returns the indices
where the two arrays differ, instead of just a boolean value.
Parameters
----------
a, b : array-like
Input arrays to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
idx : tuple of array
Indices where the two arrays differ.
"""
| /usr/src/app/target_test_cases/failed_tests_where_not_allclose.txt | def where_not_allclose(a, b, rtol=1e-5, atol=1e-8):
"""
A version of :func:`numpy.allclose` that returns the indices
where the two arrays differ, instead of just a boolean value.
Parameters
----------
a, b : array-like
Input arrays to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
idx : tuple of array
Indices where the two arrays differ.
"""
# Create fixed mask arrays to handle INF and NaN; currently INF and NaN
# are handled as equivalent
if not np.all(np.isfinite(a)):
a = np.ma.fix_invalid(a).data
if not np.all(np.isfinite(b)):
b = np.ma.fix_invalid(b).data
if atol == 0.0 and rtol == 0.0:
# Use a faster comparison for the most simple (and common) case
return np.where(a != b)
return np.where(np.abs(a - b) > (atol + rtol * np.abs(b)))
| where_not_allclose |
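A brief, hedged usage sketch for the where_not_allclose record above, with made-up sample arrays.

# Sketch: locate the elements where two arrays differ beyond the tolerances.
import numpy as np
from astropy.utils.diff import where_not_allclose

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.000001, 3.5])
idx = where_not_allclose(a, b, rtol=1e-5, atol=1e-8)
print(idx)              # only index 2 exceeds atol + rtol * |b|
print(a[idx], b[idx])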
astropy | 84 | astropy/io/votable/table.py | def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or :term:`file-like (writeable)`
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
"""
| /usr/src/app/target_test_cases/failed_tests_writeto.txt | def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or :term:`file-like (writeable)`
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
"""
from astropy.table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance"
)
table.to_xml(
file, tabledata_format=tabledata_format, _debug_python_based_parser=True
)
| writeto |
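A brief, hedged usage sketch for the writeto record above; output file names are placeholders.

# Sketch: serialize an astropy Table as VOTable XML.
from astropy.table import Table
from astropy.io.votable import writeto

t = Table({"ra": [10.68, 83.82], "dec": [41.27, -5.39]})
writeto(t, "sources.xml")                                    # default TABLEDATA serialization
writeto(t, "sources_binary.xml", tabledata_format="binary")  # override the serialization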
flask | 0 | src/flask/sansio/app.py | def handle_url_build_error(
self, error: BuildError, endpoint: str, values: dict[str, t.Any]
) -> str:
"""Called by :meth:`.url_for` if a
:exc:`~werkzeug.routing.BuildError` was raised. If this returns
a value, it will be returned by ``url_for``, otherwise the error
will be re-raised.
Each function in :attr:`url_build_error_handlers` is called with
``error``, ``endpoint`` and ``values``. If a function returns
``None`` or raises a ``BuildError``, it is skipped. Otherwise,
its return value is returned by ``url_for``.
:param error: The active ``BuildError`` being handled.
:param endpoint: The endpoint being built.
:param values: The keyword arguments passed to ``url_for``.
"""
| /usr/src/app/target_test_cases/failed_tests_app.App.handle_url_build_error.txt | def handle_url_build_error(
self, error: BuildError, endpoint: str, values: dict[str, t.Any]
) -> str:
"""Called by :meth:`.url_for` if a
:exc:`~werkzeug.routing.BuildError` was raised. If this returns
a value, it will be returned by ``url_for``, otherwise the error
will be re-raised.
Each function in :attr:`url_build_error_handlers` is called with
``error``, ``endpoint`` and ``values``. If a function returns
``None`` or raises a ``BuildError``, it is skipped. Otherwise,
its return value is returned by ``url_for``.
:param error: The active ``BuildError`` being handled.
:param endpoint: The endpoint being built.
:param values: The keyword arguments passed to ``url_for``.
"""
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
except BuildError as e:
# make error available outside except block
error = e
else:
if rv is not None:
return rv
# Re-raise if called with an active exception, otherwise raise
# the passed in exception.
if error is sys.exc_info()[1]:
raise
raise error
| app.App.handle_url_build_error |
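A brief, hedged usage sketch for the handle_url_build_error record above; the "ext." prefix and the domain are illustrative only.

# Sketch: a fallback registered in app.url_build_error_handlers.
from flask import Flask

app = Flask(__name__)

def external_url_handler(error, endpoint, values):
    if endpoint.startswith("ext."):
        return f"https://example.invalid/{endpoint[4:]}"
    return None   # returning None lets url_for fall through to the next handler or re-raise

app.url_build_error_handlers.append(external_url_handler)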
flask | 1 | src/flask/sansio/app.py | def trap_http_exception(self, e: Exception) -> bool:
"""Checks if an HTTP exception should be trapped or not. By default
this will return ``False`` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
If it returns ``True`` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionchanged:: 1.0
Bad request errors are not trapped by default in debug mode.
.. versionadded:: 0.8
"""
| /usr/src/app/target_test_cases/failed_tests_app.App.trap_http_exception.txt | def trap_http_exception(self, e: Exception) -> bool:
"""Checks if an HTTP exception should be trapped or not. By default
this will return ``False`` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
If it returns ``True`` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionchanged:: 1.0
Bad request errors are not trapped by default in debug mode.
.. versionadded:: 0.8
"""
if self.config["TRAP_HTTP_EXCEPTIONS"]:
return True
trap_bad_request = self.config["TRAP_BAD_REQUEST_ERRORS"]
# if unset, trap key errors in debug mode
if (
trap_bad_request is None
and self.debug
and isinstance(e, BadRequestKeyError)
):
return True
if trap_bad_request:
return isinstance(e, BadRequest)
return False
| app.App.trap_http_exception |
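A brief, hedged sketch of the two configuration keys consulted by trap_http_exception above.

# Sketch: opt in to re-raising HTTP exceptions instead of rendering error pages.
from flask import Flask

app = Flask(__name__)
app.config["TRAP_HTTP_EXCEPTIONS"] = True      # trap every HTTPException raised in views
app.config["TRAP_BAD_REQUEST_ERRORS"] = True   # also surface BadRequestKeyError tracebacks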
flask | 2 | src/flask/app.py | def app_context(self) -> AppContext:
"""Create an :class:`~flask.ctx.AppContext`. Use as a ``with``
block to push the context, which will make :data:`current_app`
point at this application.
An application context is automatically pushed by
:meth:`RequestContext.push() <flask.ctx.RequestContext.push>`
when handling a request, and when running a CLI command. Use
this to manually create a context outside of these situations.
::
with app.app_context():
init_db()
See :doc:`/appcontext`.
.. versionadded:: 0.9
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.app_context.txt | def app_context(self) -> AppContext:
"""Create an :class:`~flask.ctx.AppContext`. Use as a ``with``
block to push the context, which will make :data:`current_app`
point at this application.
An application context is automatically pushed by
:meth:`RequestContext.push() <flask.ctx.RequestContext.push>`
when handling a request, and when running a CLI command. Use
this to manually create a context outside of these situations.
::
with app.app_context():
init_db()
See :doc:`/appcontext`.
.. versionadded:: 0.9
"""
return AppContext(self)
| app.Flask.app_context |
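A brief, hedged usage sketch for the app_context record above.

# Sketch: push an application context outside of a request, e.g. in a one-off script.
from flask import Flask, current_app

app = Flask(__name__)

with app.app_context():
    print(current_app.name)   # current_app is bound to `app` inside the block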
flask | 3 | src/flask/app.py | def create_url_adapter(self, request: Request | None) -> MapAdapter | None:
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set
up so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
.. versionchanged:: 1.0
:data:`SERVER_NAME` no longer implicitly enables subdomain
matching. Use :attr:`subdomain_matching` instead.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.create_url_adapter.txt | def create_url_adapter(self, request: Request | None) -> MapAdapter | None:
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set
up so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
.. versionchanged:: 1.0
:data:`SERVER_NAME` no longer implicitly enables subdomain
matching. Use :attr:`subdomain_matching` instead.
"""
if request is not None:
# If subdomain matching is disabled (the default), use the
# default subdomain in all cases. This should be the default
# in Werkzeug but it currently does not have that feature.
if not self.subdomain_matching:
subdomain = self.url_map.default_subdomain or None
else:
subdomain = None
return self.url_map.bind_to_environ(
request.environ,
server_name=self.config["SERVER_NAME"],
subdomain=subdomain,
)
# We need at the very least the server name to be set for this
# to work.
if self.config["SERVER_NAME"] is not None:
return self.url_map.bind(
self.config["SERVER_NAME"],
script_name=self.config["APPLICATION_ROOT"],
url_scheme=self.config["PREFERRED_URL_SCHEME"],
)
return None
| app.Flask.create_url_adapter |
flask | 4 | src/flask/app.py | def do_teardown_appcontext(
self,
exc: BaseException | None = _sentinel, # type: ignore[assignment]
) -> None:
"""Called right before the application context is popped.
When handling a request, the application context is popped
after the request context. See :meth:`do_teardown_request`.
This calls all functions decorated with
:meth:`teardown_appcontext`. Then the
:data:`appcontext_tearing_down` signal is sent.
This is called by
:meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.
.. versionadded:: 0.9
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.do_teardown_appcontext.txt | def do_teardown_appcontext(
self,
exc: BaseException | None = _sentinel, # type: ignore[assignment]
) -> None:
"""Called right before the application context is popped.
When handling a request, the application context is popped
after the request context. See :meth:`do_teardown_request`.
This calls all functions decorated with
:meth:`teardown_appcontext`. Then the
:data:`appcontext_tearing_down` signal is sent.
This is called by
:meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.
.. versionadded:: 0.9
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
self.ensure_sync(func)(exc)
appcontext_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc)
| app.Flask.do_teardown_appcontext |
flask | 5 | src/flask/app.py | def do_teardown_request(
self,
exc: BaseException | None = _sentinel, # type: ignore[assignment]
) -> None:
"""Called after the request is dispatched and the response is
returned, right before the request context is popped.
This calls all functions decorated with
:meth:`teardown_request`, and :meth:`Blueprint.teardown_request`
if a blueprint handled the request. Finally, the
:data:`request_tearing_down` signal is sent.
This is called by
:meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,
which may be delayed during testing to maintain access to
resources.
:param exc: An unhandled exception raised while dispatching the
request. Detected from the current exception information if
not passed. Passed to each teardown function.
.. versionchanged:: 0.9
Added the ``exc`` argument.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.do_teardown_request.txt | def do_teardown_request(
self,
exc: BaseException | None = _sentinel, # type: ignore[assignment]
) -> None:
"""Called after the request is dispatched and the response is
returned, right before the request context is popped.
This calls all functions decorated with
:meth:`teardown_request`, and :meth:`Blueprint.teardown_request`
if a blueprint handled the request. Finally, the
:data:`request_tearing_down` signal is sent.
This is called by
:meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,
which may be delayed during testing to maintain access to
resources.
:param exc: An unhandled exception raised while dispatching the
request. Detected from the current exception information if
not passed. Passed to each teardown function.
.. versionchanged:: 0.9
Added the ``exc`` argument.
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
for name in chain(request.blueprints, (None,)):
if name in self.teardown_request_funcs:
for func in reversed(self.teardown_request_funcs[name]):
self.ensure_sync(func)(exc)
request_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc)
| app.Flask.do_teardown_request |
flask | 6 | src/flask/app.py | def get_send_file_max_age(self, filename: str | None) -> int | None:
"""Used by :func:`send_file` to determine the ``max_age`` cache
value for a given file path if it wasn't passed.
By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from
the configuration of :data:`~flask.current_app`. This defaults
to ``None``, which tells the browser to use conditional requests
instead of a timed cache, which is usually preferable.
Note this is a duplicate of the same method in the Flask
class.
.. versionchanged:: 2.0
The default configuration is ``None`` instead of 12 hours.
.. versionadded:: 0.9
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.get_send_file_max_age.txt | def get_send_file_max_age(self, filename: str | None) -> int | None:
"""Used by :func:`send_file` to determine the ``max_age`` cache
value for a given file path if it wasn't passed.
By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from
the configuration of :data:`~flask.current_app`. This defaults
to ``None``, which tells the browser to use conditional requests
instead of a timed cache, which is usually preferable.
Note this is a duplicate of the same method in the Flask
class.
.. versionchanged:: 2.0
The default configuration is ``None`` instead of 12 hours.
.. versionadded:: 0.9
"""
value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"]
if value is None:
return None
if isinstance(value, timedelta):
return int(value.total_seconds())
return value # type: ignore[no-any-return]
| app.Flask.get_send_file_max_age |
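A brief, hedged sketch for the get_send_file_max_age record above; the value is an arbitrary example.

# Sketch: SEND_FILE_MAX_AGE_DEFAULT may be None, an int of seconds, or a timedelta.
from datetime import timedelta
from flask import Flask

app = Flask(__name__)
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = timedelta(hours=12)   # reported as 43200 seconds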
flask | 7 | src/flask/app.py | def make_response(self, rv: ft.ResponseReturnValue) -> Response:
"""Convert the return value from a view function to an instance of
:attr:`response_class`.
:param rv: the return value from the view function. The view function
must return a response. Returning ``None``, or the view ending
without returning, is not allowed. The following types are allowed
for ``view_rv``:
``str``
A response object is created with the string encoded to UTF-8
as the body.
``bytes``
A response object is created with the bytes as the body.
``dict``
A dictionary that will be jsonify'd before being returned.
``list``
A list that will be jsonify'd before being returned.
``generator`` or ``iterator``
A generator that returns ``str`` or ``bytes`` to be
streamed as the response.
``tuple``
Either ``(body, status, headers)``, ``(body, status)``, or
``(body, headers)``, where ``body`` is any of the other types
allowed here, ``status`` is a string or an integer, and
``headers`` is a dictionary or a list of ``(key, value)``
tuples. If ``body`` is a :attr:`response_class` instance,
``status`` overwrites the existing value and ``headers`` are
extended.
:attr:`response_class`
The object is returned unchanged.
other :class:`~werkzeug.wrappers.Response` class
The object is coerced to :attr:`response_class`.
:func:`callable`
The function is called as a WSGI application. The result is
used to create a response object.
.. versionchanged:: 2.2
A generator will be converted to a streaming response.
A list will be converted to a JSON response.
.. versionchanged:: 1.1
A dict will be converted to a JSON response.
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.make_response.txt | def make_response(self, rv: ft.ResponseReturnValue) -> Response:
"""Convert the return value from a view function to an instance of
:attr:`response_class`.
:param rv: the return value from the view function. The view function
must return a response. Returning ``None``, or the view ending
without returning, is not allowed. The following types are allowed
for ``view_rv``:
``str``
A response object is created with the string encoded to UTF-8
as the body.
``bytes``
A response object is created with the bytes as the body.
``dict``
A dictionary that will be jsonify'd before being returned.
``list``
A list that will be jsonify'd before being returned.
``generator`` or ``iterator``
A generator that returns ``str`` or ``bytes`` to be
streamed as the response.
``tuple``
Either ``(body, status, headers)``, ``(body, status)``, or
``(body, headers)``, where ``body`` is any of the other types
allowed here, ``status`` is a string or an integer, and
``headers`` is a dictionary or a list of ``(key, value)``
tuples. If ``body`` is a :attr:`response_class` instance,
``status`` overwrites the existing value and ``headers`` are
extended.
:attr:`response_class`
The object is returned unchanged.
other :class:`~werkzeug.wrappers.Response` class
The object is coerced to :attr:`response_class`.
:func:`callable`
The function is called as a WSGI application. The result is
used to create a response object.
.. versionchanged:: 2.2
A generator will be converted to a streaming response.
A list will be converted to a JSON response.
.. versionchanged:: 1.1
A dict will be converted to a JSON response.
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
# unpack tuple returns
if isinstance(rv, tuple):
len_rv = len(rv)
# a 3-tuple is unpacked directly
if len_rv == 3:
rv, status, headers = rv # type: ignore[misc]
# decide if a 2-tuple has status or headers
elif len_rv == 2:
if isinstance(rv[1], (Headers, dict, tuple, list)):
rv, headers = rv
else:
rv, status = rv # type: ignore[assignment,misc]
# other sized tuples are not allowed
else:
raise TypeError(
"The view function did not return a valid response tuple."
" The tuple must have the form (body, status, headers),"
" (body, status), or (body, headers)."
)
# the body must not be None
if rv is None:
raise TypeError(
f"The view function for {request.endpoint!r} did not"
" return a valid response. The function either returned"
" None or ended without a return statement."
)
# make sure the body is an instance of the response class
if not isinstance(rv, self.response_class):
if isinstance(rv, (str, bytes, bytearray)) or isinstance(rv, cabc.Iterator):
# let the response class set the status and headers instead of
# waiting to do it manually, so that the class can handle any
# special logic
rv = self.response_class(
rv,
status=status,
headers=headers, # type: ignore[arg-type]
)
status = headers = None
elif isinstance(rv, (dict, list)):
rv = self.json.response(rv)
elif isinstance(rv, BaseResponse) or callable(rv):
# evaluate a WSGI callable, or coerce a different response
# class to the correct type
try:
rv = self.response_class.force_type(
rv, # type: ignore[arg-type]
request.environ,
)
except TypeError as e:
raise TypeError(
f"{e}\nThe view function did not return a valid"
" response. The return type must be a string,"
" dict, list, tuple with headers or status,"
" Response instance, or WSGI callable, but it"
f" was a {type(rv).__name__}."
).with_traceback(sys.exc_info()[2]) from None
else:
raise TypeError(
"The view function did not return a valid"
" response. The return type must be a string,"
" dict, list, tuple with headers or status,"
" Response instance, or WSGI callable, but it was a"
f" {type(rv).__name__}."
)
rv = t.cast(Response, rv)
# prefer the status if it was provided
if status is not None:
if isinstance(status, (str, bytes, bytearray)):
rv.status = status
else:
rv.status_code = status
# extend existing headers with provided headers
if headers:
rv.headers.update(headers) # type: ignore[arg-type]
return rv
| app.Flask.make_response |
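A brief, hedged sketch of return shapes that the make_response record above normalizes; routes and payloads are illustrative.

# Sketch: tuple and dict return values from views.
from flask import Flask

app = Flask(__name__)

@app.route("/text")
def text():
    return "created", 201, {"X-Example": "1"}   # (body, status, headers) tuple

@app.route("/data")
def data():
    return {"ok": True}                          # dict is serialized to a JSON response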
flask | 8 | src/flask/app.py | def open_resource(
self, resource: str, mode: str = "rb", encoding: str | None = None
) -> t.IO[t.AnyStr]:
"""Open a resource file relative to :attr:`root_path` for reading.
For example, if the file ``schema.sql`` is next to the file
``app.py`` where the ``Flask`` app is defined, it can be opened
with:
.. code-block:: python
with app.open_resource("schema.sql") as f:
conn.executescript(f.read())
:param resource: Path to the resource relative to :attr:`root_path`.
:param mode: Open the file in this mode. Only reading is supported,
valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
:param encoding: Open the file with this encoding when opening in text
mode. This is ignored when opening in binary mode.
.. versionchanged:: 3.1
Added the ``encoding`` parameter.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.open_resource.txt | def open_resource(
self, resource: str, mode: str = "rb", encoding: str | None = None
) -> t.IO[t.AnyStr]:
"""Open a resource file relative to :attr:`root_path` for reading.
For example, if the file ``schema.sql`` is next to the file
``app.py`` where the ``Flask`` app is defined, it can be opened
with:
.. code-block:: python
with app.open_resource("schema.sql") as f:
conn.executescript(f.read())
:param resource: Path to the resource relative to :attr:`root_path`.
:param mode: Open the file in this mode. Only reading is supported,
valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
:param encoding: Open the file with this encoding when opening in text
mode. This is ignored when opening in binary mode.
.. versionchanged:: 3.1
Added the ``encoding`` parameter.
"""
if mode not in {"r", "rt", "rb"}:
raise ValueError("Resources can only be opened for reading.")
path = os.path.join(self.root_path, resource)
if mode == "rb":
return open(path, mode)
return open(path, mode, encoding=encoding)
| app.Flask.open_resource |
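A brief, hedged usage sketch for the open_resource record above, assuming Flask 3.1+ for the encoding parameter; "schema.sql" is the placeholder name from the docstring.

# Sketch: read a text resource relative to the application's root_path.
from flask import Flask

app = Flask(__name__)

with app.open_resource("schema.sql", mode="r", encoding="utf-8") as f:
    schema = f.read()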
flask | 9 | src/flask/app.py | def request_context(self, environ: WSGIEnvironment) -> RequestContext:
"""Create a :class:`~flask.ctx.RequestContext` representing a
WSGI environment. Use a ``with`` block to push the context,
which will make :data:`request` point at this request.
See :doc:`/reqcontext`.
Typically you should not call this from your own code. A request
context is automatically pushed by the :meth:`wsgi_app` when
handling a request. Use :meth:`test_request_context` to create
an environment and context instead of this method.
:param environ: a WSGI environment
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.request_context.txt | def request_context(self, environ: WSGIEnvironment) -> RequestContext:
"""Create a :class:`~flask.ctx.RequestContext` representing a
WSGI environment. Use a ``with`` block to push the context,
which will make :data:`request` point at this request.
See :doc:`/reqcontext`.
Typically you should not call this from your own code. A request
context is automatically pushed by the :meth:`wsgi_app` when
handling a request. Use :meth:`test_request_context` to create
an environment and context instead of this method.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
| app.Flask.request_context |
flask | 10 | src/flask/app.py | def run(
self,
host: str | None = None,
port: int | None = None,
debug: bool | None = None,
load_dotenv: bool = True,
**options: t.Any,
) -> None:
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :doc:`/deploying/index` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
if present.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if present.
:param debug: if given, enable or disable debug mode. See
:attr:`debug`.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param options: the options to be forwarded to the underlying Werkzeug
server. See :func:`werkzeug.serving.run_simple` for more
information.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment
variables from :file:`.env` and :file:`.flaskenv` files.
The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`.
Threaded mode is enabled by default.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME``
variable.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.run.txt | def run(
self,
host: str | None = None,
port: int | None = None,
debug: bool | None = None,
load_dotenv: bool = True,
**options: t.Any,
) -> None:
"""Runs the application on a local development server.
Do not use ``run()`` in a production setting. It is not intended to
meet security and performance requirements for a production server.
Instead, see :doc:`/deploying/index` for WSGI server recommendations.
If the :attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
It is not recommended to use this function for development with
automatic reloading as this is badly supported. Instead you should
be using the :command:`flask` command line script's ``run`` support.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to ``True`` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
if present.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if present.
:param debug: if given, enable or disable debug mode. See
:attr:`debug`.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param options: the options to be forwarded to the underlying Werkzeug
server. See :func:`werkzeug.serving.run_simple` for more
information.
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment
variables from :file:`.env` and :file:`.flaskenv` files.
The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`.
Threaded mode is enabled by default.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME``
variable.
"""
# Ignore this call so that it doesn't start another server if
# the 'flask run' command is used.
if os.environ.get("FLASK_RUN_FROM_CLI") == "true":
if not is_running_from_reloader():
click.secho(
" * Ignoring a call to 'app.run()' that would block"
" the current 'flask' CLI command.\n"
" Only call 'app.run()' in an 'if __name__ =="
' "__main__"\' guard.',
fg="red",
)
return
if get_load_dotenv(load_dotenv):
cli.load_dotenv()
# if set, env var overrides existing value
if "FLASK_DEBUG" in os.environ:
self.debug = get_debug_flag()
# debug passed to method overrides all other sources
if debug is not None:
self.debug = bool(debug)
server_name = self.config.get("SERVER_NAME")
sn_host = sn_port = None
if server_name:
sn_host, _, sn_port = server_name.partition(":")
if not host:
if sn_host:
host = sn_host
else:
host = "127.0.0.1"
if port or port == 0:
port = int(port)
elif sn_port:
port = int(sn_port)
else:
port = 5000
options.setdefault("use_reloader", self.debug)
options.setdefault("use_debugger", self.debug)
options.setdefault("threaded", True)
cli.show_server_banner(self.debug, self.name)
from werkzeug.serving import run_simple
try:
run_simple(t.cast(str, host), port, self, **options)
finally:
# reset the first request information if the development server
# reset normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
| app.Flask.run |
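A brief, hedged usage sketch for the run record above, kept behind the main guard as the docstring recommends.

# Sketch: start the development server only when the module is executed directly.
from flask import Flask

app = Flask(__name__)

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000, debug=True)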
flask | 11 | src/flask/app.py | def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> FlaskClient:
"""Creates a test client for this application. For information
about unit testing head over to :doc:`/testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a ``with`` block to defer the closing down
of the context until the end of the ``with`` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
Additionally, you may pass optional keyword arguments that will then
be passed to the application's :attr:`test_client_class` constructor.
For example::
from flask.testing import FlaskClient
class CustomClient(FlaskClient):
def __init__(self, *args, **kwargs):
self._authentication = kwargs.pop("authentication")
super(CustomClient,self).__init__( *args, **kwargs)
app.test_client_class = CustomClient
client = app.test_client(authentication='Basic ....')
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for ``with`` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
.. versionchanged:: 0.11
Added `**kwargs` to support passing additional keyword arguments to
the constructor of :attr:`test_client_class`.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.test_client.txt | def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> FlaskClient:
"""Creates a test client for this application. For information
about unit testing head over to :doc:`/testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a ``with`` block to defer the closing down
of the context until the end of the ``with`` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
Additionally, you may pass optional keyword arguments that will then
be passed to the application's :attr:`test_client_class` constructor.
For example::
from flask.testing import FlaskClient
class CustomClient(FlaskClient):
def __init__(self, *args, **kwargs):
self._authentication = kwargs.pop("authentication")
super(CustomClient,self).__init__( *args, **kwargs)
app.test_client_class = CustomClient
client = app.test_client(authentication='Basic ....')
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for ``with`` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
.. versionchanged:: 0.11
Added `**kwargs` to support passing additional keyword arguments to
the constructor of :attr:`test_client_class`.
"""
cls = self.test_client_class
if cls is None:
from .testing import FlaskClient as cls
return cls( # type: ignore
self, self.response_class, use_cookies=use_cookies, **kwargs
)
| app.Flask.test_client |
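A brief, hedged usage sketch for the test_client record above; route and query values are illustrative.

# Sketch: exercise a view through the test client without starting a server.
from flask import Flask, request

app = Flask(__name__)
app.testing = True

@app.route("/")
def index():
    return request.args.get("vodka", "none")

with app.test_client() as c:
    rv = c.get("/?vodka=42")
    assert rv.data == b"42"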
flask | 12 | src/flask/app.py | def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:
"""Create a :class:`~flask.ctx.RequestContext` for a WSGI
environment created from the given values. This is mostly useful
during testing, where you may want to run a function that uses
request data without dispatching a full request.
See :doc:`/reqcontext`.
Use a ``with`` block to push the context, which will make
:data:`request` point at the request for the created
environment. ::
with app.test_request_context(...):
generate_report()
When using the shell, it may be easier to push and pop the
context manually to avoid indentation. ::
ctx = app.test_request_context(...)
ctx.push()
...
ctx.pop()
Takes the same arguments as Werkzeug's
:class:`~werkzeug.test.EnvironBuilder`, with some defaults from
the application. See the linked Werkzeug docs for most of the
available arguments. Flask-specific behavior is listed here.
:param path: URL path being requested.
:param base_url: Base URL where the app is being served, which
``path`` is relative to. If not given, built from
:data:`PREFERRED_URL_SCHEME`, ``subdomain``,
:data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
:param subdomain: Subdomain name to append to
:data:`SERVER_NAME`.
:param url_scheme: Scheme to use instead of
:data:`PREFERRED_URL_SCHEME`.
:param data: The request body, either as a string or a dict of
form keys and values.
:param json: If given, this is serialized as JSON and passed as
``data``. Also defaults ``content_type`` to
``application/json``.
:param args: other positional arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
:param kwargs: other keyword arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.test_request_context.txt | def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:
"""Create a :class:`~flask.ctx.RequestContext` for a WSGI
environment created from the given values. This is mostly useful
during testing, where you may want to run a function that uses
request data without dispatching a full request.
See :doc:`/reqcontext`.
Use a ``with`` block to push the context, which will make
:data:`request` point at the request for the created
environment. ::
with app.test_request_context(...):
generate_report()
When using the shell, it may be easier to push and pop the
context manually to avoid indentation. ::
ctx = app.test_request_context(...)
ctx.push()
...
ctx.pop()
Takes the same arguments as Werkzeug's
:class:`~werkzeug.test.EnvironBuilder`, with some defaults from
the application. See the linked Werkzeug docs for most of the
available arguments. Flask-specific behavior is listed here.
:param path: URL path being requested.
:param base_url: Base URL where the app is being served, which
``path`` is relative to. If not given, built from
:data:`PREFERRED_URL_SCHEME`, ``subdomain``,
:data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
:param subdomain: Subdomain name to append to
:data:`SERVER_NAME`.
:param url_scheme: Scheme to use instead of
:data:`PREFERRED_URL_SCHEME`.
:param data: The request body, either as a string or a dict of
form keys and values.
:param json: If given, this is serialized as JSON and passed as
``data``. Also defaults ``content_type`` to
``application/json``.
:param args: other positional arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
:param kwargs: other keyword arguments passed to
:class:`~werkzeug.test.EnvironBuilder`.
"""
from .testing import EnvironBuilder
builder = EnvironBuilder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
| app.Flask.test_request_context |
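A brief, hedged usage sketch for the test_request_context record above; path and arguments are illustrative.

# Sketch: run request-dependent code without dispatching a real request.
from flask import Flask, request

app = Flask(__name__)

with app.test_request_context("/report?year=2024", method="GET"):
    assert request.path == "/report"
    assert request.args["year"] == "2024"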
flask | 13 | src/flask/app.py | def url_for(
self,
/,
endpoint: str,
*,
_anchor: str | None = None,
_method: str | None = None,
_scheme: str | None = None,
_external: bool | None = None,
**values: t.Any,
) -> str:
"""Generate a URL to the given endpoint with the given values.
This is called by :func:`flask.url_for`, and can be called
directly as well.
An *endpoint* is the name of a URL rule, usually added with
:meth:`@app.route() <route>`, and usually the same name as the
view function. A route defined in a :class:`~flask.Blueprint`
will prepend the blueprint's name separated by a ``.`` to the
endpoint.
In some cases, such as email messages, you want URLs to include
the scheme and domain, like ``https://example.com/hello``. When
not in an active request, URLs will be external by default, but
this requires setting :data:`SERVER_NAME` so Flask knows what
domain to use. :data:`APPLICATION_ROOT` and
:data:`PREFERRED_URL_SCHEME` should also be configured as
needed. This config is only used when not in an active request.
Functions can be decorated with :meth:`url_defaults` to modify
keyword arguments before the URL is built.
If building fails for some reason, such as an unknown endpoint
or incorrect values, the app's :meth:`handle_url_build_error`
method is called. If that returns a string, that is returned,
otherwise a :exc:`~werkzeug.routing.BuildError` is raised.
:param endpoint: The endpoint name associated with the URL to
generate. If this starts with a ``.``, the current blueprint
name (if any) will be used.
:param _anchor: If given, append this as ``#anchor`` to the URL.
:param _method: If given, generate the URL associated with this
method for the endpoint.
:param _scheme: If given, the URL will have this scheme if it
is external.
:param _external: If given, prefer the URL to be internal
(False) or require it to be external (True). External URLs
include the scheme and domain. When not in an active
request, URLs are external by default.
:param values: Values to use for the variable parts of the URL
rule. Unknown keys are appended as query string arguments,
like ``?a=b&c=d``.
.. versionadded:: 2.2
Moved from ``flask.url_for``, which calls this method.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.url_for.txt | def url_for(
self,
/,
endpoint: str,
*,
_anchor: str | None = None,
_method: str | None = None,
_scheme: str | None = None,
_external: bool | None = None,
**values: t.Any,
) -> str:
"""Generate a URL to the given endpoint with the given values.
This is called by :func:`flask.url_for`, and can be called
directly as well.
An *endpoint* is the name of a URL rule, usually added with
:meth:`@app.route() <route>`, and usually the same name as the
view function. A route defined in a :class:`~flask.Blueprint`
will prepend the blueprint's name separated by a ``.`` to the
endpoint.
In some cases, such as email messages, you want URLs to include
the scheme and domain, like ``https://example.com/hello``. When
not in an active request, URLs will be external by default, but
this requires setting :data:`SERVER_NAME` so Flask knows what
domain to use. :data:`APPLICATION_ROOT` and
:data:`PREFERRED_URL_SCHEME` should also be configured as
needed. This config is only used when not in an active request.
Functions can be decorated with :meth:`url_defaults` to modify
keyword arguments before the URL is built.
If building fails for some reason, such as an unknown endpoint
or incorrect values, the app's :meth:`handle_url_build_error`
method is called. If that returns a string, that is returned,
otherwise a :exc:`~werkzeug.routing.BuildError` is raised.
:param endpoint: The endpoint name associated with the URL to
generate. If this starts with a ``.``, the current blueprint
name (if any) will be used.
:param _anchor: If given, append this as ``#anchor`` to the URL.
:param _method: If given, generate the URL associated with this
method for the endpoint.
:param _scheme: If given, the URL will have this scheme if it
is external.
:param _external: If given, prefer the URL to be internal
(False) or require it to be external (True). External URLs
include the scheme and domain. When not in an active
request, URLs are external by default.
:param values: Values to use for the variable parts of the URL
rule. Unknown keys are appended as query string arguments,
like ``?a=b&c=d``.
.. versionadded:: 2.2
Moved from ``flask.url_for``, which calls this method.
"""
req_ctx = _cv_request.get(None)
if req_ctx is not None:
url_adapter = req_ctx.url_adapter
blueprint_name = req_ctx.request.blueprint
# If the endpoint starts with "." and the request matches a
# blueprint, the endpoint is relative to the blueprint.
if endpoint[:1] == ".":
if blueprint_name is not None:
endpoint = f"{blueprint_name}{endpoint}"
else:
endpoint = endpoint[1:]
# When in a request, generate a URL without scheme and
# domain by default, unless a scheme is given.
if _external is None:
_external = _scheme is not None
else:
app_ctx = _cv_app.get(None)
# If called by helpers.url_for, an app context is active,
# use its url_adapter. Otherwise, app.url_for was called
# directly, build an adapter.
if app_ctx is not None:
url_adapter = app_ctx.url_adapter
else:
url_adapter = self.create_url_adapter(None)
if url_adapter is None:
raise RuntimeError(
"Unable to build URLs outside an active request"
" without 'SERVER_NAME' configured. Also configure"
" 'APPLICATION_ROOT' and 'PREFERRED_URL_SCHEME' as"
" needed."
)
# When outside a request, generate a URL with scheme and
# domain by default.
if _external is None:
_external = True
# It is an error to set _scheme when _external=False, in order
# to avoid accidental insecure URLs.
if _scheme is not None and not _external:
raise ValueError("When specifying '_scheme', '_external' must be True.")
self.inject_url_defaults(endpoint, values)
try:
rv = url_adapter.build( # type: ignore[union-attr]
endpoint,
values,
method=_method,
url_scheme=_scheme,
force_external=_external,
)
except BuildError as error:
values.update(
_anchor=_anchor, _method=_method, _scheme=_scheme, _external=_external
)
return self.handle_url_build_error(error, endpoint, values)
if _anchor is not None:
_anchor = _url_quote(_anchor, safe="%!#$&'()*+,/:;=?@")
rv = f"{rv}#{_anchor}"
return rv
| app.Flask.url_for |
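A brief, hedged usage sketch for the url_for record above; the server name is a placeholder.

# Sketch: build URLs outside a request by configuring SERVER_NAME.
from flask import Flask

app = Flask(__name__)
app.config["SERVER_NAME"] = "example.com"

@app.route("/user/<name>")
def profile(name):
    return name

with app.app_context():
    print(app.url_for("profile", name="ada"))                  # external by default: http://example.com/user/ada
    print(app.url_for("profile", name="ada", _anchor="top"))   # appends #top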
flask | 14 | src/flask/app.py | def wsgi_app(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> cabc.Iterable[bytes]:
"""The actual WSGI application. This is not implemented in
:meth:`__call__` so that middlewares can be applied without
losing a reference to the app object. Instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
Teardown events for the request and app contexts are called
even if an unhandled error occurs. Other events may not be
called depending on when an error occurs during dispatch.
See :ref:`callbacks-and-errors`.
:param environ: A WSGI environment.
:param start_response: A callable accepting a status code,
a list of headers, and an optional exception context to
start the response.
"""
| /usr/src/app/target_test_cases/failed_tests_app.Flask.wsgi_app.txt | def wsgi_app(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> cabc.Iterable[bytes]:
"""The actual WSGI application. This is not implemented in
:meth:`__call__` so that middlewares can be applied without
losing a reference to the app object. Instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
Teardown events for the request and app contexts are called
even if an unhandled error occurs. Other events may not be
called depending on when an error occurs during dispatch.
See :ref:`callbacks-and-errors`.
:param environ: A WSGI environment.
:param start_response: A callable accepting a status code,
a list of headers, and an optional exception context to
start the response.
"""
ctx = self.request_context(environ)
error: BaseException | None = None
try:
try:
ctx.push()
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.handle_exception(e)
except: # noqa: B001
error = sys.exc_info()[1]
raise
return response(environ, start_response)
finally:
if "werkzeug.debug.preserve_context" in environ:
environ["werkzeug.debug.preserve_context"](_cv_app.get())
environ["werkzeug.debug.preserve_context"](_cv_request.get())
if error is not None and self.should_ignore_error(error):
error = None
ctx.pop(error)
| app.Flask.wsgi_app |
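A brief, hedged sketch of the middleware pattern described in the wsgi_app record above, using Werkzeug's ProxyFix as the example middleware.

# Sketch: wrap wsgi_app rather than the app object so the Flask instance stays accessible.
from flask import Flask
from werkzeug.middleware.proxy_fix import ProxyFix

app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1)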
flask | 15 | src/flask/sansio/app.py | def logger(self) -> logging.Logger:
"""A standard Python :class:`~logging.Logger` for the app, with
the same name as :attr:`name`.
In debug mode, the logger's :attr:`~logging.Logger.level` will
be set to :data:`~logging.DEBUG`.
If there are no handlers configured, a default handler will be
added. See :doc:`/logging` for more information.
.. versionchanged:: 1.1.0
The logger takes the same name as :attr:`name` rather than
hard-coding ``"flask.app"``.
.. versionchanged:: 1.0.0
Behavior was simplified. The logger is always named
``"flask.app"``. The level is only set during configuration,
it doesn't check ``app.debug`` each time. Only one format is
used, not different ones depending on ``app.debug``. No
handlers are removed, and a handler is only added if no
handlers are already configured.
.. versionadded:: 0.3
"""
| /usr/src/app/target_test_cases/failed_tests_app.logger.txt | def logger(self) -> logging.Logger:
"""A standard Python :class:`~logging.Logger` for the app, with
the same name as :attr:`name`.
In debug mode, the logger's :attr:`~logging.Logger.level` will
be set to :data:`~logging.DEBUG`.
If there are no handlers configured, a default handler will be
added. See :doc:`/logging` for more information.
.. versionchanged:: 1.1.0
The logger takes the same name as :attr:`name` rather than
hard-coding ``"flask.app"``.
.. versionchanged:: 1.0.0
Behavior was simplified. The logger is always named
``"flask.app"``. The level is only set during configuration,
it doesn't check ``app.debug`` each time. Only one format is
used, not different ones depending on ``app.debug``. No
handlers are removed, and a handler is only added if no
handlers are already configured.
.. versionadded:: 0.3
"""
return create_logger(self)
| app.logger |
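A brief, hedged usage sketch for the logger record above.

# Sketch: adjust the level of the app logger and emit a message from application code.
import logging
from flask import Flask

app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app.logger.info("starting %s", app.name)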
flask | 16 | src/flask/sansio/app.py | def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
"""Register a :class:`~flask.Blueprint` on the application. Keyword
arguments passed to this method will override the defaults set on the
blueprint.
Calls the blueprint's :meth:`~flask.Blueprint.register` method after
recording the blueprint in the application's :attr:`blueprints`.
:param blueprint: The blueprint to register.
:param url_prefix: Blueprint routes will be prefixed with this.
:param subdomain: Blueprint routes will match on this subdomain.
:param url_defaults: Blueprint routes will use these default values for
view arguments.
:param options: Additional keyword arguments are passed to
:class:`~flask.blueprints.BlueprintSetupState`. They can be
accessed in :meth:`~flask.Blueprint.record` callbacks.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
.. versionadded:: 0.7
"""
| /usr/src/app/target_test_cases/failed_tests_app.register_blueprint.txt | def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
"""Register a :class:`~flask.Blueprint` on the application. Keyword
arguments passed to this method will override the defaults set on the
blueprint.
Calls the blueprint's :meth:`~flask.Blueprint.register` method after
recording the blueprint in the application's :attr:`blueprints`.
:param blueprint: The blueprint to register.
:param url_prefix: Blueprint routes will be prefixed with this.
:param subdomain: Blueprint routes will match on this subdomain.
:param url_defaults: Blueprint routes will use these default values for
view arguments.
:param options: Additional keyword arguments are passed to
:class:`~flask.blueprints.BlueprintSetupState`. They can be
accessed in :meth:`~flask.Blueprint.record` callbacks.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
.. versionadded:: 0.7
"""
blueprint.register(self, options)
| app.register_blueprint |
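A brief, hedged usage sketch for the register_blueprint record above; blueprint name and prefixes are illustrative.

# Sketch: register the same blueprint twice under unique names and URL prefixes.
from flask import Blueprint, Flask

bp = Blueprint("pages", __name__)

@bp.route("/about")
def about():
    return "about"

app = Flask(__name__)
app.register_blueprint(bp, url_prefix="/en")
app.register_blueprint(bp, url_prefix="/de", name="pages_de")   # unique name, see the 2.0.1 note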
flask | 17 | src/flask/sansio/app.py | def teardown_appcontext(self, f: T_teardown) -> T_teardown:
"""Registers a function to be called when the application
context is popped. The application context is typically popped
after the request context for each request, at the end of CLI
commands, or after a manually pushed context ends.
.. code-block:: python
with app.app_context():
...
When the ``with`` block exits (or ``ctx.pop()`` is called), the
teardown functions are called just before the app context is
made inactive. Since a request context typically also manages an
application context it would also be called when you pop a
request context.
When a teardown function was called because of an unhandled
exception it will be passed an error object. If an
:meth:`errorhandler` is registered, it will handle the exception
and the teardown will not receive it.
Teardown functions must avoid raising exceptions. If they
execute code that might fail they must surround that code with a
``try``/``except`` block and log any errors.
The return values of teardown functions are ignored.
.. versionadded:: 0.9
"""
| /usr/src/app/target_test_cases/failed_tests_app.teardown_appcontext.txt | def teardown_appcontext(self, f: T_teardown) -> T_teardown:
"""Registers a function to be called when the application
context is popped. The application context is typically popped
after the request context for each request, at the end of CLI
commands, or after a manually pushed context ends.
.. code-block:: python
with app.app_context():
...
When the ``with`` block exits (or ``ctx.pop()`` is called), the
teardown functions are called just before the app context is
made inactive. Since a request context typically also manages an
application context it would also be called when you pop a
request context.
When a teardown function was called because of an unhandled
exception it will be passed an error object. If an
:meth:`errorhandler` is registered, it will handle the exception
and the teardown will not receive it.
Teardown functions must avoid raising exceptions. If they
execute code that might fail they must surround that code with a
``try``/``except`` block and log any errors.
The return values of teardown functions are ignored.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
| app.teardown_appcontext |
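As a small, hedged illustration of the teardown behaviour described above (the ``close_db`` name and the cleanup step are assumptions)::

    import logging
    from flask import Flask

    app = Flask(__name__)

    @app.teardown_appcontext
    def close_db(exc):
        # exc is the unhandled exception, or None when the context is
        # popped cleanly. Teardown code should not raise, so risky
        # cleanup is wrapped in try/except and only logged.
        try:
            pass  # e.g. release a hypothetical database connection here
        except Exception:
            logging.exception("teardown failed")

    with app.app_context():
        pass  # close_db(None) runs as the context is popped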
flask | 18 | src/flask/sansio/app.py | def template_global(
self, name: str | None = None
) -> t.Callable[[T_template_global], T_template_global]:
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
| /usr/src/app/target_test_cases/failed_tests_app.template_global.txt | def template_global(
self, name: str | None = None
) -> t.Callable[[T_template_global], T_template_global]:
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f: T_template_global) -> T_template_global:
self.add_template_global(f, name=name)
return f
return decorator
| app.template_global |
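A minimal sketch of how the registered global becomes available inside templates, reusing the ``double`` example from the docstring (``render_template_string`` is used here purely for demonstration)::

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.template_global()
    def double(n):
        return 2 * n

    with app.app_context():
        # The global can be called from any template without being passed in.
        print(render_template_string("{{ double(21) }}"))  # -> 42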
flask | 19 | src/flask/sansio/app.py | def template_test(
self, name: str | None = None
) -> t.Callable[[T_template_test], T_template_test]:
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
| /usr/src/app/target_test_cases/failed_tests_app.template_test.txt | def template_test(
self, name: str | None = None
) -> t.Callable[[T_template_test], T_template_test]:
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f: T_template_test) -> T_template_test:
self.add_template_test(f, name=name)
return f
return decorator
| app.template_test |
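A short sketch of calling a registered test through Jinja's ``is`` syntax; the ``is_prime`` name mirrors the docstring example, and ``render_template_string`` is only used for demonstration::

    import math
    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.template_test()
    def is_prime(n):
        if n < 2:
            return False
        return all(n % i for i in range(2, int(math.isqrt(n)) + 1))

    with app.app_context():
        # The test is looked up by its function name.
        print(render_template_string("{{ 7 is is_prime }}"))  # -> True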
flask | 20 | src/flask/sansio/blueprints.py | def register(self, app: App, options: dict[str, t.Any]) -> None:
"""Called by :meth:`Flask.register_blueprint` to register all
views and callbacks registered on the blueprint with the
application. Creates a :class:`.BlueprintSetupState` and calls
each :meth:`record` callback with it.
:param app: The application this blueprint is being registered
with.
:param options: Keyword arguments forwarded from
:meth:`~Flask.register_blueprint`.
.. versionchanged:: 2.3
Nested blueprints now correctly apply subdomains.
.. versionchanged:: 2.1
Registering the same blueprint with the same name multiple
times is an error.
.. versionchanged:: 2.0.1
Nested blueprints are registered with their dotted name.
This allows different blueprints with the same name to be
nested at different locations.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
"""
| /usr/src/app/target_test_cases/failed_tests_blueprints.Blueprint.register.txt | def register(self, app: App, options: dict[str, t.Any]) -> None:
"""Called by :meth:`Flask.register_blueprint` to register all
views and callbacks registered on the blueprint with the
application. Creates a :class:`.BlueprintSetupState` and calls
each :meth:`record` callback with it.
:param app: The application this blueprint is being registered
with.
:param options: Keyword arguments forwarded from
:meth:`~Flask.register_blueprint`.
.. versionchanged:: 2.3
Nested blueprints now correctly apply subdomains.
.. versionchanged:: 2.1
Registering the same blueprint with the same name multiple
times is an error.
.. versionchanged:: 2.0.1
Nested blueprints are registered with their dotted name.
This allows different blueprints with the same name to be
nested at different locations.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
"""
name_prefix = options.get("name_prefix", "")
self_name = options.get("name", self.name)
name = f"{name_prefix}.{self_name}".lstrip(".")
if name in app.blueprints:
bp_desc = "this" if app.blueprints[name] is self else "a different"
existing_at = f" '{name}'" if self_name != name else ""
raise ValueError(
f"The name '{self_name}' is already registered for"
f" {bp_desc} blueprint{existing_at}. Use 'name=' to"
f" provide a unique name."
)
first_bp_registration = not any(bp is self for bp in app.blueprints.values())
first_name_registration = name not in app.blueprints
app.blueprints[name] = self
self._got_registered_once = True
state = self.make_setup_state(app, options, first_bp_registration)
if self.has_static_folder:
state.add_url_rule(
f"{self.static_url_path}/<path:filename>",
view_func=self.send_static_file, # type: ignore[attr-defined]
endpoint="static",
)
# Merge blueprint data into parent.
if first_bp_registration or first_name_registration:
self._merge_blueprint_funcs(app, name)
for deferred in self.deferred_functions:
deferred(state)
cli_resolved_group = options.get("cli_group", self.cli_group)
if self.cli.commands:
if cli_resolved_group is None:
app.cli.commands.update(self.cli.commands)
elif cli_resolved_group is _sentinel:
self.cli.name = name
app.cli.add_command(self.cli)
else:
self.cli.name = cli_resolved_group
app.cli.add_command(self.cli)
for blueprint, bp_options in self._blueprints:
bp_options = bp_options.copy()
bp_url_prefix = bp_options.get("url_prefix")
bp_subdomain = bp_options.get("subdomain")
if bp_subdomain is None:
bp_subdomain = blueprint.subdomain
if state.subdomain is not None and bp_subdomain is not None:
bp_options["subdomain"] = bp_subdomain + "." + state.subdomain
elif bp_subdomain is not None:
bp_options["subdomain"] = bp_subdomain
elif state.subdomain is not None:
bp_options["subdomain"] = state.subdomain
if bp_url_prefix is None:
bp_url_prefix = blueprint.url_prefix
if state.url_prefix is not None and bp_url_prefix is not None:
bp_options["url_prefix"] = (
state.url_prefix.rstrip("/") + "/" + bp_url_prefix.lstrip("/")
)
elif bp_url_prefix is not None:
bp_options["url_prefix"] = bp_url_prefix
elif state.url_prefix is not None:
bp_options["url_prefix"] = state.url_prefix
bp_options["name_prefix"] = name
blueprint.register(app, bp_options)
| blueprints.Blueprint.register |
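A brief illustration of two behaviours implemented above, nested registration under a dotted endpoint name with merged URL prefixes, and the ``ValueError`` raised when a name is reused (the blueprint names are illustrative assumptions)::

    from flask import Blueprint, Flask

    app = Flask(__name__)
    parent = Blueprint("parent", __name__, url_prefix="/parent")
    child = Blueprint("child", __name__, url_prefix="/child")

    @child.route("/ping")
    def ping():
        return "pong"

    parent.register_blueprint(child)
    app.register_blueprint(parent)
    # The nested route is registered as endpoint "parent.child.ping"
    # and served at "/parent/child/ping".

    try:
        # A different blueprint reusing the name "parent" is rejected.
        app.register_blueprint(Blueprint("parent", __name__))
    except ValueError as exc:
        print(exc)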
flask | 21 | src/flask/sansio/blueprints.py | def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
"""Register a :class:`~flask.Blueprint` on this blueprint. Keyword
arguments passed to this method will override the defaults set
on the blueprint.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
.. versionadded:: 2.0
"""
| /usr/src/app/target_test_cases/failed_tests_blueprints.register_blueprint.txt | def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
"""Register a :class:`~flask.Blueprint` on this blueprint. Keyword
arguments passed to this method will override the defaults set
on the blueprint.
.. versionchanged:: 2.0.1
The ``name`` option can be used to change the (pre-dotted)
name the blueprint is registered with. This allows the same
blueprint to be registered multiple times with unique names
for ``url_for``.
.. versionadded:: 2.0
"""
if blueprint is self:
raise ValueError("Cannot register a blueprint on itself")
self._blueprints.append((blueprint, options))
| blueprints.register_blueprint |
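A small sketch of the deferred nesting with option overrides and of the self-registration guard (the ``api`` and ``v1`` names are assumptions)::

    from flask import Blueprint

    api = Blueprint("api", __name__)
    v1 = Blueprint("v1", __name__)

    # The options recorded here (url_prefix) override the child's defaults
    # once the parent blueprint is itself registered on an application.
    api.register_blueprint(v1, url_prefix="/v1")

    try:
        api.register_blueprint(api)
    except ValueError as exc:
        print(exc)  # Cannot register a blueprint on itself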
flask | 22 | src/flask/cli.py | def load_dotenv(path: str | os.PathLike[str] | None = None) -> bool:
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 2.0
The current directory is not changed to the location of the
loaded file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
"""
| /usr/src/app/target_test_cases/failed_tests_cli.load_dotenv.txt | def load_dotenv(path: str | os.PathLike[str] | None = None) -> bool:
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 2.0
The current directory is not changed to the location of the
loaded file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
"""
try:
import dotenv
except ImportError:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# Always return after attempting to load a given path, don't load
# the default files.
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path, encoding="utf-8")
return False
loaded = False
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
dotenv.load_dotenv(path, encoding="utf-8")
loaded = True
return loaded # True if at least one file was located and loaded.
| cli.load_dotenv |
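A brief usage sketch, assuming python-dotenv is installed and a ``.env`` file containing ``FLASK_DEBUG=1`` exists in the working directory (both are assumptions for illustration)::

    import os
    from flask.cli import load_dotenv

    # With no path, .env and then .flaskenv are searched for; environment
    # variables that are already set are left untouched.
    if load_dotenv():
        print(os.environ.get("FLASK_DEBUG"))  # -> "1"

    # An explicit path loads only that file and skips the default search.
    load_dotenv(".flaskenv")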