from __future__ import absolute_import, division, print_function
"""
Parameter dictionary are included for xrf fitting.
Element data not included.
Some parameters are defined as
bound_type :
fixed: value is fixed
lohi: with both low and high boundary
lo: with low boundary
hi: with high boundary
none: no fitting boundary
Different fitting strategies are included to turn on or turn off some
parameters. Those strategies are default, linear, free_energy, free_all and
e_calibration. They are empirical experience from authors of the original code.
"""
# old param dict, keep it here for now.
para_dict = {
"coherent_sct_amplitude": {"bound_type": "none", "min": 7.0, "max": 8.0, "value": 6.0},
"coherent_sct_energy": {"bound_type": "none", "min": 10.4, "max": 12.4, "value": 11.8},
"compton_amplitude": {"bound_type": "none", "min": 0.0, "max": 10.0, "value": 5.0},
"compton_angle": {"bound_type": "lohi", "min": 75.0, "max": 90.0, "value": 90.0},
"compton_f_step": {"bound_type": "lohi", "min": 0.0, "max": 1.5, "value": 0.1},
"compton_f_tail": {"bound_type": "lohi", "min": 0.0, "max": 3.0, "value": 0.8},
"compton_fwhm_corr": {"bound_type": "lohi", "min": 0.1, "max": 3.0, "value": 1.4},
"compton_gamma": {"bound_type": "none", "min": 0.1, "max": 10.0, "value": 1.0},
"compton_hi_f_tail": {"bound_type": "none", "min": 1e-06, "max": 1.0, "value": 0.01},
"compton_hi_gamma": {"bound_type": "none", "min": 0.1, "max": 3.0, "value": 1.0},
"e_linear": {"bound_type": "fixed", "min": 0.001, "max": 0.1, "value": 1.0},
"e_offset": {"bound_type": "fixed", "min": -0.2, "max": 0.2, "value": 0.0},
"e_quadratic": {"bound_type": "none", "min": -0.0001, "max": 0.0001, "value": 0.0},
"f_step_linear": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"f_step_offset": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"f_step_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.0, "value": 0.0},
"f_tail_linear": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.01},
"f_tail_offset": {"bound_type": "none", "min": 0.0, "max": 0.1, "value": 0.04},
"f_tail_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.01, "value": 0.0},
"fwhm_fanoprime": {"bound_type": "lohi", "min": 1e-06, "max": 0.05, "value": 0.00012},
"fwhm_offset": {"bound_type": "lohi", "min": 0.005, "max": 0.5, "value": 0.12},
"gamma_linear": {"bound_type": "none", "min": 0.0, "max": 3.0, "value": 0.0},
"gamma_offset": {"bound_type": "none", "min": 0.1, "max": 10.0, "value": 2.0},
"gamma_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.0, "value": 0.0},
"ge_escape": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"kb_f_tail_linear": {"bound_type": "none", "min": 0.0, "max": 0.02, "value": 0.0},
"kb_f_tail_offset": {"bound_type": "none", "min": 0.0, "max": 0.2, "value": 0.0},
"kb_f_tail_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.0, "value": 0.0},
"linear": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"pileup0": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup1": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup2": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup3": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup4": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup5": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup6": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup7": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup8": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"si_escape": {"bound_type": "none", "min": 0.0, "max": 0.5, "value": 0.0},
"snip_width": {"bound_type": "none", "min": 0.1, "max": 2.82842712475, "value": 0.15},
}
# fitting strategies
adjust_element = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "fixed",
"compton_amplitude": "none",
"compton_angle": "fixed",
"compton_f_step": "fixed",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "lohi",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "fixed",
"e_offset": "fixed",
"e_quadratic": "fixed",
"fwhm_fanoprime": "fixed",
"fwhm_offset": "fixed",
"non_fitting_values": "fixed",
}
e_calibration = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "fixed",
"compton_amplitude": "none",
"compton_angle": "fixed",
"compton_f_step": "fixed",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "fixed",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "lohi",
"e_offset": "lohi",
"e_quadratic": "fixed",
"fwhm_fanoprime": "fixed",
"fwhm_offset": "fixed",
"non_fitting_values": "fixed",
}
linear = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "fixed",
"compton_amplitude": "none",
"compton_angle": "fixed",
"compton_f_step": "fixed",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "fixed",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "fixed",
"e_offset": "fixed",
"e_quadratic": "fixed",
"fwhm_fanoprime": "fixed",
"fwhm_offset": "fixed",
"non_fitting_values": "fixed",
}
free_more = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "lohi",
"compton_amplitude": "none",
"compton_angle": "lohi",
"compton_f_step": "lohi",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "lohi",
"compton_gamma": "lohi",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "lohi",
"e_offset": "lohi",
"e_quadratic": "lohi",
"fwhm_fanoprime": "lohi",
"fwhm_offset": "lohi",
"non_fitting_values": "fixed",
}
fit_with_tail = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "lohi",
"compton_amplitude": "none",
"compton_angle": "lohi",
"compton_f_step": "fixed",
"compton_f_tail": "lohi",
"compton_fwhm_corr": "lohi",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "lohi",
"e_offset": "lohi",
"e_quadratic": "lohi",
"fwhm_fanoprime": "lohi",
"fwhm_offset": "lohi",
"non_fitting_values": "fixed",
}
default_param = {
"coherent_sct_amplitude": {"bound_type": "none", "max": 10000000.0, "min": 0.10, "value": 100000},
"coherent_sct_energy": {
"bound_type": "lohi",
"description": "Incident E [keV]",
"max": 13.0,
"min": 9.0,
"value": 10.0,
},
"compton_amplitude": {"bound_type": "none", "max": 10000000.0, "min": 0.10, "value": 100000.0},
"compton_angle": {"bound_type": "lohi", "max": 100.0, "min": 80.0, "value": 90.0},
"compton_f_step": {"bound_type": "fixed", "max": 0.01, "min": 0.0, "value": 0.01},
"compton_f_tail": {"bound_type": "fixed", "max": 0.3, "min": 0.0001, "value": 0.05},
"compton_fwhm_corr": {
"bound_type": "lohi",
"description": "fwhm Coef, Compton",
"max": 2.5,
"min": 0.5,
"value": 1.5,
},
"compton_gamma": {"bound_type": "lohi", "max": 4.2, "min": 3.8, "value": 4.0},
"compton_hi_f_tail": {"bound_type": "fixed", "max": 1.0, "min": 1e-06, "value": 0.1},
"compton_hi_gamma": {"bound_type": "fixed", "max": 3.0, "min": 0.1, "value": 2.0},
"e_linear": {
"bound_type": "lohi",
"description": "E Calib. Coef, a1",
"max": 0.011,
"min": 0.009,
"tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
"value": 0.01,
},
"e_offset": {
"bound_type": "lohi",
"description": "E Calib. Coef, a0",
"max": 0.015,
"min": -0.01,
"tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
"value": 0.0,
},
"e_quadratic": {
"bound_type": "lohi",
"description": "E Calib. Coef, a2",
"max": 1e-06,
"min": -1e-06,
"tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
"value": 0.0,
},
"fwhm_fanoprime": {
"bound_type": "fixed",
"description": "fwhm Coef, b2",
"max": 0.0001,
"min": 1e-07,
"value": 1e-06,
},
"fwhm_offset": {
"bound_type": "lohi",
"description": "fwhm Coef, b1 [keV]",
"max": 0.19,
"min": 0.16,
"tool_tip": "width**2 = (b1/2.3548)**2 + 3.85*b2*E",
"value": 0.178,
},
"non_fitting_values": {
"element_list": ["Ar", "Fe", "Ce_L", "Pt_M"],
"energy_bound_low": {"value": 1.5, "default_value": 1.5, "description": "E low [keV]"},
"energy_bound_high": {"value": 13.5, "default_value": 13.5, "description": "E high [keV]"},
"epsilon": 3.51, # electron hole energy
"background_width": 0.5,
},
}
def get_para():
"""More to be added here.
The para_dict will be updated
based on different algorithms.
Use copy for dict.
"""
return default_param
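# A minimal sketch, not part of the original module, of how one of the
# strategy dicts above could be combined with the default parameters: copy
# ``default_param`` and override each entry's bound_type. The helper name
# ``apply_strategy`` is hypothetical.
import copy


def apply_strategy(params, strategy):
    """Return a deep copy of ``params`` with bound types taken from ``strategy``."""
    new_params = copy.deepcopy(params)
    for name, bound_type in strategy.items():
        entry = new_params.get(name)
        if isinstance(entry, dict) and "bound_type" in entry:
            entry["bound_type"] = bound_type
    return new_params


# Example usage (hypothetical): relax only the energy-calibration terms.
# calibration_params = apply_strategy(get_para(), e_calibration)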
"""
Parameter dictionary are included for xrf fitting.
Element data not included.
Some parameters are defined as
bound_type :
fixed: value is fixed
lohi: with both low and high boundary
lo: with low boundary
hi: with high boundary
none: no fitting boundary
Different fitting strategies are included to turn on or turn off some
parameters. Those strategies are default, linear, free_energy, free_all and
e_calibration. They are empirical experience from authors of the original code.
"""
# old param dict, keep it here for now.
para_dict = {
"coherent_sct_amplitude": {"bound_type": "none", "min": 7.0, "max": 8.0, "value": 6.0},
"coherent_sct_energy": {"bound_type": "none", "min": 10.4, "max": 12.4, "value": 11.8},
"compton_amplitude": {"bound_type": "none", "min": 0.0, "max": 10.0, "value": 5.0},
"compton_angle": {"bound_type": "lohi", "min": 75.0, "max": 90.0, "value": 90.0},
"compton_f_step": {"bound_type": "lohi", "min": 0.0, "max": 1.5, "value": 0.1},
"compton_f_tail": {"bound_type": "lohi", "min": 0.0, "max": 3.0, "value": 0.8},
"compton_fwhm_corr": {"bound_type": "lohi", "min": 0.1, "max": 3.0, "value": 1.4},
"compton_gamma": {"bound_type": "none", "min": 0.1, "max": 10.0, "value": 1.0},
"compton_hi_f_tail": {"bound_type": "none", "min": 1e-06, "max": 1.0, "value": 0.01},
"compton_hi_gamma": {"bound_type": "none", "min": 0.1, "max": 3.0, "value": 1.0},
"e_linear": {"bound_type": "fixed", "min": 0.001, "max": 0.1, "value": 1.0},
"e_offset": {"bound_type": "fixed", "min": -0.2, "max": 0.2, "value": 0.0},
"e_quadratic": {"bound_type": "none", "min": -0.0001, "max": 0.0001, "value": 0.0},
"f_step_linear": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"f_step_offset": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"f_step_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.0, "value": 0.0},
"f_tail_linear": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.01},
"f_tail_offset": {"bound_type": "none", "min": 0.0, "max": 0.1, "value": 0.04},
"f_tail_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.01, "value": 0.0},
"fwhm_fanoprime": {"bound_type": "lohi", "min": 1e-06, "max": 0.05, "value": 0.00012},
"fwhm_offset": {"bound_type": "lohi", "min": 0.005, "max": 0.5, "value": 0.12},
"gamma_linear": {"bound_type": "none", "min": 0.0, "max": 3.0, "value": 0.0},
"gamma_offset": {"bound_type": "none", "min": 0.1, "max": 10.0, "value": 2.0},
"gamma_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.0, "value": 0.0},
"ge_escape": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"kb_f_tail_linear": {"bound_type": "none", "min": 0.0, "max": 0.02, "value": 0.0},
"kb_f_tail_offset": {"bound_type": "none", "min": 0.0, "max": 0.2, "value": 0.0},
"kb_f_tail_quadratic": {"bound_type": "none", "min": 0.0, "max": 0.0, "value": 0.0},
"linear": {"bound_type": "none", "min": 0.0, "max": 1.0, "value": 0.0},
"pileup0": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup1": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup2": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup3": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup4": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup5": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup6": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup7": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"pileup8": {"bound_type": "none", "min": -10.0, "max": 1.10, "value": 1e-10},
"si_escape": {"bound_type": "none", "min": 0.0, "max": 0.5, "value": 0.0},
"snip_width": {"bound_type": "none", "min": 0.1, "max": 2.82842712475, "value": 0.15},
}
# fitting strategies
adjust_element = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "fixed",
"compton_amplitude": "none",
"compton_angle": "fixed",
"compton_f_step": "fixed",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "lohi",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "fixed",
"e_offset": "fixed",
"e_quadratic": "fixed",
"fwhm_fanoprime": "fixed",
"fwhm_offset": "fixed",
"non_fitting_values": "fixed",
}
e_calibration = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "fixed",
"compton_amplitude": "none",
"compton_angle": "fixed",
"compton_f_step": "fixed",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "fixed",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "lohi",
"e_offset": "lohi",
"e_quadratic": "fixed",
"fwhm_fanoprime": "fixed",
"fwhm_offset": "fixed",
"non_fitting_values": "fixed",
}
linear = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "fixed",
"compton_amplitude": "none",
"compton_angle": "fixed",
"compton_f_step": "fixed",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "fixed",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "fixed",
"e_offset": "fixed",
"e_quadratic": "fixed",
"fwhm_fanoprime": "fixed",
"fwhm_offset": "fixed",
"non_fitting_values": "fixed",
}
free_more = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "lohi",
"compton_amplitude": "none",
"compton_angle": "lohi",
"compton_f_step": "lohi",
"compton_f_tail": "fixed",
"compton_fwhm_corr": "lohi",
"compton_gamma": "lohi",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "lohi",
"e_offset": "lohi",
"e_quadratic": "lohi",
"fwhm_fanoprime": "lohi",
"fwhm_offset": "lohi",
"non_fitting_values": "fixed",
}
fit_with_tail = {
"coherent_sct_amplitude": "none",
"coherent_sct_energy": "lohi",
"compton_amplitude": "none",
"compton_angle": "lohi",
"compton_f_step": "fixed",
"compton_f_tail": "lohi",
"compton_fwhm_corr": "lohi",
"compton_gamma": "fixed",
"compton_hi_f_tail": "fixed",
"compton_hi_gamma": "fixed",
"e_linear": "lohi",
"e_offset": "lohi",
"e_quadratic": "lohi",
"fwhm_fanoprime": "lohi",
"fwhm_offset": "lohi",
"non_fitting_values": "fixed",
}
default_param = {
"coherent_sct_amplitude": {"bound_type": "none", "max": 10000000.0, "min": 0.10, "value": 100000},
"coherent_sct_energy": {
"bound_type": "lohi",
"description": "Incident E [keV]",
"max": 13.0,
"min": 9.0,
"value": 10.0,
},
"compton_amplitude": {"bound_type": "none", "max": 10000000.0, "min": 0.10, "value": 100000.0},
"compton_angle": {"bound_type": "lohi", "max": 100.0, "min": 80.0, "value": 90.0},
"compton_f_step": {"bound_type": "fixed", "max": 0.01, "min": 0.0, "value": 0.01},
"compton_f_tail": {"bound_type": "fixed", "max": 0.3, "min": 0.0001, "value": 0.05},
"compton_fwhm_corr": {
"bound_type": "lohi",
"description": "fwhm Coef, Compton",
"max": 2.5,
"min": 0.5,
"value": 1.5,
},
"compton_gamma": {"bound_type": "lohi", "max": 4.2, "min": 3.8, "value": 4.0},
"compton_hi_f_tail": {"bound_type": "fixed", "max": 1.0, "min": 1e-06, "value": 0.1},
"compton_hi_gamma": {"bound_type": "fixed", "max": 3.0, "min": 0.1, "value": 2.0},
"e_linear": {
"bound_type": "lohi",
"description": "E Calib. Coef, a1",
"max": 0.011,
"min": 0.009,
"tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
"value": 0.01,
},
"e_offset": {
"bound_type": "lohi",
"description": "E Calib. Coef, a0",
"max": 0.015,
"min": -0.01,
"tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
"value": 0.0,
},
"e_quadratic": {
"bound_type": "lohi",
"description": "E Calib. Coef, a2",
"max": 1e-06,
"min": -1e-06,
"tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
"value": 0.0,
},
"fwhm_fanoprime": {
"bound_type": "fixed",
"description": "fwhm Coef, b2",
"max": 0.0001,
"min": 1e-07,
"value": 1e-06,
},
"fwhm_offset": {
"bound_type": "lohi",
"description": "fwhm Coef, b1 [keV]",
"max": 0.19,
"min": 0.16,
"tool_tip": "width**2 = (b1/2.3548)**2 + 3.85*b2*E",
"value": 0.178,
},
"non_fitting_values": {
"element_list": ["Ar", "Fe", "Ce_L", "Pt_M"],
"energy_bound_low": {"value": 1.5, "default_value": 1.5, "description": "E low [keV]"},
"energy_bound_high": {"value": 13.5, "default_value": 13.5, "description": "E high [keV]"},
"epsilon": 3.51, # electron hole energy
"background_width": 0.5,
},
}
def get_para():
"""More to be added here.
The para_dict will be updated
based on different algorithms.
Use copy for dict.
"""
return default_param | 0.684897 | 0.513607 |
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
from ..utils import angle_grid, bin_edges_to_centers, radial_grid
class BinnedStatisticDD(object):
std_ = ("mean", "median", "count", "sum", "std")
def __init__(self, sample, statistic="mean", bins=10, range=None, mask=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A
histogram divides the space into bins, and returns the count
of the number of points in each bin. This function allows the
computation of the sum, mean, median, or other statistic of
the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
statistic : string or callable, optional
The statistic to compute (default is 'mean'). To compute multiple
statistics efficiently, override this at __call__ time.
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each
dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the
edges are not given explicitly in `bins`. Defaults to the
minimum and maximum values along each dimension.
mask : array_like
array of ones and zeros with total size N (see documentation
for `sample`). Values with mask==0 will be ignored.
Note: If using numpy versions < 1.10.0, you may notice slow behavior of
this constructor. This has to do with digitize, which was optimized
from 1.10.0 onwards.
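Examples
--------
A minimal sketch of typical usage, assuming a small random 2-D sample;
the same binning is reused for a second statistic:

>>> import numpy as np
>>> sample = np.random.random((100, 2))
>>> values = np.random.random(100)
>>> binned = BinnedStatisticDD(sample, statistic="mean", bins=5)
>>> means = binned(values)             # (5, 5) array, NaN in empty bins
>>> counts = binned(values, "count")   # reuse the binning for a new statistic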
"""
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, self.D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, self.D = sample.shape
self.nbin = np.empty(self.D, int)
self.edges = self.D * [None]
self._centers = self.D * [None]
dedges = self.D * [None]
try:
M = len(bins)
if M != self.D:
raise AttributeError("The dimension of bins must be equal " "to the dimension of the sample x.")
except TypeError:
bins = self.D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(self.D)
smax = np.zeros(self.D)
for i in np.arange(self.D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - 0.5
smax[i] = smax[i] + 0.5
# Create edge arrays
for i in np.arange(self.D):
if np.isscalar(bins[i]):
self.nbin[i] = bins[i] + 2 # +2 for outlier bins
self.edges[i] = np.linspace(smin[i], smax[i], self.nbin[i] - 1)
else:
self.edges[i] = np.asarray(bins[i], float)
self.nbin[i] = len(self.edges[i]) + 1 # +1 for outlier bins
self._centers[i] = bin_edges_to_centers(self.edges[i])
dedges[i] = np.diff(self.edges[i])
self.nbin = np.asarray(self.nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(self.D):
# Apply mask in a non-ideal way by setting value outside range.
# Would be better to do this using bincount "weights", perhaps.
thissample = sample[:, i]
if mask is not None:
thissample[mask == 0] = self.edges[i][0] - 0.01 * (1 + np.fabs(self.edges[i][0]))
Ncount[i] = np.digitize(thissample, self.edges[i])
# Using digitize, values that fall on an edge are put in the
# right bin. For the rightmost bin, we want values equal to
# the right edge to be counted in the last bin, and not as an
# outlier.
for i in np.arange(self.D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) == np.around(self.edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
self.ni = self.nbin.argsort()
self.xy = np.zeros(N, int)
for i in np.arange(0, self.D - 1):
self.xy += Ncount[self.ni[i]] * self.nbin[self.ni[i + 1 :]].prod()
self.xy += Ncount[self.ni[-1]]
self._flatcount = None # will be computed if needed
self._argsort_index = None
self.statistic = statistic
@property
def binmap(self):
"""Return the map of the bins per dimension.
i.e. reverse transformation of flattened to unflattened bins
Returns
-------
D np.ndarrays of length N where D is the number of dimensions
and N is the number of data points.
For each dimension, the min bin id is 0 and max n+1 where n is
the number of bins in that dimension. The ids 0 and n+1 mark
the outliers of the bins.
"""
(N,) = self.xy.shape
binmap = np.zeros((self.D, N), dtype=int)
denominator = 1
for i in range(self.D):
ind = self.D - i - 1
subbinmap = self.xy // denominator
if i < self.D - 1:
subbinmap = subbinmap % self.nbin[self.ni[ind - 1]]
binmap[ind] = subbinmap
denominator *= self.nbin[self.ni[ind]]
return binmap
@property
def flatcount(self):
# Compute flatcount the first time it is accessed. Some statistics
# never access it.
if self._flatcount is None:
self._flatcount = np.bincount(self.xy, None)
return self._flatcount
@property
def argsort_index(self):
# Compute argsort the first time it is accessed. Some statistics
# never access it.
if self._argsort_index is None:
self._argsort_index = self.xy.argsort()
return self._argsort_index
@property
def bin_edges(self):
"""
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
"""
return self.edges
@property
def bin_centers(self):
"""
bin_centers : array of dtype float
Return the bin centers ``(length(statistic))``.
"""
return self._centers
@property
def statistic(self):
return self._statistic
@statistic.setter
def statistic(self, new_statistic):
if not callable(new_statistic) and new_statistic not in self.std_:
raise ValueError("invalid statistic %r" % (new_statistic,))
else:
self._statistic = new_statistic
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `sample` in the constructor.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
if statistic is None:
statistic = self.statistic
self.result = np.empty(self.nbin.prod(), float)
if statistic == "mean":
self.result.fill(np.nan)
flatsum = np.bincount(self.xy, values)
a = self.flatcount.nonzero()
self.result[a] = flatsum[a] / self.flatcount[a]
elif statistic == "std":
self.result.fill(0)
flatsum = np.bincount(self.xy, values)
flatsum2 = np.bincount(self.xy, values**2)
a = self.flatcount.nonzero()
self.result[a] = np.sqrt(flatsum2[a] / self.flatcount[a] - (flatsum[a] / self.flatcount[a]) ** 2)
elif statistic == "count":
self.result.fill(0)
a = np.arange(len(self.flatcount))
self.result[a] = self.flatcount
elif statistic == "sum":
self.result.fill(0)
flatsum = np.bincount(self.xy, values)
a = np.arange(len(flatsum))
self.result[a] = flatsum
elif callable(statistic) or statistic == "median":
if statistic == "median":
internal_statistic = np.median
else:
internal_statistic = statistic
with warnings.catch_warnings():
# Numpy generates a warnings for mean/std/... with empty list
warnings.filterwarnings("ignore", category=RuntimeWarning)
old = np.seterr(invalid="ignore")
try:
null = internal_statistic([])
except Exception:
null = np.nan
np.seterr(**old)
self.result.fill(null)
vfs = values[self.argsort_index]
i = 0
for j, k in enumerate(self.flatcount):
if k > 0:
self.result[j] = internal_statistic(vfs[i : i + k])
i += k
# Shape into a proper matrix
self.result = self.result.reshape(np.sort(self.nbin))
ni = np.copy(self.ni)
for i in np.arange(self.nbin.size):
j = ni.argsort()[i]
self.result = self.result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = self.D * [slice(1, -1)]
self.result = self.result[tuple(core)]
if (self.result.shape != self.nbin - 2).any():
raise RuntimeError("Internal Shape Error")
return self.result
class BinnedStatistic1D(BinnedStatisticDD):
def __init__(self, x, statistic="mean", bins=10, range=None, mask=None):
"""
A refactored version of scipy.stats.binned_statistic to improve
performance for the case where binning doesn't need to be
re-initialized on every call.
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean,
median, or other statistic of the values within each bin.
Parameters
----------
x : array_like
A sequence of values to be binned.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in
the given range (10 by default). If `bins` is a sequence, it
defines the bin edges, including the rightmost edge, allowing for
non-uniform bin widths. Values in `x` that are smaller than the lowest
bin edge are assigned to bin number 0; values beyond the highest bin
are assigned to ``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
mask : array_like
ones and zeros with the same shape as `x`.
Values with mask==0 will be ignored.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including
1, but excluding 2) and the second ``[2, 3)``. The last bin, however,
is ``[3, 4]``, which *includes* 4.
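Examples
--------
A minimal sketch of typical usage; the object is constructed once and then
called with several value arrays, which is where the performance benefit
over scipy.stats.binned_statistic comes from:

>>> import numpy as np
>>> x = np.linspace(0.0, 10.0, 1000)
>>> binner = BinnedStatistic1D(x, statistic="mean", bins=20)
>>> mean_sin = binner(np.sin(x))
>>> mean_cos = binner(np.cos(x))   # reuses the precomputed bin assignment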
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
super(BinnedStatistic1D, self).__init__([x], statistic=statistic, bins=bins, range=range, mask=mask)
@property
def bin_edges(self):
"""
bin_edges : 1D array of dtype float
Return the bin edges.
"""
return super(BinnedStatistic1D, self).bin_edges[0]
@property
def bin_centers(self):
"""
bin_centers : 1D array of dtype float
Return the bin centers.
"""
return super(BinnedStatistic1D, self).bin_centers[0]
class BinnedStatistic2D(BinnedStatisticDD):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
mask : array_like
ones and zeros with the same shape as `x`.
Values with mask==0 will be ignored.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
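Examples
--------
A minimal sketch of typical usage, assuming random 1-D coordinate arrays:

>>> import numpy as np
>>> x = np.random.random(500)
>>> y = np.random.random(500)
>>> stat2d = BinnedStatistic2D(x, y, statistic="median", bins=(8, 6))
>>> medians = stat2d(x * y)   # (8, 6) array of per-bin medians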
"""
def __init__(self, x, y, statistic="mean", bins=10, range=None, mask=None):
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
super(BinnedStatistic2D, self).__init__([x, y], statistic=statistic, bins=bins, range=range, mask=mask)
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must
match the dimensions of ``x`` and ``y`` that were passed in when
this object was instantiated.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
return super(BinnedStatistic2D, self).__call__(values, statistic)
class RPhiBinnedStatistic(BinnedStatistic2D):
"""
Create a 2-dimensional histogram by binning a 2-dimensional
image in both radius and phi.
"""
def __init__(self, shape, bins=10, range=None, origin=None, mask=None, r_map=None, statistic="mean"):
"""
Parameters
----------
shape : tuple of ints of length 2.
shape of image.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* number of bins for the two dimensions (nr=nphi=bins),
* number of bins in each dimension (nr, nphi = bins),
* bin edges for the two dimensions (r_edges = phi_edges = bins),
* the bin edges in each dimension (r_edges, phi_edges = bins).
Phi has a range of -pi to pi and is defined as arctan(row/col)
(i.e. x is column and y is row, or "cartesian" format,
not "matrix")
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[rmin, rmax], [phimin, phimax]]. All values outside of this range
will be considered outliers and not tallied in the histogram.
See "bins" parameter for definition of phi.
origin : tuple of floats with length 2, optional
location (in pixels) of origin (default: image center).
mask : 2-dimensional np.ndarray of ints, optional
array of zero/non-zero values, with shape `shape`.
zero values will be ignored.
r_map : 2d np.ndarray of floats, optional
The map of pixel radii for each pixel. For example, r_map can be
used to define the radius of each pixel relative to the origin in
reciprocal space (on the Ewald sphere).
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
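Examples
--------
A minimal sketch of typical usage, assuming a random test image binned
about the image center:

>>> import numpy as np
>>> image = np.random.random((256, 256))
>>> rphi = RPhiBinnedStatistic(image.shape, bins=(10, 36))
>>> mean_map = rphi(image)   # shape (10, 36), NaN where a bin is empty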
"""
if origin is None:
origin = (shape[0] - 1) / 2.0, (shape[1] - 1) / 2.0
if r_map is None:
r_map = radial_grid(origin, shape)
phi_map = angle_grid(origin, shape)
self.expected_shape = tuple(shape)
if mask is not None:
if mask.shape != self.expected_shape:
raise ValueError(
'"mask" has incorrect shape. '
" Expected: " + str(self.expected_shape) + " Received: " + str(mask.shape)
)
mask = mask.reshape(-1)
super(RPhiBinnedStatistic, self).__init__(
r_map.reshape(-1), phi_map.reshape(-1), statistic, bins=bins, mask=mask, range=range
)
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must
match the ``shape`` that passed in when this object was
instantiated.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
# check for what I believe could be a common error
if values.shape != self.expected_shape:
raise ValueError(
'"values" has incorrect shape.'
" Expected: " + str(self.expected_shape) + " Received: " + str(values.shape)
)
return super(RPhiBinnedStatistic, self).__call__(values.reshape(-1), statistic)
class RadialBinnedStatistic(BinnedStatistic1D):
"""
Create a 1-dimensional histogram by binning a 2-dimensional
image in radius.
"""
def __init__(self, shape, bins=10, range=None, origin=None, mask=None, r_map=None, statistic="mean"):
"""
Parameters
----------
shape : tuple of ints of length 2.
shape of image.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in
the given range (10 by default). If `bins` is a sequence, it
defines the bin edges, including the rightmost edge, allowing for
non-uniform bin widths. Values smaller than the lowest bin edge are
assigned to bin number 0; values beyond the highest bin are assigned
to ``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
See "bins" parameter for definition of phi.
origin : tuple of floats with length 2, optional
location (in pixels) of origin (default: image center).
mask : 2-dimensional np.ndarray of ints, optional
array of zero/non-zero values, with shape `shape`.
zero values will be ignored.
r_map : 2d np.ndarray of floats, optional
The map of pixel radii for each pixel. This is useful when the
detector has some curvature or is a more complex 2D shape embedded
in a 3D space (for example, Ewald curvature).
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
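Examples
--------
A minimal sketch of typical usage: an azimuthally averaged radial profile
of a random test image:

>>> import numpy as np
>>> image = np.random.random((128, 128))
>>> radial = RadialBinnedStatistic(image.shape, bins=100)
>>> profile = radial(image)    # length-100 radial mean profile
>>> r = radial.bin_centers     # radius (in pixels) at each bin center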
"""
if origin is None:
origin = (shape[0] - 1) / 2, (shape[1] - 1) / 2
if r_map is None:
r_map = radial_grid(origin, shape)
self.expected_shape = tuple(shape)
if mask is not None:
if mask.shape != self.expected_shape:
raise ValueError(
'"mask" has incorrect shape. '
" Expected: " + str(self.expected_shape) + " Received: " + str(mask.shape)
)
mask = mask.reshape(-1)
super(RadialBinnedStatistic, self).__init__(
r_map.reshape(-1), statistic, bins=bins, mask=mask, range=range
)
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must
match the ``shape`` that passed in when this object was
instantiated.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
# check for what I believe could be a common error
if values.shape != self.expected_shape:
raise ValueError(
'"values" has incorrect shape.'
" Expected: " + str(self.expected_shape) + " Received: " + str(values.shape)
)
return super(RadialBinnedStatistic, self).__call__(values.reshape(-1), statistic)
import warnings
import numpy as np
from ..utils import angle_grid, bin_edges_to_centers, radial_grid
class BinnedStatisticDD(object):
std_ = ("mean", "median", "count", "sum", "std")
def __init__(self, sample, statistic="mean", bins=10, range=None, mask=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A
histogram divides the space into bins, and returns the count
of the number of points in each bin. This function allows the
computation of the sum, mean, median, or other statistic of
the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
statistic : string or callable, optional
The statistic to compute (default is 'mean'). To compute multiple
statistics efficiently, override this at __call__ time.
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each
dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the
edges are not given explicitely in `bins`. Defaults to the
minimum and maximum values along each dimension.
mask : array_like
array of ones and zeros with total size N (see documentation
for `sample`). Values with mask==0 will be ignored.
Note: If using numpy versions < 1.10.0, you may notice slow behavior of
this constructor. This has to do with digitize, which was optimized
from 1.10.0 onwards.
"""
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, self.D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, self.D = sample.shape
self.nbin = np.empty(self.D, int)
self.edges = self.D * [None]
self._centers = self.D * [None]
dedges = self.D * [None]
try:
M = len(bins)
if M != self.D:
raise AttributeError("The dimension of bins must be equal " "to the dimension of the sample x.")
except TypeError:
bins = self.D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(self.D)
smax = np.zeros(self.D)
for i in np.arange(self.D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - 0.5
smax[i] = smax[i] + 0.5
# Create edge arrays
for i in np.arange(self.D):
if np.isscalar(bins[i]):
self.nbin[i] = bins[i] + 2 # +2 for outlier bins
self.edges[i] = np.linspace(smin[i], smax[i], self.nbin[i] - 1)
else:
self.edges[i] = np.asarray(bins[i], float)
self.nbin[i] = len(self.edges[i]) + 1 # +1 for outlier bins
self._centers[i] = bin_edges_to_centers(self.edges[i])
dedges[i] = np.diff(self.edges[i])
self.nbin = np.asarray(self.nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(self.D):
# Apply mask in a non-ideal way by setting value outside range.
# Would be better to do this using bincount "weights", perhaps.
thissample = sample[:, i]
if mask is not None:
thissample[mask == 0] = self.edges[i][0] - 0.01 * (1 + np.fabs(self.edges[i][0]))
Ncount[i] = np.digitize(thissample, self.edges[i])
# Using digitize, values that fall on an edge are put in the
# right bin. For the rightmost bin, we want values equal to
# the right edge to be counted in the last bin, and not as an
# outlier.
for i in np.arange(self.D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) == np.around(self.edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
self.ni = self.nbin.argsort()
self.xy = np.zeros(N, int)
for i in np.arange(0, self.D - 1):
self.xy += Ncount[self.ni[i]] * self.nbin[self.ni[i + 1 :]].prod()
self.xy += Ncount[self.ni[-1]]
self._flatcount = None # will be computed if needed
self._argsort_index = None
self.statistic = statistic
@property
def binmap(self):
"""Return the map of the bins per dimension.
i.e. reverse transformation of flattened to unflattened bins
Returns
-------
D np.ndarrays of length N where D is the number of dimensions
and N is the number of data points.
For each dimension, the min bin id is 0 and max n+1 where n is
the number of bins in that dimension. The ids 0 and n+1 mark
the outliers of the bins.
"""
(N,) = self.xy.shape
binmap = np.zeros((self.D, N), dtype=int)
denominator = 1
for i in range(self.D):
ind = self.D - i - 1
subbinmap = self.xy // denominator
if i < self.D - 1:
subbinmap = subbinmap % self.nbin[self.ni[ind - 1]]
binmap[ind] = subbinmap
denominator *= self.nbin[self.ni[ind]]
return binmap
@property
def flatcount(self):
# Compute flatcount the first time it is accessed. Some statistics
# never access it.
if self._flatcount is None:
self._flatcount = np.bincount(self.xy, None)
return self._flatcount
@property
def argsort_index(self):
# Compute argsort the first time it is accessed. Some statistics
# never access it.
if self._argsort_index is None:
self._argsort_index = self.xy.argsort()
return self._argsort_index
@property
def bin_edges(self):
"""
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
"""
return self.edges
@property
def bin_centers(self):
"""
bin_centers : array of dtype float
Return the bin centers ``(length(statistic))``.
"""
return self._centers
@property
def statistic(self):
return self._statistic
@statistic.setter
def statistic(self, new_statistic):
if not callable(new_statistic) and new_statistic not in self.std_:
raise ValueError("invalid statistic %r" % (new_statistic,))
else:
self._statistic = new_statistic
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `sample` in the constructor.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
if statistic is None:
statistic = self.statistic
self.result = np.empty(self.nbin.prod(), float)
if statistic == "mean":
self.result.fill(np.nan)
flatsum = np.bincount(self.xy, values)
a = self.flatcount.nonzero()
self.result[a] = flatsum[a] / self.flatcount[a]
elif statistic == "std":
self.result.fill(0)
flatsum = np.bincount(self.xy, values)
flatsum2 = np.bincount(self.xy, values**2)
a = self.flatcount.nonzero()
self.result[a] = np.sqrt(flatsum2[a] / self.flatcount[a] - (flatsum[a] / self.flatcount[a]) ** 2)
elif statistic == "count":
self.result.fill(0)
a = np.arange(len(self.flatcount))
self.result[a] = self.flatcount
elif statistic == "sum":
self.result.fill(0)
flatsum = np.bincount(self.xy, values)
a = np.arange(len(flatsum))
self.result[a] = flatsum
elif callable(statistic) or statistic == "median":
if statistic == "median":
internal_statistic = np.median
else:
internal_statistic = statistic
with warnings.catch_warnings():
# Numpy generates a warnings for mean/std/... with empty list
warnings.filterwarnings("ignore", category=RuntimeWarning)
old = np.seterr(invalid="ignore")
try:
null = internal_statistic([])
except Exception:
null = np.nan
np.seterr(**old)
self.result.fill(null)
vfs = values[self.argsort_index]
i = 0
for j, k in enumerate(self.flatcount):
if k > 0:
self.result[j] = internal_statistic(vfs[i : i + k])
i += k
# Shape into a proper matrix
self.result = self.result.reshape(np.sort(self.nbin))
ni = np.copy(self.ni)
for i in np.arange(self.nbin.size):
j = ni.argsort()[i]
self.result = self.result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = self.D * [slice(1, -1)]
self.result = self.result[tuple(core)]
if (self.result.shape != self.nbin - 2).any():
raise RuntimeError("Internal Shape Error")
return self.result
class BinnedStatistic1D(BinnedStatisticDD):
def __init__(self, x, statistic="mean", bins=10, range=None, mask=None):
"""
A refactored version of scipy.stats.binned_statistic to improve
performance for the case where binning doesn't need to be
re-initialized on every call.
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean,
median, or other statistic of the values within each bin.
Parameters
----------
x : array_like
A sequence of values to be binned.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in
the given range (10 by default). If `bins` is a sequence, it
defines the bin edges, including the rightmost edge, allowing for
non-uniform bin widths. Values in `x` that are smaller than lowest
bin edge are assigned to bin number 0, values beyond the highest
bin are assigned to ``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
mask : array_like
ones and zeros with the same shape as `x`.
Values with mask==0 will be ignored.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including
1, but excluding 2) and the second ``[2, 3)``. The last bin, however,
is ``[3, 4]``, which *includes* 4.
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
super(BinnedStatistic1D, self).__init__([x], statistic=statistic, bins=bins, range=range, mask=mask)
@property
def bin_edges(self):
"""
bin_edges : 1D array of dtype float
Return the bin edges.
"""
return super(BinnedStatistic1D, self).bin_edges[0]
@property
def bin_centers(self):
"""
bin_centers : 1D array of dtype float
Return the bin centers.
"""
return super(BinnedStatistic1D, self).bin_centers[0]
class BinnedStatistic2D(BinnedStatisticDD):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
mask : array_like
ones and zeros with the same shape as `x`.
Values with mask==0 will be ignored.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
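Examples
--------
Illustrative sketch only; the random samples stand in for any paired data:
>>> import numpy as np
>>> x = np.random.random(1000)
>>> y = np.random.random(1000)
>>> stat2d = BinnedStatistic2D(x, y, statistic='count', bins=(4, 4))
>>> counts = stat2d(np.ones_like(x))
>>> counts.shape
(4, 4)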
"""
def __init__(self, x, y, statistic="mean", bins=10, range=None, mask=None):
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
super(BinnedStatistic2D, self).__init__([x, y], statistic=statistic, bins=bins, range=range, mask=mask)
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must
match the dimensions of ``x`` and ``y`` that were passed in when
this object was instantiated.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
return super(BinnedStatistic2D, self).__call__(values, statistic)
class RPhiBinnedStatistic(BinnedStatistic2D):
"""
Create a 2-dimensional histogram by binning a 2-dimensional
image in both radius and phi.
"""
def __init__(self, shape, bins=10, range=None, origin=None, mask=None, r_map=None, statistic="mean"):
"""
Parameters:
-----------
shape : tuple of ints of length 2.
shape of image.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* number of bins for the two dimensions (nr=nphi=bins),
* number of bins in each dimension (nr, nphi = bins),
* bin edges for the two dimensions (r_edges = phi_edges = bins),
* the bin edges in each dimension (r_edges, phi_edges = bins).
Phi has a range of -pi to pi and is defined as arctan(row/col)
(i.e. x is column and y is row, or "cartesian" format,
not "matrix")
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[rmin, rmax], [phimin, phimax]]. All values outside of this range
will be considered outliers and not tallied in the histogram.
See "bins" parameter for definition of phi.
origin : tuple of floats with length 2, optional
location (in pixels) of origin (default: image center).
mask : 2-dimensional np.ndarray of ints, optional
array of zero/non-zero values, with shape `shape`.
zero values will be ignored.
r_map : 2d np.ndarray of floats, optional
The map of pixel radii for each pixel. For example, r_map can be
used to define the radius of each pixel relative to the origin in
reciprocal space (on the Ewald sphere).
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
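Examples
--------
Illustrative sketch only; the random image stands in for detector data:
>>> import numpy as np
>>> img = np.random.random((128, 128))
>>> rphi = RPhiBinnedStatistic(img.shape, bins=(10, 36), statistic='mean')
>>> result = rphi(img)
>>> result.shape
(10, 36)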
"""
if origin is None:
origin = (shape[0] - 1) / 2.0, (shape[1] - 1) / 2.0
if r_map is None:
r_map = radial_grid(origin, shape)
phi_map = angle_grid(origin, shape)
self.expected_shape = tuple(shape)
if mask is not None:
if mask.shape != self.expected_shape:
raise ValueError(
'"mask" has incorrect shape. '
" Expected: " + str(self.expected_shape) + " Received: " + str(mask.shape)
)
mask = mask.reshape(-1)
super(RPhiBinnedStatistic, self).__init__(
r_map.reshape(-1), phi_map.reshape(-1), statistic, bins=bins, mask=mask, range=range
)
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must
match the ``shape`` that was passed in when this object was
instantiated.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
# check for what I believe could be a common error
if values.shape != self.expected_shape:
raise ValueError(
'"values" has incorrect shape.'
" Expected: " + str(self.expected_shape) + " Received: " + str(values.shape)
)
return super(RPhiBinnedStatistic, self).__call__(values.reshape(-1), statistic)
class RadialBinnedStatistic(BinnedStatistic1D):
"""
Create a 1-dimensional histogram by binning a 2-dimensional
image in radius.
"""
def __init__(self, shape, bins=10, range=None, origin=None, mask=None, r_map=None, statistic="mean"):
"""
Parameters:
-----------
shape : tuple of ints of length 2.
shape of image.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in
the given range (10 by default). If `bins` is a sequence, it
defines the bin edges, including the rightmost edge, allowing for
non-uniform bin widths. Values in `x` that are smaller than lowest
bin edge are assigned to bin number 0, values beyond the highest
bin are assigned to ``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
origin : tuple of floats with length 2, optional
location (in pixels) of origin (default: image center).
mask : 2-dimensional np.ndarray of ints, optional
array of zero/non-zero values, with shape `shape`.
zero values will be ignored.
r_map : 2d np.ndarray of floats, optional
The map of pixel radii for each pixel. This is useful when the
detector has some curvature or is a more complex 2D shape embedded
in a 3D space (for example, Ewald curvature).
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
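Examples
--------
Illustrative sketch only; the random image stands in for detector data:
>>> import numpy as np
>>> img = np.random.random((256, 256))
>>> radial = RadialBinnedStatistic(img.shape, bins=100, statistic='mean')
>>> profile = radial(img)
>>> profile.shape
(100,)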
"""
if origin is None:
origin = (shape[0] - 1) / 2, (shape[1] - 1) / 2
if r_map is None:
r_map = radial_grid(origin, shape)
self.expected_shape = tuple(shape)
if mask is not None:
if mask.shape != self.expected_shape:
raise ValueError(
'"mask" has incorrect shape. '
" Expected: " + str(self.expected_shape) + " Received: " + str(mask.shape)
)
mask = mask.reshape(-1)
super(RadialBinnedStatistic, self).__init__(
r_map.reshape(-1), statistic, bins=bins, mask=mask, range=range
)
def __call__(self, values, statistic=None):
"""
Parameters
----------
values : array_like
The values on which the statistic will be computed. This must
match the ``shape`` that was passed in when this object was
instantiated.
statistic : string or callable, optional
The statistic to compute (default is whatever was passed in when
this object was instantiated).
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
Returns
-------
statistic_values : array
The values of the selected statistic in each bin.
"""
# check for what I believe could be a common error
if values.shape != self.expected_shape:
raise ValueError(
'"values" has incorrect shape.'
" Expected: " + str(self.expected_shape) + " Received: " + str(values.shape)
)
return super(RadialBinnedStatistic, self).__call__(values.reshape(-1), statistic) | 0.915832 | 0.682164 |
from __future__ import absolute_import, division, print_function
import logging
import os
import numpy as np
logger = logging.getLogger(__name__)
def save_output(tth, intensity, output_name, q_or_2theta, ext=".chi", err=None, dir_path=None):
"""
Save output diffraction intensities into .chi, .dat or .xye file formats.
If the extension (ext) of the output file is not specified, the data
will be saved as a .chi file.
Parameters
----------
tth : ndarray
twotheta values (degrees) or Q values (Angstroms)
shape (N, ) array
intensity : ndarray
intensity values (N, ) array
output_name : str
name for the saved output diffraction intensities
q_or_2theta : {'Q', '2theta'}
specifies whether `tth` contains Q values (Angstroms) or two theta values (degrees)
ext : {'.chi', '.dat', '.xye'}, optional
save output diffraction intensities into .chi, .dat or
.xye file formats. (If the extension of output file is not
selected it will be saved as a .chi file)
err : ndarray, optional
error value of intensity shape(N, ) array
dir_path : str, optional
new directory path to save the output data files
eg: /Volumes/Data/experiments/data/
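Examples
--------
Illustrative sketch; the output name below is made up and the call
writes 'sample_1.chi' to the current directory:
>>> import numpy as np
>>> tth = np.linspace(1, 60, 100)
>>> intensity = np.exp(-tth / 30.0)
>>> save_output(tth, intensity, 'sample_1', '2theta', ext='.chi') # doctest: +SKIP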
"""
if q_or_2theta not in set(["Q", "2theta"]):
raise ValueError(
"It is expected to provide whether the data is"
" Q values(enter Q) or two theta values"
" (enter 2theta)"
)
if q_or_2theta == "Q":
des = """First column represents Q values (Angstroms) and second
column represents intensities and if there is a third
column it represents the error values of intensities."""
else:
des = """First column represents two theta values (degrees) and
second column represents intensities and if there is
a third column it represents the error values of intensities."""
_validate_input(tth, intensity, err, ext)
file_path = _create_file_path(dir_path, output_name, ext)
with open(file_path, "wb") as f:
_HEADER = """{out_name}
This file contains integrated powder x-ray diffraction
intensities.
{des}
Number of data points in the file : {n_pts}
######################################################"""
_encoding_writer(f, _HEADER.format(n_pts=len(tth), out_name=output_name, des=des))
new_line = "\n"
_encoding_writer(f, new_line)
if err is None:
np.savetxt(f, np.c_[tth, intensity])
else:
np.savetxt(f, np.c_[tth, intensity, err])
def _encoding_writer(f, _HEADER):
"""
Write a string to the output file, encoding it as UTF-8 (Python 3 safe).
Parameters
----------
f : file object
file handle opened in binary ('wb') mode
_HEADER : str
string to be written to the file
"""
f.write(_HEADER.encode("utf-8"))
def gsas_writer(tth, intensity, output_name, mode=None, err=None, dir_path=None):
"""
Save diffraction intensities into .gsas file format
Parameters
----------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
output_name : str
name for the saved output diffraction intensities
mode : {'STD', 'ESD', 'FXYE'}, optional
GSAS file formats, could be 'STD', 'ESD', 'FXYE'
err : ndarray, optional
error value of intensity shape(N, ) array
if err is None then mode is set to 'STD'
dir_path : str, optional
new directory path to save the output data files
eg: /Data/experiments/data/
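Examples
--------
Illustrative sketch; the output name below is made up and the call
writes 'sample_1.gsas' to the current directory:
>>> import numpy as np
>>> tth = np.linspace(1, 60, 100)
>>> intensity = 1000.0 * np.exp(-tth / 30.0)
>>> gsas_writer(tth, intensity, 'sample_1', mode='STD') # doctest: +SKIP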
"""
# save output diffraction intensities into .gsas file extension.
ext = ".gsas"
_validate_input(tth, intensity, err, ext)
file_path = _create_file_path(dir_path, output_name, ext)
max_intensity = 999999
log_scale = np.floor(np.log10(max_intensity / np.max(intensity)))
log_scale = min(log_scale, 0)
scale = 10 ** int(log_scale)
lines = []
title = "Angular Profile"
title += ": %s" % output_name
title += " scale=%g" % scale
title = title[:80]
lines.append("%-80s" % title)
i_bank = 1
n_chan = len(intensity)
# two-theta0 and dtwo-theta in centidegrees
tth0_cdg = tth[0] * 100
dtth_cdg = (tth[-1] - tth[0]) / (len(tth) - 1) * 100
if err is None:
mode = "STD"
if mode == "STD":
n_rec = int(np.ceil(n_chan / 10.0))
l_bank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f STD" % (
i_bank,
n_chan,
n_rec,
tth0_cdg,
dtth_cdg,
0,
0,
)
lines.append("%-80s" % l_bank)
lrecs = ["%2i%6.0f" % (1, ii * scale) for ii in intensity]
for i in range(0, len(lrecs), 10):
lines.append("".join(lrecs[i : i + 10]))
elif mode == "ESD":
n_rec = int(np.ceil(n_chan / 5.0))
l_bank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f ESD" % (
i_bank,
n_chan,
n_rec,
tth0_cdg,
dtth_cdg,
0,
0,
)
lines.append("%-80s" % l_bank)
l_recs = ["%8.0f%8.0f" % (ii, ee * scale) for ii, ee in zip(intensity, err)]
for i in range(0, len(l_recs), 5):
lines.append("".join(l_recs[i : i + 5]))
elif mode == "FXYE":
n_rec = n_chan
l_bank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f FXYE" % (
i_bank,
n_chan,
n_rec,
tth0_cdg,
dtth_cdg,
0,
0,
)
lines.append("%-80s" % l_bank)
l_recs = [
"%22.10f%22.10f%24.10f" % (xx * scale, yy * scale, ee * scale)
for xx, yy, ee in zip(tth, intensity, err)
]
for i in range(len(l_recs)):
lines.append("%-80s" % l_recs[i])
else:
raise ValueError(" Define the GSAS file type ")
lines[-1] = "%-80s" % lines[-1]
rv = "\r\n".join(lines) + "\r\n"
with open(file_path, "wt") as f:
f.write(rv)
def _validate_input(tth, intensity, err, ext):
"""
This function validates all the inputs
Parameters
----------
tth : ndarray
twotheta values (degrees) or Q space values (Angstroms)
intensity : ndarray
intensity values
err : ndarray, optional
error value of intensity
ext : {'.chi', '.dat', '.xye'}
save output diffraction intensities into .chi,
.dat or .xye file formats.
"""
if len(tth) != len(intensity):
raise ValueError("Number of intensities and the number of Q or" " two theta values are different ")
if err is not None:
if len(intensity) != len(err):
raise ValueError("Number of intensities and the number of" " err values are different")
if ext == ".xye" and err is None:
raise ValueError("Provide the Error value of intensity" " (for .xye file format err != None)")
def _create_file_path(dir_path, output_name, ext):
"""
This function creates an output file path to save
the diffraction intensities.
Parameters
----------
dir_path : str
new directory path to save the output data files
eg: /Data/experiments/data/
output_name : str
name for the saved output diffraction intensities
ext : {'.chi', '.dat', '.xye'}
save output diffraction intensities into .chi,
.dat or .xye file formats.
Returns
-------
file_path : str
path to save the diffraction intensities
"""
if (dir_path) is None:
file_path = output_name + ext
elif os.path.exists(dir_path):
file_path = os.path.join(dir_path, output_name) + ext
else:
raise ValueError("The given path does not exist.")
if os.path.isfile(file_path):
logger.info("Output file of diffraction intensities" " already exists")
os.remove(file_path)
return file_path | scikit-beam | /scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/io/save_powder_output.py | save_powder_output.py | from __future__ import absolute_import, division, print_function
import logging
import os
import numpy as np
logger = logging.getLogger(__name__)
def save_output(tth, intensity, output_name, q_or_2theta, ext=".chi", err=None, dir_path=None):
"""
Save output diffraction intensities into .chi, .dat or .xye file formats.
If the extension (ext) of the output file is not specified, the data
will be saved as a .chi file.
Parameters
----------
tth : ndarray
twotheta values (degrees) or Q values (Angstroms)
shape (N, ) array
intensity : ndarray
intensity values (N, ) array
output_name : str
name for the saved output diffraction intensities
q_or_2theta : {'Q', '2theta'}
specifies whether `tth` contains Q values (Angstroms) or two theta values (degrees)
ext : {'.chi', '.dat', '.xye'}, optional
save output diffraction intensities into .chi, .dat or
.xye file formats. (If the extension of output file is not
selected it will be saved as a .chi file)
err : ndarray, optional
error value of intensity shape(N, ) array
dir_path : str, optional
new directory path to save the output data files
eg: /Volumes/Data/experiments/data/
"""
if q_or_2theta not in set(["Q", "2theta"]):
raise ValueError(
"It is expected to provide whether the data is"
" Q values(enter Q) or two theta values"
" (enter 2theta)"
)
if q_or_2theta == "Q":
des = """First column represents Q values (Angstroms) and second
column represents intensities and if there is a third
column it represents the error values of intensities."""
else:
des = """First column represents two theta values (degrees) and
second column represents intensities and if there is
a third column it represents the error values of intensities."""
_validate_input(tth, intensity, err, ext)
file_path = _create_file_path(dir_path, output_name, ext)
with open(file_path, "wb") as f:
_HEADER = """{out_name}
This file contains integrated powder x-ray diffraction
intensities.
{des}
Number of data points in the file : {n_pts}
######################################################"""
_encoding_writer(f, _HEADER.format(n_pts=len(tth), out_name=output_name, des=des))
new_line = "\n"
_encoding_writer(f, new_line)
if err is None:
np.savetxt(f, np.c_[tth, intensity])
else:
np.savetxt(f, np.c_[tth, intensity, err])
def _encoding_writer(f, _HEADER):
"""
Write a string to the output file, encoding it as UTF-8 (Python 3 safe).
Parameters
----------
f : file object
file handle opened in binary ('wb') mode
_HEADER : str
string to be written to the file
"""
f.write(_HEADER.encode("utf-8"))
def gsas_writer(tth, intensity, output_name, mode=None, err=None, dir_path=None):
"""
Save diffraction intensities into .gsas file format
Parameters
----------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
output_name : str
name for the saved output diffraction intensities
mode : {'STD', 'ESD', 'FXYE'}, optional
GSAS file formats, could be 'STD', 'ESD', 'FXYE'
err : ndarray, optional
error value of intensity shape(N, ) array
if err is None then mode is set to 'STD'
dir_path : str, optional
new directory path to save the output data files
eg: /Data/experiments/data/
"""
# save output diffraction intensities into .gsas file extension.
ext = ".gsas"
_validate_input(tth, intensity, err, ext)
file_path = _create_file_path(dir_path, output_name, ext)
max_intensity = 999999
log_scale = np.floor(np.log10(max_intensity / np.max(intensity)))
log_scale = min(log_scale, 0)
scale = 10 ** int(log_scale)
lines = []
title = "Angular Profile"
title += ": %s" % output_name
title += " scale=%g" % scale
title = title[:80]
lines.append("%-80s" % title)
i_bank = 1
n_chan = len(intensity)
# two-theta0 and dtwo-theta in centidegrees
tth0_cdg = tth[0] * 100
dtth_cdg = (tth[-1] - tth[0]) / (len(tth) - 1) * 100
if err is None:
mode = "STD"
if mode == "STD":
n_rec = int(np.ceil(n_chan / 10.0))
l_bank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f STD" % (
i_bank,
n_chan,
n_rec,
tth0_cdg,
dtth_cdg,
0,
0,
)
lines.append("%-80s" % l_bank)
lrecs = ["%2i%6.0f" % (1, ii * scale) for ii in intensity]
for i in range(0, len(lrecs), 10):
lines.append("".join(lrecs[i : i + 10]))
elif mode == "ESD":
n_rec = int(np.ceil(n_chan / 5.0))
l_bank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f ESD" % (
i_bank,
n_chan,
n_rec,
tth0_cdg,
dtth_cdg,
0,
0,
)
lines.append("%-80s" % l_bank)
l_recs = ["%8.0f%8.0f" % (ii, ee * scale) for ii, ee in zip(intensity, err)]
for i in range(0, len(l_recs), 5):
lines.append("".join(l_recs[i : i + 5]))
elif mode == "FXYE":
n_rec = n_chan
l_bank = "BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f FXYE" % (
i_bank,
n_chan,
n_rec,
tth0_cdg,
dtth_cdg,
0,
0,
)
lines.append("%-80s" % l_bank)
l_recs = [
"%22.10f%22.10f%24.10f" % (xx * scale, yy * scale, ee * scale)
for xx, yy, ee in zip(tth, intensity, err)
]
for i in range(len(l_recs)):
lines.append("%-80s" % l_recs[i])
else:
raise ValueError(" Define the GSAS file type ")
lines[-1] = "%-80s" % lines[-1]
rv = "\r\n".join(lines) + "\r\n"
with open(file_path, "wt") as f:
f.write(rv)
def _validate_input(tth, intensity, err, ext):
"""
This function validates all the inputs
Parameters
----------
tth : ndarray
twotheta values (degrees) or Q space values (Angstroms)
intensity : ndarray
intensity values
err : ndarray, optional
error value of intensity
ext : {'.chi', '.dat', '.xye'}
save output diffraction intensities into .chi,
.dat or .xye file formats.
"""
if len(tth) != len(intensity):
raise ValueError("Number of intensities and the number of Q or" " two theta values are different ")
if err is not None:
if len(intensity) != len(err):
raise ValueError("Number of intensities and the number of" " err values are different")
if ext == ".xye" and err is None:
raise ValueError("Provide the Error value of intensity" " (for .xye file format err != None)")
def _create_file_path(dir_path, output_name, ext):
"""
This function creates an output file path to save
the diffraction intensities.
Parameters
----------
dir_path : str
new directory path to save the output data files
eg: /Data/experiments/data/
output_name : str
name for the saved output diffraction intensities
ext : {'.chi', '.dat', '.xye'}
save output diffraction intensities into .chi,
.dat or .xye file formats.
Returns
-------
file_path : str
path to save the diffraction intensities
"""
if (dir_path) is None:
file_path = output_name + ext
elif os.path.exists(dir_path):
file_path = os.path.join(dir_path, output_name) + ext
else:
raise ValueError("The given path does not exist.")
if os.path.isfile(file_path):
logger.info("Output file of diffraction intensities" " already exists")
os.remove(file_path)
return file_path | 0.793426 | 0.499451 |
from __future__ import absolute_import, division, print_function
import os
def load_netCDF(file_name):
"""
This function loads the specified netCDF file format data set (e.g.*.volume
APS-Sector 13 GSECARS extension) file into a numpy array for further
analysis.
Required Dependencies:
netcdf4 : Python/numpy interface to the netCDF ver. 4 library
Package name: netcdf4-python
Install from: https://github.com/Unidata/netcdf4-python
numpy
Cython -- optional
HDF5 C library version 1.8.8 or higher
Install from: ftp://ftp.hdfgroup.org/HDF5/current/src
Be sure to build with '--enable-hl --enable-shared'.
netCDF-4 C library
Install from:
ftp://ftp.unidata.ucar.edu/pub/netcdf. Version 4.1.1 or higher
Be sure to build with '--enable-netcdf-4 --enable-shared', and set
CPPFLAGS="-I $HDF5_DIR/include" and LDFLAGS="-L $HDF5_DIR/lib", where
$HDF5_DIR is the directory where HDF5 was installed.
If you want OPeNDAP support, add '--enable-dap'.
If you want HDF4 SD support, add '--enable-hdf4' and add the location
of the HDF4 headers and library to CPPFLAGS and LDFLAGS.
Parameters
----------
file_name: string
Complete path to the file to be loaded into memory
Returns
-------
md_dict: dict
Dictionary containing all metadata contained in the netCDF file.
This metadata contains data collection, and experiment information
as well as values and variables pertinent to the image data.
data: ndarray
ndarray containing the image data contained in the netCDF file.
The image data is scaled using the scale factor defined in the
netCDF metadata, if a scale factor was recorded during data
acquisition or reconstruction. If a scale factor is not present,
then a default value of 1.0 is used.
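Examples
--------
Illustrative sketch; the file path below is hypothetical:
>>> md_dict, data = load_netCDF('/data/tomo/recon_01.volume') # doctest: +SKIP
>>> data.shape # doctest: +SKIP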
"""
from netCDF4 import Dataset
with Dataset(os.path.normpath(file_name), "r") as src_file:
volume = src_file.variables["VOLUME"]
md_dict = src_file.__dict__
# Check for a voxel intensity scale factor and apply it if present.
# The data are read into memory so the returned array remains valid
# after the file is closed; if no scale factor is defined, 1.0 is used.
data = volume[:] / getattr(volume, "scale_factor", 1.0)
# Accounts for specific case where z_pixel_size doesn't get assigned
# even though dimensions are actually isotropic. This occurs when
# reconstruction is completed using tomo_recon on data collected at
# APS-13BMD.
if md_dict["x_pixel_size"] == md_dict["y_pixel_size"] and md_dict["z_pixel_size"] == 0.0 and data.shape[0] > 1:
md_dict["voxel_size"] = {"value": md_dict["x_pixel_size"], "type": float, "units": ""}
return md_dict, data | scikit-beam | /scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/io/net_cdf_io.py | net_cdf_io.py | from __future__ import absolute_import, division, print_function
import os
def load_netCDF(file_name):
"""
This function loads the specified netCDF file format data set (e.g.*.volume
APS-Sector 13 GSECARS extension) file into a numpy array for further
analysis.
Required Dependencies:
netcdf4 : Python/numpy interface to the netCDF ver. 4 library
Package name: netcdf4-python
Install from: https://github.com/Unidata/netcdf4-python
numpy
Cython -- optional
HDF5 C library version 1.8.8 or higher
Install from: ftp://ftp.hdfgroup.org/HDF5/current/src
Be sure to build with '--enable-hl --enable-shared'.
netCDF-4 C library
Install from:
ftp://ftp.unidata.ucar.edu/pub/netcdf. Version 4.1.1 or higher
Be sure to build with '--enable-netcdf-4 --enable-shared', and set
CPPFLAGS="-I $HDF5_DIR/include" and LDFLAGS="-L $HDF5_DIR/lib", where
$HDF5_DIR is the directory where HDF5 was installed.
If you want OPeNDAP support, add '--enable-dap'.
If you want HDF4 SD support, add '--enable-hdf4' and add the location
of the HDF4 headers and library to CPPFLAGS and LDFLAGS.
Parameters
----------
file_name: string
Complete path to the file to be loaded into memory
Returns
-------
md_dict: dict
Dictionary containing all metadata contained in the netCDF file.
This metadata contains data collection, and experiment information
as well as values and variables pertinent to the image data.
data: ndarray
ndarray containing the image data contained in the netCDF file.
The image data is scaled using the scale factor defined in the
netCDF metadata, if a scale factor was recorded during data
acquisition or reconstruction. If a scale factor is not present,
then a default value of 1.0 is used.
"""
from netCDF4 import Dataset
with Dataset(os.path.normpath(file_name), "r") as src_file:
volume = src_file.variables["VOLUME"]
md_dict = src_file.__dict__
# Check for a voxel intensity scale factor and apply it if present.
# The data are read into memory so the returned array remains valid
# after the file is closed; if no scale factor is defined, 1.0 is used.
data = volume[:] / getattr(volume, "scale_factor", 1.0)
# Accounts for specific case where z_pixel_size doesn't get assigned
# even though dimensions are actually isotropic. This occurs when
# reconstruction is completed using tomo_recon on data collected at
# APS-13BMD.
if md_dict["x_pixel_size"] == md_dict["y_pixel_size"] and md_dict["z_pixel_size"] == 0.0 and data.shape[0] > 1:
md_dict["voxel_size"] = {"value": md_dict["x_pixel_size"], "type": float, "units": ""}
return md_dict, data | 0.732305 | 0.324155 |
from __future__ import absolute_import, division, print_function
import logging
import os
import numpy as np
def _read_amira(src_file):
"""
Reads all information contained within standard AmiraMesh data sets.
Separate the header information from the image/volume, data.
Parameters
----------
src_file : str
The path and file name pointing to the AmiraMesh file to be loaded.
Returns
-------
am_header : list of strings
This list contains all of the raw information contained in the
AmiraMesh file header. Contains all of the raw header information
am_data : str
A compiled string containing all of the image array data, that was
stored in the source AmiraMesh data file. Contains the raw image data
"""
am_header = []
am_data = []
with open(os.path.normpath(src_file), "r") as input_file:
while True:
line = input_file.readline()
am_header.append(line)
if line == "# Data section follows\n":
input_file.readline()
break
am_data = input_file.read()
return am_header, am_data
def _amira_data_to_numpy(am_data, header_dict, flip_z=True):
"""
Transform output of `_read_amira` to a numpy array of the dtype listed in
the AmiraMesh header dictionary. The standard format for Avizo Binary
files is IEEE binary. Big- or little-endianness is stipulated in the header
information and is assessed and taken into account by this function
during the conversion process.
Parameters
----------
am_data : str
String object containing all of the image array data, formatted as IEEE
binary. Current dType options include:
float
short
ushort
byte
header_dict : dict
Metadata dictionary containing all relevant attributes pertaining to
the image array. This metadata dictionary is the output from the
function `_create_md_dict`.
flip_z : bool, optional.
Defaults to True
This option is included because the .am data sets evaluated thus far
have z-axis indexing that is opposite to that of numpy arrays. This switch
currently defaults to "True" in order to ensure that z-axis indexing remains
consistent with data processed using Avizo.
Setting this switch to "True" will flip the z-axis during processing,
and a value of "False" will keep the array as it is initially assigned
during the array reshaping step.
Returns
-------
output : ndarray
Numpy ndarray containing the image data converted from the AmiraMesh
file. This data array is ready for further processing using the NSLS-II
function library, or other operations able to operate on numpy arrays.
"""
Zdim = header_dict["array_dimensions"]["z_dimension"]
Ydim = header_dict["array_dimensions"]["y_dimension"]
Xdim = header_dict["array_dimensions"]["x_dimension"]
# Strip out null characters from the string of binary values
# Dictionary of the encoding types for AmiraMesh files
am_format_dict = {"BINARY-LITTLE-ENDIAN": "<", "BINARY": ">", "ASCII": "unknown"}
# Dictionary of the data types encountered so far in AmiraMesh files
am_dtype_dict = {"float": "f4", "short": "h", "ushort": "H", "byte": "b"}
# Had to split out the stripping of new line characters and conversion
# of the original string data based on whether source data is BINARY
# format or ASCII format. These format types require different stripping
# tools and different string conversion tools.
if header_dict["data_format"] == "BINARY-LITTLE-ENDIAN":
data_strip = am_data.strip("\n")
flt_values = np.fromstring(
data_strip, (am_format_dict[header_dict["data_format"]] + am_dtype_dict[header_dict["data_type"]])
)
if header_dict["data_format"] == "ASCII":
data_strip = am_data.replace("\n", "")
string_list = data_strip.split(" ")
string_list = string_list[0 : (len(string_list) - 2)]
flt_values = np.array(string_list).astype(am_dtype_dict[header_dict["data_type"]])
# Resize the 1D array to the correct ndarray dimensions
# Note that resize is in-place whereas reshape is not
flt_values.resize(Zdim, Ydim, Xdim)
output = flt_values
if flip_z:
output = flt_values[::-1, :, :]
return output
def _clean_amira_header(header_list):
"""
Strip the string list of all "empty" characters, including new line
characters ('\n') and empty lines. Splits each header line (which
originally is stored as a single string) into individual words, numbers or
characters, using spaces between words as the separating operator. The
output of this function is used to generate the metadata dictionary for
the image data set.
Parameters
----------
header_list : list of strings
This is the header output from the function _read_amira()
Returns
-------
clean_header : list of strings
This header list has been stripped and sorted and is now ready for
populating the metadata dictionary for the image data set.
"""
clean_header = []
for row in header_list:
split_header = list(filter(None, [word.translate(str.maketrans("", "", ',"')) for word in row.strip("\n").split()]))
clean_header.append(split_header)
return clean_header
def _create_md_dict(clean_header):
"""
Populates a dictionary with all information pertinent to the image
data set that was originally stored in the AmiraMesh file.
Parameters
----------
clean_header : list of strings
This is the output from the _sort_amira_header function.
"""
# Avizo specific metadata
md_dict = {
"software_src": clean_header[0][1],
"data_format": clean_header[0][2],
"data_format_version": clean_header[0][3],
}
if md_dict["data_format"] == "3D":
md_dict["data_format"] = clean_header[0][3]
md_dict["data_format_version"] = clean_header[0][4]
for header_line in clean_header:
hl = header_line
if "define" in hl:
hl = hl
md_dict["array_dimensions"] = {
"x_dimension": int(hl[hl.index("define") + 2]),
"y_dimension": int(hl[hl.index("define") + 3]),
"z_dimension": int(hl[hl.index("define") + 4]),
}
elif "Content" in hl:
md_dict["data_type"] = hl[hl.index("Content") + 2]
elif "CoordType" in hl:
md_dict["coord_type"] = hl[hl.index("CoordType") + 1]
elif "BoundingBox" in hl:
hl = hl
md_dict["bounding_box"] = {
"x_min": float(hl[hl.index("BoundingBox") + 1]),
"x_max": float(hl[hl.index("BoundingBox") + 2]),
"y_min": float(hl[hl.index("BoundingBox") + 3]),
"y_max": float(hl[hl.index("BoundingBox") + 4]),
"z_min": float(hl[hl.index("BoundingBox") + 5]),
"z_max": float(hl[hl.index("BoundingBox") + 6]),
}
# Parameter definition for voxel resolution calculations
bbox = [
md_dict["bounding_box"]["x_min"],
md_dict["bounding_box"]["x_max"],
md_dict["bounding_box"]["y_min"],
md_dict["bounding_box"]["y_max"],
md_dict["bounding_box"]["z_min"],
md_dict["bounding_box"]["z_max"],
]
dims = [
md_dict["array_dimensions"]["x_dimension"],
md_dict["array_dimensions"]["y_dimension"],
md_dict["array_dimensions"]["z_dimension"],
]
# Voxel resolution calculation
resolution_list = []
for index in np.arange(len(dims)):
if dims[index] > 1:
resolution_list.append((bbox[(2 * index + 1)] - bbox[(2 * index)]) / (dims[index] - 1))
else:
resolution_list.append(0)
# isotropy determination (isotropic res, or anisotropic res)
if (
resolution_list[1] / resolution_list[0] > 0.99
and resolution_list[2] / resolution_list[0] > 0.99
and resolution_list[1] / resolution_list[0] < 1.01
and resolution_list[2] / resolution_list[0] < 1.01
):
md_dict["resolution"] = {"zyx_value": resolution_list[0], "type": "isotropic"}
else:
md_dict["resolution"] = {
"zyx_value": (resolution_list[2], resolution_list[1], resolution_list[0]),
"type": "anisotropic",
}
elif "Units" in hl:
try:
units = str(hl[hl.index("Units") + 2])
md_dict["units"] = units
except Exception:
logging.debug(
"Units value undefined in source data set. " "Reverting to default units value of pixels"
)
md_dict["units"] = "pixels"
elif "Coordinates" in hl:
coords = str(hl[hl.index("Coordinates") + 1])
md_dict["coordinates"] = coords
return md_dict
def load_amiramesh(file_path):
"""
Load and convert an AmiraMesh binary file to a numpy array.
Parameters
----------
file_path : str
The path and file name of the AmiraMesh file to be loaded.
Returns
-------
md_dict : dict
Dictionary containing all pertinent header information associated with
the data set.
np_array : ndarray
An ndarray containing the image data set to be loaded. Values contained
in the resulting volume are set to be of float data type by default.
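Examples
--------
Illustrative sketch; the file path below is hypothetical:
>>> md_dict, vol = load_amiramesh('/data/scans/sample_01.am') # doctest: +SKIP
>>> md_dict['array_dimensions'] # doctest: +SKIP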
"""
header, data = _read_amira(file_path)
clean_header = _clean_amira_header(header)
md_dict = _create_md_dict(clean_header)
np_array = _amira_data_to_numpy(data, md_dict)
return md_dict, np_array | scikit-beam | /scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/io/avizo_io.py | avizo_io.py | from __future__ import absolute_import, division, print_function
import logging
import os
import numpy as np
def _read_amira(src_file):
"""
Reads all information contained within standard AmiraMesh data sets.
Separate the header information from the image/volume, data.
Parameters
----------
src_file : str
The path and file name pointing to the AmiraMesh file to be loaded.
Returns
-------
am_header : list of strings
This list contains all of the raw information contained in the
AmiraMesh file header. Contains all of the raw header information
am_data : str
A compiled string containing all of the image array data, that was
stored in the source AmiraMesh data file. Contains the raw image data
"""
am_header = []
am_data = []
with open(os.path.normpath(src_file), "r") as input_file:
while True:
line = input_file.readline()
am_header.append(line)
if line == "# Data section follows\n":
input_file.readline()
break
am_data = input_file.read()
return am_header, am_data
def _amira_data_to_numpy(am_data, header_dict, flip_z=True):
"""
Transform output of `_read_amira` to a numpy array of the dtype listed in
the AmiraMesh header dictionary. The standard format for Avizo Binary
files is IEEE binary. Big- or little-endianness is stipulated in the header
information and is assessed and taken into account by this function
during the conversion process.
Parameters
----------
am_data : str
String object containing all of the image array data, formatted as IEEE
binary. Current dType options include:
float
short
ushort
byte
header_dict : dict
Metadata dictionary containing all relevant attributes pertaining to
the image array. This metadata dictionary is the output from the
function `_create_md_dict`.
flip_z : bool, optional.
Defaults to True
This option is included because the .am data sets evaluated thus far
have z-axis indexing that is opposite to that of numpy arrays. This switch
currently defaults to "True" in order to ensure that z-axis indexing remains
consistent with data processed using Avizo.
Setting this switch to "True" will flip the z-axis during processing,
and a value of "False" will keep the array as it is initially assigned
during the array reshaping step.
Returns
-------
output : ndarray
Numpy ndarray containing the image data converted from the AmiraMesh
file. This data array is ready for further processing using the NSLS-II
function library, or other operations able to operate on numpy arrays.
"""
Zdim = header_dict["array_dimensions"]["z_dimension"]
Ydim = header_dict["array_dimensions"]["y_dimension"]
Xdim = header_dict["array_dimensions"]["x_dimension"]
# Strip out null characters from the string of binary values
# Dictionary of the encoding types for AmiraMesh files
am_format_dict = {"BINARY-LITTLE-ENDIAN": "<", "BINARY": ">", "ASCII": "unknown"}
# Dictionary of the data types encountered so far in AmiraMesh files
am_dtype_dict = {"float": "f4", "short": "h", "ushort": "H", "byte": "b"}
# Had to split out the stripping of new line characters and conversion
# of the original string data based on whether source data is BINARY
# format or ASCII format. These format types require different stripping
# tools and different string conversion tools.
if header_dict["data_format"] == "BINARY-LITTLE-ENDIAN":
data_strip = am_data.strip("\n")
flt_values = np.fromstring(
data_strip, (am_format_dict[header_dict["data_format"]] + am_dtype_dict[header_dict["data_type"]])
)
if header_dict["data_format"] == "ASCII":
data_strip = am_data.replace("\n", "")
string_list = data_strip.split(" ")
string_list = string_list[0 : (len(string_list) - 2)]
flt_values = np.array(string_list).astype(am_dtype_dict[header_dict["data_type"]])
# Resize the 1D array to the correct ndarray dimensions
# Note that resize is in-place whereas reshape is not
flt_values.resize(Zdim, Ydim, Xdim)
output = flt_values
if flip_z:
output = flt_values[::-1, :, :]
return output
def _clean_amira_header(header_list):
"""
Strip the string list of all "empty" characters, including new line
characters ('\n') and empty lines. Splits each header line (which
originally is stored as a single string) into individual words, numbers or
characters, using spaces between words as the separating operator. The
output of this function is used to generate the metadata dictionary for
the image data set.
Parameters
----------
header_list : list of strings
This is the header output from the function _read_amira()
Returns
-------
clean_header : list of strings
This header list has been stripped and sorted and is now ready for
populating the metadata dictionary for the image data set.
"""
clean_header = []
for row in header_list:
split_header = list(filter(None, [word.translate(str.maketrans("", "", ',"')) for word in row.strip("\n").split()]))
clean_header.append(split_header)
return clean_header
def _create_md_dict(clean_header):
"""
Populates a dictionary with all information pertinent to the image
data set that was originally stored in the AmiraMesh file.
Parameters
----------
clean_header : list of strings
This is the output from the _sort_amira_header function.
"""
# Avizo specific metadata
md_dict = {
"software_src": clean_header[0][1],
"data_format": clean_header[0][2],
"data_format_version": clean_header[0][3],
}
if md_dict["data_format"] == "3D":
md_dict["data_format"] = clean_header[0][3]
md_dict["data_format_version"] = clean_header[0][4]
for header_line in clean_header:
hl = header_line
if "define" in hl:
hl = hl
md_dict["array_dimensions"] = {
"x_dimension": int(hl[hl.index("define") + 2]),
"y_dimension": int(hl[hl.index("define") + 3]),
"z_dimension": int(hl[hl.index("define") + 4]),
}
elif "Content" in hl:
md_dict["data_type"] = hl[hl.index("Content") + 2]
elif "CoordType" in hl:
md_dict["coord_type"] = hl[hl.index("CoordType") + 1]
elif "BoundingBox" in hl:
hl = hl
md_dict["bounding_box"] = {
"x_min": float(hl[hl.index("BoundingBox") + 1]),
"x_max": float(hl[hl.index("BoundingBox") + 2]),
"y_min": float(hl[hl.index("BoundingBox") + 3]),
"y_max": float(hl[hl.index("BoundingBox") + 4]),
"z_min": float(hl[hl.index("BoundingBox") + 5]),
"z_max": float(hl[hl.index("BoundingBox") + 6]),
}
# Parameter definition for voxel resolution calculations
bbox = [
md_dict["bounding_box"]["x_min"],
md_dict["bounding_box"]["x_max"],
md_dict["bounding_box"]["y_min"],
md_dict["bounding_box"]["y_max"],
md_dict["bounding_box"]["z_min"],
md_dict["bounding_box"]["z_max"],
]
dims = [
md_dict["array_dimensions"]["x_dimension"],
md_dict["array_dimensions"]["y_dimension"],
md_dict["array_dimensions"]["z_dimension"],
]
# Voxel resolution calculation
resolution_list = []
for index in np.arange(len(dims)):
if dims[index] > 1:
resolution_list.append((bbox[(2 * index + 1)] - bbox[(2 * index)]) / (dims[index] - 1))
else:
resolution_list.append(0)
# isotropy determination (isotropic res, or anisotropic res)
if (
resolution_list[1] / resolution_list[0] > 0.99
and resolution_list[2] / resolution_list[0] > 0.99
and resolution_list[1] / resolution_list[0] < 1.01
and resolution_list[2] / resolution_list[0] < 1.01
):
md_dict["resolution"] = {"zyx_value": resolution_list[0], "type": "isotropic"}
else:
md_dict["resolution"] = {
"zyx_value": (resolution_list[2], resolution_list[1], resolution_list[0]),
"type": "anisotropic",
}
elif "Units" in hl:
try:
units = str(hl[hl.index("Units") + 2])
md_dict["units"] = units
except Exception:
logging.debug(
"Units value undefined in source data set. " "Reverting to default units value of pixels"
)
md_dict["units"] = "pixels"
elif "Coordinates" in hl:
coords = str(hl[hl.index("Coordinates") + 1])
md_dict["coordinates"] = coords
return md_dict
def load_amiramesh(file_path):
"""
Load and convert an AmiraMesh binary file to a numpy array.
Parameters
----------
file_path : str
The path and file name of the AmiraMesh file to be loaded.
Returns
-------
md_dict : dict
Dictionary containing all pertinent header information associated with
the data set.
np_array : ndarray
An ndarray containing the image data set to be loaded. Values contained
in the resulting volume are set to be of float data type by default.
"""
header, data = _read_amira(file_path)
clean_header = _clean_amira_header(header)
md_dict = _create_md_dict(clean_header)
np_array = _amira_data_to_numpy(data, md_dict)
return md_dict, np_array | 0.894562 | 0.55254 |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
def gsas_reader(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
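Examples
--------
Illustrative sketch; the file path below is hypothetical:
>>> tth, intensity, err = gsas_reader('/data/sample_1.gsas') # doctest: +SKIP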
"""
if os.path.splitext(file)[1] != ".gsas":
raise IOError("Provide a file with diffraction data saved in GSAS," " file extension has to be .gsas ")
# find the file mode, could be 'std', 'esd', 'fxye'
with open(file, "r") as fi:
S = fi.readlines()[1]
mode = S.split()[9]
try:
tth, intensity, err = _func_look_up[mode](file)
except KeyError:
raise ValueError(
"Provide a correct mode of the GSAS file, " "file modes could be in 'STD', 'ESD', 'FXYE' "
)
return tth, intensity, err
def _get_fxye_data(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
"""
tth = []
intensity = []
err = []
with open(file, "r") as fi:
S = fi.readlines()[2:]
for line in S:
vals = line.split()
tth.append(float(vals[0]))
f = float(vals[1])
s = float(vals[2])
if f <= 0.0:
intensity.append(0.0)
else:
intensity.append(float(vals[1]))
if s > 0.0:
err.append(1.0 / float(vals[2]) ** 2)
else:
err.append(0.0)
return [np.array(tth), np.array(intensity), np.array(err)]
def _get_esd_data(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
"""
tth = []
intensity = []
err = []
with open(file, "r") as fi:
S = fi.readlines()[1:]
# convert from centidegrees to degrees
start = float(S[0].split()[5]) / 100.0
step = float(S[0].split()[6]) / 100.0
j = 0
for line in S[1:]:
for i in range(0, 80, 16):
xi = start + step * j
yi = _sfloat(line[i : i + 8])
ei = _sfloat(line[i + 8 : i + 16])
tth.append(xi)
if yi > 0.0:
intensity.append(yi)
else:
intensity.append(0.0)
if ei > 0.0:
err.append(1.0 / ei**2)
else:
err.append(0.0)
j += 1
return [np.array(tth), np.array(intensity), np.array(err)]
def _get_std_data(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
"""
tth = []
intensity = []
err = []
with open(file, "r") as fi:
S = fi.readlines()[1:]
# convert from centidegrees to degrees
start = float(S[0].split()[5]) / 100.0
step = float(S[0].split()[6]) / 100.0
# number of data values(two theta or intensity)
nch = float(S[0].split()[2])
j = 0
for line in S[1:]:
for i in range(0, 80, 8):
xi = start + step * j
ni = max(_sint(line[i : i + 2]), 1)
yi = max(_sfloat(line[i + 2 : i + 8]), 0.0)
if yi:
vi = yi / ni
else:
yi = 0.0
vi = 0.0
if j < nch:
tth.append(xi)
if vi <= 0.0:
intensity.append(0.0)
err.append(0.0)
else:
intensity.append(yi)
err.append(1.0 / vi)
j += 1
return [np.array(tth), np.array(intensity), np.array(err)]
# find the which function to use according to mode of the GSAS file
# mode could be "STD", "ESD" or "FXYE"
_func_look_up = {"STD": _get_std_data, "ESD": _get_esd_data, "FXYE": _get_fxye_data}
def _sfloat(S):
"""
convert a string to a float, treating an all-blank string as zero
Parameters
----------
S : str
string to be converted to a float, treating an
all-blank string as zero
Returns
-------
float or zero
"""
if S.strip():
return float(S)
else:
return 0.0
def _sint(S):
"""
convert a string to an integer, treating an all-blank string as zero
Parameters
----------
S : str
string to be converted to an integer, treating an
all-blank string as zero
Returns
-------
integer or zero
"""
if S.strip():
return int(S)
else:
return 0 | scikit-beam | /scikit-beam-0.0.26.tar.gz/scikit-beam-0.0.26/skbeam/io/gsas_file_reader.py | gsas_file_reader.py | from __future__ import absolute_import, division, print_function
import os
import numpy as np
def gsas_reader(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
"""
if os.path.splitext(file)[1] != ".gsas":
raise IOError("Provide a file with diffraction data saved in GSAS," " file extension has to be .gsas ")
# find the file mode, could be 'std', 'esd', 'fxye'
with open(file, "r") as fi:
S = fi.readlines()[1]
mode = S.split()[9]
try:
tth, intensity, err = _func_look_up[mode](file)
except KeyError:
raise ValueError(
"Provide a correct mode of the GSAS file, " "file modes could be in 'STD', 'ESD', 'FXYE' "
)
return tth, intensity, err
def _get_fxye_data(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
"""
tth = []
intensity = []
err = []
with open(file, "r") as fi:
S = fi.readlines()[2:]
for line in S:
vals = line.split()
tth.append(float(vals[0]))
f = float(vals[1])
s = float(vals[2])
if f <= 0.0:
intensity.append(0.0)
else:
intensity.append(float(vals[1]))
if s > 0.0:
err.append(1.0 / float(vals[2]) ** 2)
else:
err.append(0.0)
return [np.array(tth), np.array(intensity), np.array(err)]
def _get_esd_data(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
"""
tth = []
intensity = []
err = []
with open(file, "r") as fi:
S = fi.readlines()[1:]
# convert from centidegrees to degrees
start = float(S[0].split()[5]) / 100.0
step = float(S[0].split()[6]) / 100.0
j = 0
for line in S[1:]:
for i in range(0, 80, 16):
xi = start + step * j
yi = _sfloat(line[i : i + 8])
ei = _sfloat(line[i + 8 : i + 16])
tth.append(xi)
if yi > 0.0:
intensity.append(yi)
else:
intensity.append(0.0)
if ei > 0.0:
err.append(1.0 / ei**2)
else:
err.append(0.0)
j += 1
return [np.array(tth), np.array(intensity), np.array(err)]
def _get_std_data(file):
"""
Parameters
----------
file: str
GSAS powder data file
Returns
-------
tth : ndarray
twotheta values (degrees) shape (N, ) array
intensity : ndarray
intensity values shape (N, ) array
err : ndarray
error value of intensity shape(N, ) array
"""
tth = []
intensity = []
err = []
with open(file, "r") as fi:
S = fi.readlines()[1:]
# convert from centidegrees to degrees
start = float(S[0].split()[5]) / 100.0
step = float(S[0].split()[6]) / 100.0
# number of data values(two theta or intensity)
nch = float(S[0].split()[2])
j = 0
for line in S[1:]:
for i in range(0, 80, 8):
xi = start + step * j
ni = max(_sint(line[i : i + 2]), 1)
yi = max(_sfloat(line[i + 2 : i + 8]), 0.0)
if yi:
vi = yi / ni
else:
yi = 0.0
vi = 0.0
if j < nch:
tth.append(xi)
if vi <= 0.0:
intensity.append(0.0)
err.append(0.0)
else:
intensity.append(yi)
err.append(1.0 / vi)
j += 1
return [np.array(tth), np.array(intensity), np.array(err)]
# find the which function to use according to mode of the GSAS file
# mode could be "STD", "ESD" or "FXYE"
_func_look_up = {"STD": _get_std_data, "ESD": _get_esd_data, "FXYE": _get_fxye_data}
def _sfloat(S):
"""
convert a string to a float, treating an all-blank string as zero
Parameters
----------
S : str
string to be converted to a float, treating an
all-blank string as zero
Returns
-------
float or zero
"""
if S.strip():
return float(S)
else:
return 0.0
def _sint(S):
"""
convert a string to an integer, treating an all-blank string as zero
Parameters
----------
S : str
string to be converted to an integer, treating an
all-blank string as zero
Returns
-------
integer or zero
"""
if S.strip():
return int(S)
else:
return 0 | 0.822973 | 0.488649 |
from skbio.sequence import DNA, RNA, Protein
from skbio.alignment._tabular_msa import TabularMSA
import parasail
class SubstitutionMatrix(object):
""" Wrapper around a built-in Parasail substitution matrix.
"""
def __init__(self, parasail_matrix):
self._matrix = parasail_matrix
@classmethod
def from_name(cls, name):
matrix = getattr(parasail, name)
return cls(matrix)
@classmethod
def from_match_mismatch(cls, match, mismatch, alphabet='ACGTU'):
matrix = parasail.matrix_create(alphabet, match, mismatch)
return cls(matrix)
@classmethod
def from_dict(cls, d):
# build the alphabet from the dict keys (their order defines matrix indices)
alphabet = "".join(d.keys())
matrix = parasail.matrix_create(alphabet, 1, -1)
for i, x in enumerate(alphabet):
for j, y in enumerate(alphabet):
value = d.get(x, {}).get(y)
if value is not None:
matrix.set_value(i, j, value)
return cls(matrix)
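# Usage sketch (illustrative values only): build a matrix from a parasail
# built-in, from match/mismatch scores, or from a nested dict of scores.
# m1 = SubstitutionMatrix.from_name("blosum62")
# m2 = SubstitutionMatrix.from_match_mismatch(2, -3, alphabet="ACGT")
# m3 = SubstitutionMatrix.from_dict({"A": {"A": 1, "C": -1},
# "C": {"A": -1, "C": 1}})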
class Aligner(object):
def __init__(self,
gap_open, gap_extend,
match_mismatch=None, matrix=None,
method=None):
self.align_method = _init_parasail_method(method)
self.matrix = _init_substitution_matrix(match_mismatch, matrix)
self.gap_open = gap_open
self.gap_extend = gap_extend
def align(self, s1, s2):
s1_str = str(s1)
s2_str = str(s2)
matrix = self.matrix._matrix
result = self.align_method(
s1_str, s2_str, self.gap_open, self.gap_extend, matrix
)
cigar = result.cigar
aligned1, aligned2 = _expand_aligned(cigar, s1_str, s2_str)
msa = TabularMSA([_wrap_aligned(s1, aligned1),
_wrap_aligned(s2, aligned2)])
score = result.score
start_end_positions = [(cigar.beg_query, result.end_query),
(cigar.beg_ref, result.end_ref)]
return msa, score, start_end_positions
# Local alignment functions
def local_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
# TODO: allow specifying subst. matrix as dict
_check_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_match_mismatch(
match_score, mismatch_score
)
return local_pairwise_align(
seq1, seq2, gap_open_penalty, gap_extend_penalty, substitution_matrix
)
def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(Protein,))
_check_protein_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_name("blosum50")
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
def local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
aln = Aligner(
gap_open_penalty, gap_extend_penalty, matrix=substitution_matrix,
method='sw'
)
return aln.align(seq1, seq2)
# Global alignment functions
def global_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(DNA, RNA, TabularMSA))
_check_nucleotide_seq_types(seq1, seq2, types=(DNA, RNA))
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_match_mismatch(
match_score, mismatch_score
)
return global_pairwise_align(
seq1, seq2, gap_open_penalty, gap_extend_penalty, substitution_matrix
)
def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(Protein, TabularMSA))
_check_protein_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_name("blosum50")
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
def global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
aln = Aligner(
gap_open_penalty, gap_extend_penalty, matrix=substitution_matrix,
method='nw',
)
return aln.align(seq1, seq2)
# Semiglobal alignment functions
def semiglobal_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
# TODO: allow specifying subst. matrix as dict
_check_seq_types(seq1, seq2, types=(DNA, RNA, TabularMSA))
_check_nucleotide_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_match_mismatch(
match_score, mismatch_score
)
return semiglobal_pairwise_align(
seq1, seq2, gap_open_penalty, gap_extend_penalty, substitution_matrix
)
def semiglobal_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(Protein, TabularMSA))
_check_protein_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_name("blosum50")
return semiglobal_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
def semiglobal_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
aln = Aligner(
gap_open_penalty, gap_extend_penalty, matrix=substitution_matrix,
method='sg'
)
return aln.align(seq1, seq2)
# Internal helpers
def _expand_aligned(cigar, seq1, seq2):
""" Expand a parasail cigar sequence into two aligned sequences.
"""
aligned1 = []
aligned2 = []
pos1 = cigar.beg_query
pos2 = cigar.beg_ref
for s in cigar.seq:
op = parasail.Cigar.decode_op(s)
ln = parasail.Cigar.decode_len(s)
for j in range(0, ln):
if op == b'=' or op == b'X':
c1 = seq1[pos1]
c2 = seq2[pos2]
pos1 += 1
pos2 += 1
elif op == b'I':
c1 = seq1[pos1]
c2 = '-'
pos1 += 1
elif op == b'D':
c1 = '-'
c2 = seq2[pos2]
pos2 += 1
else:
msg = "Invalid character in cigar string: {!r}".format(op)
raise ValueError(msg)
aligned1.append(c1)
aligned2.append(c2)
return "".join(aligned1), "".join(aligned2)
def _wrap_aligned(original, aligned):
""" Wrap aligned string so that it has the same type as the original
sequence.
"""
constructor = type(original)
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned = constructor(aligned, metadata=metadata, validate=False)
return aligned
def _check_seq_types(*seqs, types=(DNA, RNA)):
""" Check type of sequences to align.
Raises
------
TypeError
"""
if len(seqs) == 0:
return
seq_types = set(type(seq) for seq in seqs)
if len(seq_types) > 1:
msg = "sequences must be the same type, but got {}"
raise TypeError(msg.format(
", ".join(typ.__name__ for typ in seq_types)
))
seq_type = next(iter(seq_types))
if not issubclass(seq_type, types):
msg = "sequences must be one of the following: {}, but got type {!r}"
raise TypeError(
msg.format(
", ".join(typ.__name__ for typ in types),
seq_type.__name__
)
)
def _check_protein_seq_types(*seqs):
if len(seqs) == 0:
return
for seq in seqs:
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, Protein):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with Protein dtype, "
"not dtype %r" % seq.dtype.__name__
)
def _check_nucleotide_seq_types(*seqs, types=(DNA, RNA)):
if len(seqs) == 0:
return
for seq in seqs:
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, types):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with DNA or RNA dtype, "
"not dtype %r" % seq.dtype.__name__
)
def _init_substitution_matrix(match_mismatch_score=None, matrix=None):
if matrix is not None:
if isinstance(matrix, dict):
matrix = SubstitutionMatrix.from_dict(
matrix
)
elif isinstance(matrix, str):
matrix = SubstitutionMatrix.from_name(matrix)
elif match_mismatch_score is not None:
matrix = SubstitutionMatrix.from_match_mismatch(
*match_mismatch_score
)
else:
raise ValueError("Supply either a match/mismatch score, "
"a name of a substitution matrix (e.g. "
"'blosum50'), or a substitution matrix "
"instance")
return matrix
def _init_parasail_method(method):
if isinstance(method, str):
try:
method_name = {
'nw': 'nw_trace',
'sw': 'sw_trace',
'sg': 'sg_trace',
}[method]
except KeyError:
raise ValueError("No such alignment method: {!r}".format(method))
else:
method = getattr(parasail, method_name)
return method | scikit-bio-parasail | /scikit-bio-parasail-0.0.4.tar.gz/scikit-bio-parasail-0.0.4/skbio_parasail/__init__.py | __init__.py | from skbio.sequence import DNA, RNA, Protein
from skbio.alignment._tabular_msa import TabularMSA
import parasail
class SubstitutionMatrix(object):
""" Wrapper around a built-in Parasail substitution matrix.
"""
def __init__(self, parasail_matrix):
self._matrix = parasail_matrix
@classmethod
def from_name(cls, name):
matrix = getattr(parasail, name)
return cls(matrix)
@classmethod
def from_match_mismatch(cls, match, mismatch, alphabet='ACGTU'):
matrix = parasail.matrix_create(alphabet, match, mismatch)
return cls(matrix)
@classmethod
def from_dict(cls, d):
alphabet = str(d.keys())
matrix = parasail.matrix_create(alphabet, 1, -1)
for i, x in enumerate(alphabet):
for j, y in enumerate(alphabet):
value = d.get(x, {}).get(y)
if value is not None:
matrix.set_value(i, j, value)
return cls(matrix)
class Aligner(object):
def __init__(self,
gap_open, gap_extend,
match_mismatch=None, matrix=None,
method=None):
self.align_method = _init_parasail_method(method)
self.matrix = _init_substitution_matrix(match_mismatch, matrix)
self.gap_open = gap_open
self.gap_extend = gap_extend
def align(self, s1, s2):
s1_str = str(s1)
s2_str = str(s2)
matrix = self.matrix._matrix
result = self.align_method(
s1_str, s2_str, self.gap_open, self.gap_extend, matrix
)
cigar = result.cigar
aligned1, aligned2 = _expand_aligned(cigar, s1_str, s2_str)
msa = TabularMSA([_wrap_aligned(s1, aligned1),
_wrap_aligned(s2, aligned2)])
score = result.score
start_end_positions = [(cigar.beg_query, result.end_query),
(cigar.beg_ref, result.end_ref)]
return msa, score, start_end_positions
# Local alignment functions
def local_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
# TODO: allow specifying subst. matrix as dict
_check_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_match_mismatch(
match_score, mismatch_score
)
return local_pairwise_align(
seq1, seq2, gap_open_penalty, gap_extend_penalty, substitution_matrix
)
def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(Protein,))
_check_protein_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_name("blosum50")
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
def local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
aln = Aligner(
gap_open_penalty, gap_extend_penalty, matrix=substitution_matrix,
method='sw'
)
return aln.align(seq1, seq2)
# Global alignment functions
def global_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(DNA, RNA, TabularMSA))
_check_nucleotide_seq_types(seq1, seq2, types=(DNA, RNA))
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_match_mismatch(
match_score, mismatch_score
)
return global_pairwise_align(
seq1, seq2, gap_open_penalty, gap_extend_penalty, substitution_matrix
)
def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(Protein, TabularMSA))
_check_protein_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_name("blosum50")
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
def global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
aln = Aligner(
gap_open_penalty, gap_extend_penalty, matrix=substitution_matrix,
method='nw',
)
return aln.align(seq1, seq2)
# Semiglobal alignment functions
def semiglobal_pairwise_align_nucleotide(
seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
# TODO: allow specifying subst. matrix as dict
_check_seq_types(seq1, seq2, types=(DNA, RNA, TabularMSA))
_check_nucleotide_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_match_mismatch(
match_score, mismatch_score
)
return semiglobal_pairwise_align(
seq1, seq2, gap_open_penalty, gap_extend_penalty, substitution_matrix
)
def semiglobal_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
_check_seq_types(seq1, seq2, types=(Protein, TabularMSA))
_check_protein_seq_types(seq1, seq2)
if substitution_matrix is None:
substitution_matrix = SubstitutionMatrix.from_name("blosum50")
return semiglobal_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
def semiglobal_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
aln = Aligner(
gap_open_penalty, gap_extend_penalty, matrix=substitution_matrix,
method='sg'
)
return aln.align(seq1, seq2)
# Internal helpers
def _expand_aligned(cigar, seq1, seq2):
""" Expand a parasail cigar sequence into two aligned sequences.
"""
aligned1 = []
aligned2 = []
pos1 = cigar.beg_query
pos2 = cigar.beg_ref
for s in cigar.seq:
op = parasail.Cigar.decode_op(s)
ln = parasail.Cigar.decode_len(s)
for j in range(0, ln):
if op == b'=' or op == b'X':
c1 = seq1[pos1]
c2 = seq2[pos2]
pos1 += 1
pos2 += 1
elif op == b'I':
c1 = seq1[pos1]
c2 = '-'
pos1 += 1
elif op == b'D':
c1 = '-'
c2 = seq2[pos2]
pos2 += 1
else:
msg = "Invalid character in cigar string: {!r}".format(op)
raise ValueError(msg)
aligned1.append(c1)
aligned2.append(c2)
return "".join(aligned1), "".join(aligned2)
def _wrap_aligned(original, aligned):
""" Wrap aligned string so that it has the same type as the original
sequence.
"""
constructor = type(original)
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned = constructor(aligned, metadata=metadata, validate=False)
return aligned
def _check_seq_types(*seqs, types=(DNA, RNA)):
""" Check type of sequences to align.
Raises
------
TypeError
"""
if len(seqs) == 0:
return
seq_types = set(type(seq) for seq in seqs)
if len(seq_types) > 1:
msg = "sequences must be the same type, but got {}"
raise TypeError(msg.format(
", ".join(typ.__name__ for typ in seq_types)
))
seq_type = next(iter(seq_types))
if not issubclass(seq_type, types):
msg = "sequences must be one of the following: {}, but got type {!r}"
raise TypeError(
msg.format(
", ".join(typ.__name__ for typ in types),
seq_type.__name__
)
)
def _check_protein_seq_types(*seqs):
if len(seqs) == 0:
return
for seq in seqs:
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, Protein):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with Protein dtype, "
"not dtype %r" % seq.dtype.__name__
)
def _check_nucleotide_seq_types(*seqs, types=(DNA, RNA)):
if len(seqs) == 0:
return
for seq in seqs:
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, types):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with DNA or RNA dtype, "
"not dtype %r" % seq.dtype.__name__
)
def _init_substitution_matrix(match_mismatch_score=None, matrix=None):
if matrix is not None:
if isinstance(matrix, dict):
matrix = SubstitutionMatrix.from_dict(
matrix
)
elif isinstance(matrix, str):
matrix = SubstitutionMatrix.from_name(matrix)
elif match_mismatch_score is not None:
matrix = SubstitutionMatrix.from_match_mismatch(
*match_mismatch_score
)
else:
raise ValueError("Supply either a match/mismatch score, "
"a name of a substitution matrix (e.g. "
"'blosum50'), or a substitution matrix "
"instance")
return matrix
def _init_parasail_method(method):
if isinstance(method, str):
try:
method_name = {
'nw': 'nw_trace',
'sw': 'sw_trace',
'sg': 'sg_trace',
}[method]
except KeyError:
raise ValueError("No such alignment method: {!r}".format(method))
else:
method = getattr(parasail, method_name)
return method | 0.661267 | 0.422862 |
**Important project update (April 2022):** scikit-bio is currently in maintenance mode. Due to limited developer bandwidth, we are focusing on keeping scikit-bio up-to-date with Python and Python scientific computing libraries. We plan to do this through two annual releases of scikit-bio. At this time, we have less availability for reviewing or adding new features. We realize that scikit-bio is an important tool for the bioinformatics community, and we hope to transition back to more active development in the future. If you're interested in helping by taking a leadership role in the project, please reach out.
.. image:: http://scikit-bio.org/assets/logo.svg
:target: http://scikit-bio.org
:alt: scikit-bio logo
|Build Status| |Coverage Status| |ASV Benchmarks| |Gitter Badge| |Depsy Badge| |Anaconda Build Platforms| |Anaconda Build Version| |License| |Downloads| |Install|
scikit-bio is an open-source, BSD-licensed Python 3 package providing data structures, algorithms and educational resources for bioinformatics.
To view scikit-bio's documentation, visit `scikit-bio.org
<http://scikit-bio.org>`__.
**Note:** scikit-bio is no longer compatible with Python 2. scikit-bio is compatible with Python 3.8 and later.
scikit-bio is currently in beta. We are very actively developing it, and **backward-incompatible interface changes can and will arise**. To avoid these types of changes being a surprise to our users, our public APIs are decorated to make it clear to users when an API can be relied upon (stable) and when it may be subject to change (experimental). See the `API stability docs <https://github.com/biocore/scikit-bio/blob/master/doc/source/user/api_stability.rst>`_ for more details, including what we mean by *stable* and *experimental* in this context.
Installing
----------
The recommended way to install scikit-bio is via the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_.
To install the latest release of scikit-bio::
conda install -c conda-forge scikit-bio
Alternatively, you can install scikit-bio using ``pip``::
pip install scikit-bio
You can verify your installation by running the scikit-bio unit tests::
python -m skbio.test
For users of Debian, ``skbio`` is in the Debian software distribution and may
be installed using::
sudo apt-get install python3-skbio python-skbio-doc
Getting help
------------
To get help with scikit-bio, you should use the `skbio <http://stackoverflow.com/questions/tagged/skbio>`_ tag on StackOverflow (SO). Before posting a question, check out SO's guide on how to `ask a question <http://stackoverflow.com/questions/how-to-ask>`_. The scikit-bio developers regularly monitor the ``skbio`` SO tag.
Projects using scikit-bio
-------------------------
Some of the projects that we know of that are using scikit-bio are:
- `QIIME 2 <http://qiime2.org/>`__
- `Emperor <http://biocore.github.io/emperor/>`__
- `An Introduction to Applied
Bioinformatics <http://readIAB.org>`__
- `tax2tree <https://github.com/biocore/tax2tree>`__
- `Qiita <http://qiita.microbio.me>`__
- `ghost-tree <https://github.com/JTFouquier/ghost-tree>`__
- `Platypus-Conquistador <https://github.com/biocore/Platypus-Conquistador>`__
If you're using scikit-bio in your own projects, feel free to issue a pull request to add them to this list.
scikit-bio development
----------------------
If you're interested in getting involved in scikit-bio development, see `CONTRIBUTING.md <https://github.com/biocore/scikit-bio/blob/master/.github/CONTRIBUTING.md>`__.
See the list of `scikit-bio's contributors
<https://github.com/biocore/scikit-bio/graphs/contributors>`__.
Licensing
---------
scikit-bio is available under the new BSD license. See
`LICENSE.txt <https://github.com/biocore/scikit-bio/blob/master/LICENSE.txt>`__ for scikit-bio's license, and the
`licenses directory <https://github.com/biocore/scikit-bio/tree/master/licenses>`_ for the licenses of third-party software that is
(either partially or entirely) distributed with scikit-bio.
The pre-history of scikit-bio
-----------------------------
scikit-bio began from code derived from `PyCogent
<http://www.pycogent.org>`__ and `QIIME <http://www.qiime.org>`__, and
the contributors and/or copyright holders have agreed to make the code
they wrote for PyCogent and/or QIIME available under the BSD
license. The contributors to PyCogent and/or QIIME modules that have
been ported to scikit-bio are: Rob Knight (`@rob-knight
<https://github.com/rob-knight>`__), Gavin Huttley (`@gavinhuttley
<https://github.com/gavinhuttley>`__), Daniel McDonald (`@wasade
<https://github.com/wasade>`__), Micah Hamady, Antonio Gonzalez
(`@antgonza <https://github.com/antgonza>`__), Sandra Smit, Greg
Caporaso (`@gregcaporaso <https://github.com/gregcaporaso>`__), Jai
Ram Rideout (`@jairideout <https://github.com/jairideout>`__),
Cathy Lozupone (`@clozupone <https://github.com/clozupone>`__), Mike Robeson
(`@mikerobeson <https://github.com/mikerobeson>`__), Marcin Cieslik,
Peter Maxwell, Jeremy Widmann, Zongzhi Liu, Michael Dwan, Logan Knecht
(`@loganknecht <https://github.com/loganknecht>`__), Andrew Cochran,
Jose Carlos Clemente (`@cleme <https://github.com/cleme>`__), Damien
Coy, Levi McCracken, Andrew Butterfield, Will Van Treuren (`@wdwvt1
<https://github.com/wdwvt1>`__), Justin Kuczynski (`@justin212k
<https://github.com/justin212k>`__), Jose Antonio Navas Molina
(`@josenavas <https://github.com/josenavas>`__), Matthew Wakefield
(`@genomematt <https://github.com/genomematt>`__) and Jens Reeder
(`@jensreeder <https://github.com/jensreeder>`__).
Logo
----
scikit-bio's logo was created by `Alina Prassas <http://cargocollective.com/alinaprassas>`_.
.. |Build Status| image:: https://travis-ci.org/biocore/scikit-bio.svg?branch=master
:target: https://travis-ci.org/biocore/scikit-bio
.. |Coverage Status| image:: https://coveralls.io/repos/biocore/scikit-bio/badge.png
:target: https://coveralls.io/r/biocore/scikit-bio
.. |ASV Benchmarks| image:: http://img.shields.io/badge/benchmarked%20by-asv-green.svg?style=flat
:target: https://s3-us-west-2.amazonaws.com/scikit-bio.org/benchmarks/master/index.html
.. |Gitter Badge| image:: https://badges.gitter.im/Join%20Chat.svg
:alt: Join the chat at https://gitter.im/biocore/scikit-bio
:target: https://gitter.im/biocore/scikit-bio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
.. |Depsy Badge| image:: http://depsy.org/api/package/pypi/scikit-bio/badge.svg
:target: http://depsy.org/package/python/scikit-bio
.. |Anaconda Build Platforms| image:: https://anaconda.org/conda-forge/scikit-bio/badges/platforms.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |Anaconda Build Version| image:: https://anaconda.org/conda-forge/scikit-bio/badges/version.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |License| image:: https://anaconda.org/conda-forge/scikit-bio/badges/license.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |Downloads| image:: https://anaconda.org/conda-forge/scikit-bio/badges/downloads.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |Install| image:: https://anaconda.org/conda-forge/scikit-bio/badges/installer/conda.svg
:target: https://conda.anaconda.org/conda-forge
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/README.rst | README.rst | **Important project update (April 2022):** scikit-bio is currently in maintenance mode. Due to limited developer bandwidth, we are focusing on keeping scikit-bio up-to-date with Python and Python scientific computing libraries. We plan to do this through two annual releases of scikit-bio. At this time, we have less availability for reviewing or adding new features. We realize that scikit-bio is an important tool for the bioinformatics community, and we hope to transition back to more active development in the future. If you're interested in helping by taking a leadership role in the project, please reach out.
.. image:: http://scikit-bio.org/assets/logo.svg
:target: http://scikit-bio.org
:alt: scikit-bio logo
|Build Status| |Coverage Status| |ASV Benchmarks| |Gitter Badge| |Depsy Badge| |Anaconda Build Platforms| |Anaconda Build Version| |License| |Downloads| |Install|
scikit-bio is an open-source, BSD-licensed Python 3 package providing data structures, algorithms and educational resources for bioinformatics.
To view scikit-bio's documentation, visit `scikit-bio.org
<http://scikit-bio.org>`__.
**Note:** scikit-bio is no longer compatible with Python 2. scikit-bio is compatible with Python 3.8 and later.
scikit-bio is currently in beta. We are very actively developing it, and **backward-incompatible interface changes can and will arise**. To avoid these types of changes being a surprise to our users, our public APIs are decorated to make it clear to users when an API can be relied upon (stable) and when it may be subject to change (experimental). See the `API stability docs <https://github.com/biocore/scikit-bio/blob/master/doc/source/user/api_stability.rst>`_ for more details, including what we mean by *stable* and *experimental* in this context.
Installing
----------
The recommended way to install scikit-bio is via the ``conda`` package manager available in `Anaconda <http://continuum.io/downloads>`_ or `miniconda <http://conda.pydata.org/miniconda.html>`_.
To install the latest release of scikit-bio::
conda install -c conda-forge scikit-bio
Alternatively, you can install scikit-bio using ``pip``::
pip install scikit-bio
You can verify your installation by running the scikit-bio unit tests::
python -m skbio.test
For users of Debian, ``skbio`` is in the Debian software distribution and may
be installed using::
sudo apt-get install python3-skbio python-skbio-doc
Getting help
------------
To get help with scikit-bio, you should use the `skbio <http://stackoverflow.com/questions/tagged/skbio>`_ tag on StackOverflow (SO). Before posting a question, check out SO's guide on how to `ask a question <http://stackoverflow.com/questions/how-to-ask>`_. The scikit-bio developers regularly monitor the ``skbio`` SO tag.
Projects using scikit-bio
-------------------------
Some of the projects that we know of that are using scikit-bio are:
- `QIIME 2 <http://qiime2.org/>`__
- `Emperor <http://biocore.github.io/emperor/>`__
- `An Introduction to Applied
Bioinformatics <http://readIAB.org>`__
- `tax2tree <https://github.com/biocore/tax2tree>`__
- `Qiita <http://qiita.microbio.me>`__
- `ghost-tree <https://github.com/JTFouquier/ghost-tree>`__
- `Platypus-Conquistador <https://github.com/biocore/Platypus-Conquistador>`__
If you're using scikit-bio in your own projects, feel free to issue a pull request to add them to this list.
scikit-bio development
----------------------
If you're interested in getting involved in scikit-bio development, see `CONTRIBUTING.md <https://github.com/biocore/scikit-bio/blob/master/.github/CONTRIBUTING.md>`__.
See the list of `scikit-bio's contributors
<https://github.com/biocore/scikit-bio/graphs/contributors>`__.
Licensing
---------
scikit-bio is available under the new BSD license. See
`LICENSE.txt <https://github.com/biocore/scikit-bio/blob/master/LICENSE.txt>`__ for scikit-bio's license, and the
`licenses directory <https://github.com/biocore/scikit-bio/tree/master/licenses>`_ for the licenses of third-party software that is
(either partially or entirely) distributed with scikit-bio.
The pre-history of scikit-bio
-----------------------------
scikit-bio began from code derived from `PyCogent
<http://www.pycogent.org>`__ and `QIIME <http://www.qiime.org>`__, and
the contributors and/or copyright holders have agreed to make the code
they wrote for PyCogent and/or QIIME available under the BSD
license. The contributors to PyCogent and/or QIIME modules that have
been ported to scikit-bio are: Rob Knight (`@rob-knight
<https://github.com/rob-knight>`__), Gavin Huttley (`@gavinhuttley
<https://github.com/gavinhuttley>`__), Daniel McDonald (`@wasade
<https://github.com/wasade>`__), Micah Hamady, Antonio Gonzalez
(`@antgonza <https://github.com/antgonza>`__), Sandra Smit, Greg
Caporaso (`@gregcaporaso <https://github.com/gregcaporaso>`__), Jai
Ram Rideout (`@jairideout <https://github.com/jairideout>`__),
Cathy Lozupone (`@clozupone <https://github.com/clozupone>`__), Mike Robeson
(`@mikerobeson <https://github.com/mikerobeson>`__), Marcin Cieslik,
Peter Maxwell, Jeremy Widmann, Zongzhi Liu, Michael Dwan, Logan Knecht
(`@loganknecht <https://github.com/loganknecht>`__), Andrew Cochran,
Jose Carlos Clemente (`@cleme <https://github.com/cleme>`__), Damien
Coy, Levi McCracken, Andrew Butterfield, Will Van Treuren (`@wdwvt1
<https://github.com/wdwvt1>`__), Justin Kuczynski (`@justin212k
<https://github.com/justin212k>`__), Jose Antonio Navas Molina
(`@josenavas <https://github.com/josenavas>`__), Matthew Wakefield
(`@genomematt <https://github.com/genomematt>`__) and Jens Reeder
(`@jensreeder <https://github.com/jensreeder>`__).
Logo
----
scikit-bio's logo was created by `Alina Prassas <http://cargocollective.com/alinaprassas>`_.
.. |Build Status| image:: https://travis-ci.org/biocore/scikit-bio.svg?branch=master
:target: https://travis-ci.org/biocore/scikit-bio
.. |Coverage Status| image:: https://coveralls.io/repos/biocore/scikit-bio/badge.png
:target: https://coveralls.io/r/biocore/scikit-bio
.. |ASV Benchmarks| image:: http://img.shields.io/badge/benchmarked%20by-asv-green.svg?style=flat
:target: https://s3-us-west-2.amazonaws.com/scikit-bio.org/benchmarks/master/index.html
.. |Gitter Badge| image:: https://badges.gitter.im/Join%20Chat.svg
:alt: Join the chat at https://gitter.im/biocore/scikit-bio
:target: https://gitter.im/biocore/scikit-bio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
.. |Depsy Badge| image:: http://depsy.org/api/package/pypi/scikit-bio/badge.svg
:target: http://depsy.org/package/python/scikit-bio
.. |Anaconda Build Platforms| image:: https://anaconda.org/conda-forge/scikit-bio/badges/platforms.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |Anaconda Build Version| image:: https://anaconda.org/conda-forge/scikit-bio/badges/version.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |License| image:: https://anaconda.org/conda-forge/scikit-bio/badges/license.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |Downloads| image:: https://anaconda.org/conda-forge/scikit-bio/badges/downloads.svg
:target: https://anaconda.org/conda-forge/scikit-bio
.. |Install| image:: https://anaconda.org/conda-forge/scikit-bio/badges/installer/conda.svg
:target: https://conda.anaconda.org/conda-forge
| 0.762778 | 0.654177 |
# scikit-bio changelog
## Version 0.5.9
### Features
* Adding Variance log ratio estimators in `skbio.stats.composition.vlr` and `skbio.stats.composition.pairwise_vlr` ([#1803](https://github.com/biocore/scikit-bio/pull/1803))
* Added `skbio.stats.composition.tree_basis` to construct ILR bases from `TreeNode` objects. ([#1862](https://github.com/biocore/scikit-bio/pull/1862))
* `IntervalMetadata.query` now defaults to obtaining all results ([#1817](https://github.com/biocore/scikit-bio/issues/1817)).
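A minimal sketch of the new `query` default (the interval coordinates and metadata below are made up for illustration):
```
from skbio.metadata import IntervalMetadata

im = IntervalMetadata(10)  # features live on a sequence of length 10
im.add(bounds=[(1, 4)], metadata={'gene': 'sagA'})
im.add(bounds=[(6, 9)], metadata={'gene': 'sagB'})
# Calling query() with no arguments now yields every stored interval.
print(len(list(im.query())))  # 2
```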
### Backward-incompatible changes [experimental]
* With the introduction of the `tree_basis` object, the ILR bases are now represented in log-odds coordinates rather than in probabilities to minimize issues with numerical stability. Furthermore, the `ilr` and `ilr_inv` functions now take the `basis` input parameter in terms of log-odds coordinates. This also affects `skbio.stats.composition.sbp_basis`. ([#1862](https://github.com/biocore/scikit-bio/pull/1862))
### Important
* Complex multiple axis indexing operations with `TabularMSA` have been removed from testing due to incompatibilities with modern versions of Pandas. ([#1851](https://github.com/biocore/scikit-bio/pull/1851))
* Pinning `scipy <= 1.10.1` ([#1867](https://github.com/biocore/scikit-bio/pull/1867))
### Bug fixes
* Fixed a bug that caused build failure on the ARM64 microarchitecture due to floating-point number handling. ([#1859](https://github.com/biocore/scikit-bio/pull/1859))
* The Gini index is now prevented from going below 0.0, see [#1844](https://github.com/biocore/scikit-bio/issues/1844).
* Fixed bug [#1847](https://github.com/biocore/scikit-bio/issues/1847) in which the edge from the root was inadvertently included in the calculation for `descending_branch_length`.
### Miscellaneous
* Replaced dependencies `CacheControl` and `lockfile` with `requests` to avoid a dependency inconsistency issue of the former. (See [#1863](https://github.com/biocore/scikit-bio/pull/1863), merged in [#1859](https://github.com/biocore/scikit-bio/pull/1859))
* Updated installation instructions for developers in `CONTRIBUTING.md` ([#1860](https://github.com/biocore/scikit-bio/pull/1860))
## Version 0.5.8
### Features
* Added NCBI taxonomy database dump format (`taxdump`) ([#1810](https://github.com/biocore/scikit-bio/pull/1810)).
* Added `TreeNode.from_taxdump` for converting taxdump into a tree ([#1810](https://github.com/biocore/scikit-bio/pull/1810)).
* scikit-learn has been removed as a dependency. This was a fairly heavyweight dependency that was providing minor functionality to scikit-bio. The critical components have been implemented in scikit-bio directly, and the non-critical components are listed under "Backward-incompatible changes [experimental]".
* Python 3.11 is now supported.
### Backward-incompatible changes [experimental]
* With the removal of the scikit-learn dependency, three beta diversity metric names can no longer be specified. These are `wminkowski`, `nan_euclidean`, and `haversine`. On testing, `wminkowski` and `haversine` did not work through `skbio.diversity.beta_diversity` (or `sklearn.metrics.pairwise_distances`). The former was deprecated in favor of calling `minkowski` with a vector of weights provided as kwarg `w` (example below), and the latter does not work with data of this shape. `nan_euclidean` can still be accessed from scikit-learn directly if needed, if a user installs scikit-learn in their environment (example below).
```
counts = [[23, 64, 14, 0, 0, 3, 1],
[0, 3, 35, 42, 0, 12, 1],
[0, 5, 5, 0, 40, 40, 0],
[44, 35, 9, 0, 1, 0, 0],
[0, 2, 8, 0, 35, 45, 1],
[0, 0, 25, 35, 0, 19, 0],
[88, 31, 0, 5, 5, 5, 5],
[44, 39, 0, 0, 0, 0, 0]]
# new mechanism of accessing wminkowski
from skbio.diversity import beta_diversity
beta_diversity("minkowski", counts, w=[1,1,1,1,1,1,2])
# accessing nan_euclidean through scikit-learn directly
import skbio
from sklearn.metrics import pairwise_distances
sklearn_dm = pairwise_distances(counts, metric="nan_euclidean")
skbio_dm = skbio.DistanceMatrix(sklearn_dm)
```
### Deprecated functionality [experimental]
* `skbio.alignment.local_pairwise_align_ssw` has been deprecated ([#1814](https://github.com/biocore/scikit-bio/issues/1814)) and will be removed or replaced in scikit-bio 0.6.0.
### Bug fixes
* Use `oldest-supported-numpy` as build dependency. This fixes problems with environments that use an older version of numpy than the one used to build scikit-bio ([#1813](https://github.com/biocore/scikit-bio/pull/1813)).
## Version 0.5.7
### Features
* Introduce support for Python 3.10 ([#1801](https://github.com/biocore/scikit-bio/pull/1801)).
* Tentative support for Apple M1 ([#1709](https://github.com/biocore/scikit-bio/pull/1709)).
* Added support for reading and writing a binary distance matrix object format. ([#1716](https://github.com/biocore/scikit-bio/pull/1716))
* Added support for `np.float32` with `DissimilarityMatrix` objects.
* Added support for `method` and `number_of_dimensions` to `permdisp`, reducing the runtime by 100x at 4000 samples ([#1769](https://github.com/biocore/scikit-bio/pull/1769)).
* An `OrdinationResults` object is now accepted as input for `permdisp`.
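A minimal sketch of passing a precomputed ordination to `permdisp` (the distances and group labels are made up, and the exact keyword usage is an assumption based on the entries above):
```
import numpy as np
from skbio import DistanceMatrix
from skbio.stats.distance import permdisp
from skbio.stats.ordination import pcoa

rng = np.random.default_rng(0)
raw = rng.random((6, 6))
mat = (raw + raw.T) / 2      # symmetrize
np.fill_diagonal(mat, 0.0)   # zero diagonal
dm = DistanceMatrix(mat, ids=list('abcdef'))
grouping = ['g1', 'g1', 'g1', 'g2', 'g2', 'g2']
ordination = pcoa(dm, number_of_dimensions=3)
# The PCoA result can now be passed in place of the distance matrix.
print(permdisp(ordination, grouping, permutations=99)['p-value'])
```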
### Performance enhancements
* Avoid an implicit data copy on construction of `DissimilarityMatrix` objects.
* Avoid validation on copy of `DissimilarityMatrix` and `DistanceMatrix` objects, see [PR #1747](https://github.com/biocore/scikit-bio/pull/1747)
* Use an optimized version of symmetry check in DistanceMatrix, see [PR #1747](https://github.com/biocore/scikit-bio/pull/1747)
* Avoid performing filtering when ids are identical, see [PR #1752](https://github.com/biocore/scikit-bio/pull/1752)
* `center_distance_matrix` has been re-implemented in Cython for both speed and memory use, which indirectly speeds up `pcoa`, see [PR #1749](https://github.com/biocore/scikit-bio/pull/1749)
* Use a memory-optimized version of permute in DistanceMatrix, see [PR #1756](https://github.com/biocore/scikit-bio/pull/1756).
* Refactor pearson and spearman skbio.stats.distance.mantel implementations to drastically improve memory locality. Also cache intermediate results that are invariant across permutations, see [PR #1756](https://github.com/biocore/scikit-bio/pull/1756).
* Refactor permanova to remove intermediate buffers and cythonize the internals, see [PR #1768](https://github.com/biocore/scikit-bio/pull/1768).
### Bug fixes
* Fixed Windows and 32-bit incompatibility in `unweighted_unifrac`.
### Miscellaneous
* Python 3.6 has been removed from our testing matrix.
* Specify build dependencies in pyproject.toml. This allows the package to be installed without having to first manually install numpy.
* Update hdmedians package to a version which doesn't require an initial manual numpy install.
* Now buildable on non-x86 platforms due to use of the [SIMD Everywhere](https://github.com/simd-everywhere/simde) library.
* Regenerate Cython wrapper by default to avoid incompatibilities with installed CPython.
* Update documentation for the `skbio.stats.composition.ancom` function. ([#1741](https://github.com/biocore/scikit-bio/pull/1741))
## Version 0.5.6
### Features
* Added option to return a capture group compiled regex pattern to any class inheriting ``GrammaredSequence`` through the ``to_regex`` method. ([#1431](https://github.com/biocore/scikit-bio/issues/1431))
* Added `DissimilarityMatrix.within` and `.between` to obtain the respective distances and express them as a `DataFrame`. ([#1662](https://github.com/biocore/scikit-bio/pull/1662))
* Added Kendall Tau as possible correlation method in the `skbio.stats.distance.mantel` function ([#1675](https://github.com/biocore/scikit-bio/issues/1675)).
* Added support for IUPAC amino acid codes U (selenocysteine), O (pyrrolysine), and J (leucine or isoleucine). ([#1576](https://github.com/biocore/scikit-bio/issues/1576))
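For example, sequences containing the new residue codes now validate (a minimal sketch):
```
from skbio import Protein

seq = Protein('MKUOJ')           # U, O and J no longer raise an error
print('U' in Protein.alphabet)   # True
```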
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
* Changed `skbio.tree.TreeNode.support` from a method to a property.
* Added `assign_supports` method to `skbio.tree.TreeNode` to extract branch support values from node labels.
* Modified the way a node's label is printed: `support:name` if both exist, or `support` or `name` if either exists.
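A minimal sketch of the new support handling (the tree below is made up; numeric internal node labels are assumed to hold support values):
```
from io import StringIO
from skbio import TreeNode

tree = TreeNode.read(StringIO('((a,b)95,(c,d)80)root;'), format='newick')
tree.assign_supports()              # move numeric node labels into node.support
for node in tree.non_tips():
    print(node.name, node.support)  # `support` is now a property
```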
### Performance enhancements
### Bug fixes
* Require `Sphinx <= 3.0`. Newer Sphinx versions caused build errors. [#1719](https://github.com/biocore/scikit-bio/pull/1719)
* `skbio.stats.ordination` tests have been relaxed. ([#1713](https://github.com/biocore/scikit-bio/issues/1713))
* Fixes build errors for newer versions of NumPy, Pandas, and SciPy.
* Corrected a critical bug in `skbio.alignment.StripedSmithWaterman`/`skbio.alignment.local_pairwise_align_ssw` which would cause the formatting of the aligned sequences to misplace gap characters by the number of gap characters present in the opposing aligned sequence up to that point. This was caused by a faulty implementation of CIGAR string parsing, see [#1679](https://github.com/biocore/scikit-bio/pull/1679) for full details.
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
* `skbio.diversity.beta_diversity` now accepts a pandas DataFrame as input.
* Avoid pandas 1.0.0 import warning ([#1688](https://github.com/biocore/scikit-bio/issues/1688))
* Added support for Python 3.8 and dropped support for Python 3.5.
* This version now depends on `scipy >= 1.3` and `pandas >= 1.0`.
## Version 0.5.5 (2018-12-10)
### Features
* `skbio.stats.composition` now has methods to compute additive log-ratio transformation and inverse additive log-ratio transformation (`alr`, `alr_inv`) as well as a method to build a basis from a sequential binary partition (`sbp_basis`).
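A minimal sketch of the new transforms (the composition values are made up; the default denominator component is assumed):
```
import numpy as np
from skbio.stats.composition import alr, alr_inv

x = np.array([0.1, 0.3, 0.4, 0.2])
y = alr(x)                     # 3 log-ratio coordinates for a 4-part composition
x_back = alr_inv(y)            # inverse transform recovers the composition
print(np.allclose(x, x_back))  # True
```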
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
### Bug fixes
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
* Python 3.6 and 3.7 are now supported
* A pytest runner is shipped with every installation ([#1633](https://github.com/biocore/scikit-bio/pull/1633))
* The nosetest framework has been replaced in favor of pytest ([#1624](https://github.com/biocore/scikit-bio/pull/1624))
* The numpy docs are deprecated in favor of [Napoleon](http://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html) ([#1629](https://github.com/biocore/scikit-bio/pull/1629))
* This version is now compatible with NumPy >= 1.9.2 and Pandas >= 0.23. ([#1627](https://github.com/biocore/scikit-bio/pull/1627))
## Version 0.5.4 (2018-08-23)
### Features
* Added `FSVD`, an alternative fast heuristic method to perform Principal Coordinates Analysis, to `skbio.stats.ordination.pcoa`.
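A minimal sketch of requesting the heuristic (the distance matrix is made up):
```
import numpy as np
from skbio import DistanceMatrix
from skbio.stats.ordination import pcoa

dm = DistanceMatrix(np.array([[0.0, 0.5, 0.75, 0.9],
                              [0.5, 0.0, 0.25, 0.6],
                              [0.75, 0.25, 0.0, 0.4],
                              [0.9, 0.6, 0.4, 0.0]]))
res = pcoa(dm, method='fsvd', number_of_dimensions=2)
print(res.proportion_explained)
```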
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
* Added optimized utility methods `f_matrix_inplace` and `e_matrix_inplace` which perform `f_matrix` and `e_matrix` computations in-place and are used by the new `center_distance_matrix` method in `skbio.stats.ordination`.
### Bug fixes
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
## Version 0.5.3 (2018-08-07)
### Features
* Added `unpack` and `unpack_by_func` methods to `skbio.tree.TreeNode` to unpack one or multiple internal nodes. The `unpack` operation removes an internal node and regrafts its children to its parent while retaining the overall length. ([#1572](https://github.com/biocore/scikit-bio/pull/1572))
* Added `support` to `skbio.tree.TreeNode` to return the support value of a node.
* Added `permdisp` to `skbio.stats.distance` to test for the homogeneity of groups. ([#1228](https://github.com/biocore/scikit-bio/issues/1228)).
* Added `pcoa_biplot` to `skbio.stats.ordination` to project descriptors into a PCoA plot.
* Pinned pandas to 0.22.0 due to https://github.com/pandas-dev/pandas/issues/20527
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
### Bug fixes
* Relaxing type checking in diversity calculations. ([#1583](https://github.com/biocore/scikit-bio/issues/1583)).
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
## Version 0.5.2 (2018-04-18)
### Features
* Added ``skbio.io.format.embl`` for reading and writing EMBL files for ``DNA``, ``RNA`` and ``Sequence`` classes.
* Removed the `ValueError` check in `skbio.stats._subsample.subsample_counts` when `replace=True` and `n` is greater than the number of items in counts. [#1527](https://github.com/biocore/scikit-bio/pull/1527)
* Added ``skbio.io.format.gff3`` for reading and writing GFF3 files for ``DNA``, ``Sequence``, and ``IntervalMetadata`` classes. ([#1450](https://github.com/biocore/scikit-bio/pull/1450))
* `skbio.metadata.IntervalMetadata` constructor has a new keyword argument, `copy_from`, for creating an `IntervalMetadata` object from an existing `IntervalMetadata` object with specified `upper_bound`.
* `skbio.metadata.IntervalMetadata` constructor allows `None` as a valid value for `upper_bound`. An `upper_bound` of `None` means that the `IntervalMetadata` object has no upper bound.
* `skbio.metadata.IntervalMetadata.drop` has a new boolean parameter `negate` to indicate whether to drop or keep the specified `Interval` objects.
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
* `skbio.tree.nj` wall-clock runtime was decreased by 99% for a 500x500 distance matrix and 93% for a 100x100 distance matrix. ([#1512](https://github.com/biocore/scikit-bio/pull/1512), [#1513](https://github.com/biocore/scikit-bio/pull/1513))
### Bug fixes
* The `include_self` parameter was not being honored in `skbio.TreeNode.tips`. The scope of this bug was that if `TreeNode.tips` was called on a tip, it would always result in an empty `list` when unrolled.
* In `skbio.stats.ordination.ca`, `proportion_explained` was missing in the returned `OrdinationResults` object. ([#1345](https://github.com/biocore/scikit-bio/issues/1345))
* `skbio.diversity.beta_diversity` now handles qualitative metrics as expected such that `beta_diversity('jaccard', mat) == beta_diversity('jaccard', mat > 0)`. Please see [#1549](https://github.com/biocore/scikit-bio/issues/1549) for further detail.
* In `skbio.stats.ordination.rda`, the occasional column mismatch in the output `biplot_scores` is fixed ([#1519](https://github.com/biocore/scikit-bio/issues/1519)).
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
* scikit-bio now depends on pandas >= 0.19.2, and is compatible with newer pandas versions (e.g. 0.20.3) that were previously incompatible.
* scikit-bio now depends on `numpy >= 1.9.2, < 1.14.0` for compatibility with Python 3.4, 3.5, and 3.6 and the available numpy conda packages in `defaults` and `conda-forge` channels.
* added support for running tests from `setup.py`. Both `python setup.py nosetests` and `python setup.py test` are now supported, however `python setup.py test` will only run a subset of the full test suite. ([#1341](https://github.com/biocore/scikit-bio/issues/1341))
## Version 0.5.1 (2016-11-12)
### Features
* Added `IntervalMetadata` and `Interval` classes in `skbio.metadata` to store, query, and manipulate information of a sub-region of a sequence. ([#1414](https://github.com/biocore/scikit-bio/issues/1414))
* `Sequence` and its child classes (including `GrammaredSequence`, `RNA`, `DNA`, `Protein`) now accept `IntervalMetadata` in their constructor API. Some of their relevant methods are also updated accordingly. ([#1430](https://github.com/biocore/scikit-bio/pull/1430))
* GenBank parser now reads and writes `Sequence` or its subclass objects with `IntervalMetadata`. ([#1440](https://github.com/biocore/scikit-bio/pull/1440))
* `DissimilarityMatrix` now has a new constructor method called `from_iterable`. ([#1343](https://github.com/biocore/scikit-bio/issues/1343)).
* `DissimilarityMatrix` now allows non-hollow matrices. ([#1343](https://github.com/biocore/scikit-bio/issues/1343)).
* `DistanceMatrix.from_iterable` now accepts a `validate=True` parameter. ([#1343](https://github.com/biocore/scikit-bio/issues/1343)).
* ``DistanceMatrix`` now has a new method called ``to_series`` to create a ``pandas.Series`` from a ``DistanceMatrix`` ([#1397](https://github.com/biocore/scikit-bio/issues/1397)); a short usage sketch follows this list.
* Added parallel beta diversity calculation support via `skbio.diversity.block_beta_diversity`. The issue and idea is discussed in ([#1181](https://github.com/biocore/scikit-bio/issues/1181), while the actual code changes are in [#1352](https://github.com/biocore/scikit-bio/pull/1352)).
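A short sketch of the `from_iterable` and `to_series` helpers mentioned above (the items and the metric are made up):
```
from skbio import DistanceMatrix

dm = DistanceMatrix.from_iterable([1, 5, 12],
                                  metric=lambda a, b: abs(a - b),
                                  key=str, validate=True)
print(dm.to_series())  # pandas Series indexed by pairs of ids
```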
### Backward-incompatible changes [stable]
* The constructor API for `Sequence` and its child classes (including `GrammaredSequence`, `RNA`, `DNA`, `Protein`) are changed from `(sequence, metadata=None, positional_metadata=None, lowercase=False)` to `(sequence, metadata=None, positional_metadata=None, interval_metadata=None, lowercase=False)`
The changes are made to allow these classes to adopt an `IntervalMetadata` object for interval features on the sequence. The `interval_metadata` parameter is added immediately after `positional_metadata` instead of appended to the end, because it is more natural and logical and, more importantly, because it is unlikely in practice to break user code. A user's code would break only if they had supplied `metadata`, `positional_metadata`, and `lowercase` parameters positionally. In the unlikely event that this happens, users will get an error telling them a bool isn't a valid `IntervalMetadata` type, so it won't silently produce buggy behavior.
### Backward-incompatible changes [experimental]
* Modifying basis handling in `skbio.stats.composition.ilr_inv` prior to checking for orthogonality. Now the basis is strictly assumed to be in the Aitchison simplex.
* `DistanceMatrix.from_iterable` default behavior is now to validate matrix by computing all pairwise distances. Pass `validate=False` to get the previous behavior (no validation, but faster execution).([#1343](https://github.com/biocore/scikit-bio/issues/1343)).
* GenBank I/O now parses sequence features into the `interval_metadata` attribute instead of `positional_metadata`. The `FEATURES` key is also removed from the `metadata` attribute.
### Performance enhancements
* `TreeNode.shear` was rewritten for approximately a 25% performance increase. ([#1399](https://github.com/biocore/scikit-bio/pull/1399))
* The `IntervalMetadata` allows dramatic decrease in memory usage in reading GenBank files of feature rich sequences. ([#1159](https://github.com/biocore/scikit-bio/issues/1159))
### Bug fixes
* `skbio.tree.TreeNode.prune` and implicitly `skbio.tree.TreeNode.shear` were not handling a situation in which a parent was validly removed during pruning operations as may happen if the resulting subtree does not include the root. Previously, an `AttributeError` would raise as `parent` would be `None` in this situation.
* numpy linking was fixed for installation under El Capitan.
* A bug was introduced in #1398 into `TreeNode.prune` and fixed in #1416 in which, under the special case of a single descendent existing from the root, the resulting children's parent references were not updated. The cause of the bug was a call made to `self.children.extend` as opposed to `self.extend` where the former is a `list.extend` without knowledge of the tree, while the latter is `TreeNode.extend` which is able to adjust references to `self.parent`.
### Miscellaneous
* Removed deprecated functions from `skbio.util`: `is_casava_v180_or_later`, `remove_files`, and `create_dir`.
* Removed deprecated `skbio.Sequence.copy` method.
## Version 0.5.0 (2016-06-14)
**IMPORTANT**: scikit-bio is no longer compatible with Python 2. scikit-bio is compatible with Python 3.4 and later.
### Features
* Added more descriptive error message to `skbio.io.registry` when attempting to read without specifying `into` and when there is no generator reader. ([#1326](https://github.com/biocore/scikit-bio/issues/1326))
* Added support for reference tags to `skbio.io.format.stockholm` reader and writer. ([#1348](https://github.com/biocore/scikit-bio/issues/1348))
* Expanded error message in `skbio.io.format.stockholm` reader when `constructor` is not passed, in order to provide better explanation to user. ([#1327](https://github.com/biocore/scikit-bio/issues/1327))
* Added `skbio.sequence.distance.kmer_distance` for computing the kmer distance between two sequences ([#913](https://github.com/biocore/scikit-bio/issues/913)); a short usage sketch follows this list.
* Added `skbio.sequence.Sequence.replace` for assigning a character to positions in a `Sequence`. ([#1222](https://github.com/biocore/scikit-bio/issues/1222))
* Added support for `pandas.RangeIndex`, lowering the memory footprint of default integer index objects. `Sequence.positional_metadata` and `TabularMSA.positional_metadata` now use `pd.RangeIndex` as the positional metadata index. `TabularMSA` now uses `pd.RangeIndex` as the default index. Usage of `pd.RangeIndex` over the previous `pd.Int64Index` [should be transparent](http://pandas.pydata.org/pandas-docs/version/0.18.0/whatsnew.html#range-index), so these changes should be non-breaking to users. scikit-bio now depends on pandas >= 0.18.0 ([#1308](https://github.com/biocore/scikit-bio/issues/1308))
* Added `reset_index=False` parameter to `TabularMSA.append` and `TabularMSA.extend` for resetting the MSA's index to the default index after appending/extending.
* Added support for partial pairwise calculations via `skbio.diversity.partial_beta_diversity`. ([#1221](https://github.com/biocore/scikit-bio/issues/1221), [#1337](https://github.com/biocore/scikit-bio/pull/1337)). This function is immediately deprecated as its return type will change in the future and should be used with caution in its present form (see the function's documentation for details).
* `TemporaryFile` and `NamedTemporaryFile` are now supported IO sources for `skbio.io` and related functionality. ([#1291](https://github.com/biocore/scikit-bio/issues/1291))
* Added `tree_node_class=TreeNode` parameter to `skbio.tree.majority_rule` to support returning consensus trees of type `TreeNode` (the default) or a type that has the same interface as `TreeNode` (e.g. `TreeNode` subclasses) ([#1193](https://github.com/biocore/scikit-bio/pull/1193))
* `TreeNode.from_linkage_matrix` and `TreeNode.from_taxonomy` now support constructing `TreeNode` subclasses. `TreeNode.bifurcate` now supports `TreeNode` subclasses ([#1193](https://github.com/biocore/scikit-bio/pull/1193))
* The `ignore_metadata` keyword has been added to `TabularMSA.iter_positions` to improve performance when metadata is not necessary.
* Pairwise aligners in `skbio.alignment` now propagate per-sequence `metadata` objects (this does not include `positional_metadata`).
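A short sketch of the new k-mer distance (the sequences are made up):
```
from skbio import DNA
from skbio.sequence.distance import kmer_distance

s1 = DNA('ACGTTA')
s2 = DNA('ACGTAA')
print(kmer_distance(s1, s2, k=3))  # fraction of unique 3-mers that differ
```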
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
* `TabularMSA.append` and `TabularMSA.extend` now require one of `minter`, `index`, or `reset_index` to be provided when incorporating new sequences into an MSA. Previous behavior was to auto-increment the index labels if `minter` and `index` weren't provided and the MSA had a default integer index, otherwise error. Use `reset_index=True` to obtain the previous behavior in a more explicit way.
* `skbio.stats.composition.ancom` now returns two `pd.DataFrame` objects, where it previously returned one. The first contains the ANCOM test results, as before, and the second contains percentile abundances of each feature in each group. The specific percentiles that are computed and returned is controlled by the new `percentiles` parameter to `skbio.stats.composition.ancom`. In the future, this second `pd.DataFrame` will not be returned by this function, but will be available through the [contingency table API](https://github.com/biocore/scikit-bio/issues/848). ([#1293](https://github.com/biocore/scikit-bio/issues/1293))
* `skbio.stats.composition.ancom` now performs multiple comparisons correction by default. The previous behavior of not performing multiple comparisons correction can be achieved by passing ``multiple_comparisons_correction=None``.
* The ``reject`` column in the first ``pd.DataFrame`` returned from `skbio.stats.composition.ancom` has been renamed ``Reject null hypothesis`` for clarity. ([#1375](https://github.com/biocore/scikit-bio/issues/1375))
### Bug fixes
* Fixed row and column names to `biplot_scores` in the `OrdinationResults` object from `skbio.stats.ordination`. This fix affect the `cca` and `rda` methods. ([#1322](https://github.com/biocore/scikit-bio/issues/1322))
* Fixed bug when using `skbio.io.format.stockholm` reader on file with multi-line tree with no id. Previously this raised an `AttributeError`, now it correctly handles this type of tree. ([#1334](https://github.com/biocore/scikit-bio/issues/1334))
* Fixed bug when reading Stockholm files with GF or GS features split over multiple lines. Previously, the feature text was simply concatenated because it was assumed to have trailing whitespace. There are examples of Stockholm files with and without trailing whitespace for multi-line features, so the `skbio.io.format.stockholm` reader now adds a single space when concatenating feature text without trailing whitespace to avoid joining words together. Multi-line trees stored as GF metadata are concatenated as they appear in the file; a space is not added when concatenating. ([#1328](https://github.com/biocore/scikit-bio/issues/1328))
* Fixed bug when using `Sequence.iter_kmers` on empty `Sequence` object. Previously this raised a `ValueError`, now it returns
an empty generator.
* Fixed minor bug where adding sequences to an empty `TabularMSA` with MSA-wide `positional_metadata` would result in a `TabularMSA` object in an inconsistent state. This could happen using `TabularMSA.append` or `TabularMSA.extend`. This bug only affects a `TabularMSA` object *without* sequences that has MSA-wide `positional_metadata` (for example, `TabularMSA([], positional_metadata={'column': []})`).
* `TreeNode.distance` now handles the situation in which `self` or `other` are ancestors. Previously, a node further up the tree was used, resulting in inflated distances. ([#807](https://github.com/biocore/scikit-bio/issues/807))
* `TreeNode.prune` can now handle a root with a single descendent. Previously, the root was ignored from possibly having a single descendent. ([#1247](https://github.com/biocore/scikit-bio/issues/1247))
* Providing the `format` keyword to `skbio.io.read` when creating a generator with an empty file will now return an empty generator instead of raising `StopIteration`. ([#1313](https://github.com/biocore/scikit-bio/issues/1313))
* `OrdinationResults` is now importable from `skbio` and `skbio.stats.ordination` and correctly linked from the documentation ([#1205](https://github.com/biocore/scikit-bio/issues/1205))
* Fixed performance bug in pairwise aligners resulting in 100x worse performance than in 0.2.4.
### Deprecated functionality [stable]
* Deprecated use of the term "non-degenerate", in favor of "definite". `GrammaredSequence.nondegenerate_chars`, `GrammaredSequence.nondegenerates`, and `GrammaredSequence.has_nondegenerates` have been renamed to `GrammaredSequence.definite_chars`, `GrammaredSequence.definites`, and `GrammaredSequence.has_definites`, respectively. The old names will be removed in scikit-bio 0.5.2. Relevant affected public classes include `GrammaredSequence`, `DNA`, `RNA`, and `Protein`.
### Deprecated functionality [experimental]
* Deprecated function `skbio.util.create_dir`. This function will be removed in scikit-bio 0.5.1. Please use the Python standard library
functionality described [here](https://docs.python.org/2/library/os.html#os.makedirs). ([#833](https://github.com/biocore/scikit-bio/issues/833))
* Deprecated function `skbio.util.remove_files`. This function will be removed in scikit-bio 0.5.1. Please use the Python standard library
functionality described [here](https://docs.python.org/2/library/os.html#os.remove). ([#833](https://github.com/biocore/scikit-bio/issues/833))
* Deprecated function `skbio.util.is_casava_v180_or_later`. This function will be removed in 0.5.1. Functionality moved to FASTQ sniffer.
([#833](https://github.com/biocore/scikit-bio/issues/833))
### Miscellaneous
* When installing scikit-bio via `pip`, numpy must now be installed first ([#1296](https://github.com/biocore/scikit-bio/issues/1296))
## Version 0.4.2 (2016-02-17)
Minor maintenance release. **This is the last Python 2.7 compatible release. Future scikit-bio releases will only support Python 3.**
### Features
* Added `skbio.tree.TreeNode.bifurcate` for converting multifurcating trees into bifurcating trees. ([#896](https://github.com/biocore/scikit-bio/issues/896))
* Added `skbio.io.format.stockholm` for reading Stockholm files into a `TabularMSA` and writing from a `TabularMSA`. ([#967](https://github.com/biocore/scikit-bio/issues/967))
* scikit-bio `Sequence` objects have better compatibility with numpy. For example, calling `np.asarray(sequence)` now converts the sequence to a numpy array of characters (the same as calling `sequence.values`).
* Added `skbio.sequence.distance` subpackage for computing distances between scikit-bio `Sequence` objects ([#913](https://github.com/biocore/scikit-bio/issues/913))
* Added ``skbio.sequence.GrammaredSequence``, which can be inherited from to create grammared sequences with custom alphabets (e.g., for use with TabularMSA) ([#1175](https://github.com/biocore/scikit-bio/issues/1175))
* Added ``skbio.util.classproperty`` decorator
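A minimal sketch of defining a custom grammared sequence with ``GrammaredSequence`` and the new ``classproperty`` decorator (note: this uses the `definite_chars` name adopted in a later release; in 0.4.2 itself the corresponding property was named `nondegenerate_chars`):
```python
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty


class CustomSequence(GrammaredSequence):
    @classproperty
    def degenerate_map(cls):
        # each degenerate character maps to the set of characters it can stand for
        return {"X": set("AB")}

    @classproperty
    def definite_chars(cls):
        return set("ABC")

    @classproperty
    def default_gap_char(cls):
        return '-'

    @classproperty
    def gap_chars(cls):
        return set('-.')


seq = CustomSequence('ABXB-C')   # validated against the custom alphabet
```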
### Backward-incompatible changes [stable]
* When sniffing or reading a file (`skbio.io.sniff`, `skbio.io.read`, or the object-oriented `.read()` interface), passing `newline` as a keyword argument to `skbio.io.open` now raises a `TypeError`. This backward-incompatible change to a stable API is necessary because it fixes a bug (more details in bug fix section below).
* When reading a FASTQ or QSEQ file and passing `variant='solexa'`, `ValueError` is now raised instead of `NotImplementedError`. This backward-incompatible change to a stable API is necessary to avoid creating a spin-locked process due to [a bug in Python](https://bugs.python.org/issue25786). See [#1256](https://github.com/biocore/scikit-bio/issues/1256) for details. This change is temporary and will be reverted to `NotImplementedError` when the bug is fixed in Python.
### Backward-incompatible changes [experimental]
* `skbio.io.format.genbank`: When reading GenBank files, the date field of the LOCUS line is no longer parsed into a `datetime.datetime` object and is left as a string. When writing GenBank files, the locus date metadata is expected to be a string instead of a `datetime.datetime` object ([#1153](https://github.com/biocore/scikit-bio/issues/1153))
* `Sequence.distance` now converts the input sequence (`other`) to its type before passing both sequences to `metric`. Previous behavior was to always convert to `Sequence`.
### Bug fixes
* Fixed bug when using `Sequence.distance` or `DistanceMatrix.from_iterable` to compute distances between `Sequence` objects with differing `metadata`/`positional_metadata` and passing `metric=scipy.spatial.distance.hamming` ([#1254](https://github.com/biocore/scikit-bio/issues/1254))
* Fixed performance bug when computing Hamming distances between `Sequence` objects in `DistanceMatrix.from_iterable` ([#1250](https://github.com/biocore/scikit-bio/issues/1250))
* Changed `skbio.stats.composition.multiplicative_replacement` to raise an error whenever a large value of `delta` is chosen ([#1241](https://github.com/biocore/scikit-bio/issues/1241))
* When sniffing or reading a file (`skbio.io.sniff`, `skbio.io.read`, or the object-oriented `.read()` interface), passing `newline` as a keyword argument to `skbio.io.open` now raises a `TypeError`. The file format's `newline` character will be used when opening the file. Previous behavior allowed overriding the format's `newline` character but this could cause issues with readers that assume newline characters are those defined by the file format (which is an entirely reasonable assumption). This bug is very unlikely to have surfaced in practice as the default `newline` behavior is *universal newlines mode*.
* DNA, RNA, and Protein are no longer inheritable because they assume an IUPAC alphabet.
* `DistanceMatrix` constructor provides more informative error message when data contains NaNs ([#1276](https://github.com/biocore/scikit-bio/issues/1276))
### Miscellaneous
* Warnings raised by scikit-bio now share a common subclass ``skbio.util.SkbioWarning``.
## Version 0.4.1 (2015-12-09)
### Features
* The ``TabularMSA`` object was added to represent and operate on tabular multiple sequence alignments. This satisfies [RFC 1](https://github.com/biocore/scikit-bio-rfcs/blob/master/active/001-tabular-msa.md). See the ``TabularMSA`` docs for full details. A short construction sketch appears after this list.
* Added phylogenetic diversity metrics, including weighted UniFrac, unweighted UniFrac, and Faith's Phylogenetic Diversity. These are accessible as ``skbio.diversity.beta.unweighted_unifrac``, ``skbio.diversity.beta.weighted_unifrac``, and ``skbio.diversity.alpha.faith_pd``, respectively.
* Addition of the function ``skbio.diversity.alpha_diversity`` to support applying an alpha diversity metric to multiple samples in one call.
* Addition of the functions ``skbio.diversity.get_alpha_diversity_metrics`` and ``skbio.diversity.get_beta_diversity_metrics`` to support discovery of the alpha and beta diversity metrics implemented in scikit-bio.
* Added `skbio.stats.composition.ancom` function, a test for OTU differential abundance across sample categories. ([#1054](https://github.com/biocore/scikit-bio/issues/1054))
* Added `skbio.io.format.blast7` for reading BLAST+ output format 7 or BLAST output format 9 files into a `pd.DataFrame`. ([#1110](https://github.com/biocore/scikit-bio/issues/1110))
* Added `skbio.DissimilarityMatrix.to_data_frame` method for creating a ``pandas.DataFrame`` from a `DissimilarityMatrix` or `DistanceMatrix`. ([#757](https://github.com/biocore/scikit-bio/issues/757))
* Added support for one-dimensional vector of dissimilarities in `skbio.stats.distance.DissimilarityMatrix`
constructor. ([#624](https://github.com/biocore/scikit-bio/issues/624))
* Added `skbio.io.format.blast6` for reading BLAST+ output format 6 or BLAST output format 8 files into a `pd.DataFrame`. ([#1110](https://github.com/biocore/scikit-bio/issues/1110))
* Added `inner`, `ilr`, `ilr_inv` and `clr_inv` to ``skbio.stats.composition``, which enable linear transformations on compositions ([#892](https://github.com/biocore/scikit-bio/issues/892))
* Added ``skbio.diversity.alpha.pielou_e`` function as an evenness metric of alpha diversity. ([#1068](https://github.com/biocore/scikit-bio/issues/1068))
* Added `to_regex` method to `skbio.sequence._iupac_sequence` ABC - it returns a regex object that matches all non-degenerate versions of the sequence.
* Added ``skbio.util.assert_ordination_results_equal`` function for comparing ``OrdinationResults`` objects in unit tests.
* Added ``skbio.io.format.genbank`` for reading and writing GenBank/GenPept for ``DNA``, ``RNA``, ``Protein`` and ``Sequence`` classes.
* Added ``skbio.util.RepresentationWarning`` for warning about substitutions, assumptions, or particular alterations that were made for the successful completion of a process.
* ``TreeNode.tip_tip_distances`` now supports nodes without an associated length. In this case, a length of 0.0 is assumed and an ``skbio.util.RepresentationWarning`` is raised. Previous behavior was to raise a ``NoLengthError``. ([#791](https://github.com/biocore/scikit-bio/issues/791))
* ``DistanceMatrix`` now has a new constructor method called `from_iterable`.
* ``Sequence`` now accepts ``lowercase`` keyword like ``DNA`` and others. Updated ``fasta``, ``fastq``, and ``qseq`` readers/writers for ``Sequence`` to reflect this.
* The ``lowercase`` method has been moved up to ``Sequence`` meaning all sequence objects now have a ``lowercase`` method.
* Added ``reverse_transcribe`` class method to ``RNA``.
* Added `Sequence.observed_chars` property for obtaining the set of observed characters in a sequence. ([#1075](https://github.com/biocore/scikit-bio/issues/1075))
* Added `Sequence.frequencies` method for computing character frequencies in a sequence. ([#1074](https://github.com/biocore/scikit-bio/issues/1074))
* Added experimental class-method ``Sequence.concat`` which will produce a new sequence from an iterable of existing sequences. Parameters control how positional metadata is propagated during a concatenation.
* ``TreeNode.to_array`` now supports replacing ``nan`` branch lengths in the resulting branch length vector with the value provided as ``nan_length_value``.
* ``skbio.io.format.phylip`` now supports sniffing and reading strict, sequential PHYLIP-formatted files into ``skbio.Alignment`` objects. ([#1006](https://github.com/biocore/scikit-bio/issues/1006))
* Added `default_gap_char` class property to ``DNA``, ``RNA``, and ``Protein`` for representing gap characters in a new sequence.
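A minimal sketch of constructing the new ``TabularMSA`` object described at the top of this list, using ``minter`` to build the index from per-sequence metadata:
```python
from skbio import DNA, TabularMSA

seqs = [DNA('ACG-T', metadata={'id': 'seq1'}),
        DNA('ACGGT', metadata={'id': 'seq2'})]
msa = TabularMSA(seqs, minter='id')

msa.shape   # Shape(sequence=2, position=5)
msa.index   # pandas Index(['seq1', 'seq2'])
```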
### Backward-incompatible changes [stable]
* `Sequence.kmer_frequencies` now returns a `dict`. Previous behavior was to return a `collections.Counter` if `relative=False` was passed, and a `collections.defaultdict` if `relative=True` was passed. In the case of a missing key, the `Counter` would return 0 and the `defaultdict` would return 0.0. Because the return type is now always a `dict`, attempting to access a missing key will raise a `KeyError`. This change *may* break backwards-compatibility depending on how the `Counter`/`defaultdict` is being used. We hope that in most cases this change will not break backwards-compatibility because both `Counter` and `defaultdict` are `dict` subclasses.
If the previous behavior is desired, convert the `dict` into a `Counter`/`defaultdict`:
```python
import collections
from skbio import Sequence
seq = Sequence('ACCGAGTTTAACCGAATA')
# Counter
freqs_dict = seq.kmer_frequencies(k=8)
freqs_counter = collections.Counter(freqs_dict)
# defaultdict
freqs_dict = seq.kmer_frequencies(k=8, relative=True)
freqs_default_dict = collections.defaultdict(float, freqs_dict)
```
**Rationale:** We believe it is safer to return `dict` instead of `Counter`/`defaultdict` as this may prevent error-prone usage of the return value. Previous behavior allowed accessing missing kmers, returning 0 or 0.0 depending on the `relative` parameter. This is convenient in many cases but also potentially misleading. For example, consider the following code:
```python
from skbio import Sequence
seq = Sequence('ACCGAGTTTAACCGAATA')
freqs = seq.kmer_frequencies(k=8)
freqs['ACCGA']
```
Previous behavior would return 0 because the kmer `'ACCGA'` is not present in the `Counter`. In one respect this is the correct answer because we asked for kmers of length 8; `'ACCGA'` is a different length so it is not included in the results. However, we believe it is safer to avoid this implicit behavior in case the user assumes there are no `'ACCGA'` kmers in the sequence (which there are!). A `KeyError` in this case is more explicit and forces the user to consider their query. Returning a `dict` will also be consistent with `Sequence.frequencies`.
### Backward-incompatible changes [experimental]
* Replaced ``PCoA``, ``CCA``, ``CA`` and ``RDA`` in ``skbio.stats.ordination`` with equivalent functions ``pcoa``, ``cca``, ``ca`` and ``rda``. These functions now take ``pd.DataFrame`` objects.
* Change ``OrdinationResults`` to have its attributes based on ``pd.DataFrame`` and ``pd.Series`` objects, instead of pairs of identifiers and values. The changes are as follows:
- ``species`` and ``species_ids`` have been replaced by a ``pd.DataFrame`` named ``features``.
- ``site`` and ``site_ids`` have been replaced by a ``pd.DataFrame`` named ``samples``.
- ``eigvals`` is now a ``pd.Series`` object.
- ``proportion_explained`` is now a ``pd.Series`` object.
- ``biplot`` is now a ``pd.DataFrame`` object named ``biplot_scores``.
- ``site_constraints`` is now a ``pd.DataFrame`` object named ``sample_constraints``.
* ``short_method_name`` and ``long_method_name`` are now required arguments of the ``OrdinationResults`` object.
* Removed `skbio.diversity.alpha.equitability`. Please use `skbio.diversity.alpha.pielou_e`, which is more accurately named and better documented. Note that `equitability` by default used logarithm base 2 while `pielou_e` uses logarithm base `e` as described in Heip 1974.
* ``skbio.diversity.beta.pw_distances`` is now called ``skbio.diversity.beta_diversity``. This function no longer defines a default metric, and ``metric`` is now the first argument to this function. This function can also now take a pairwise distances function as ``pairwise_func``.
* Deprecated function ``skbio.diversity.beta.pw_distances_from_table`` has been removed from scikit-bio as scheduled. Code that used this should be adapted to use ``skbio.diversity.beta_diversity``.
* ``TreeNode.index_tree`` now returns a 2-D numpy array as its second return value (the child node index) instead of a 1-D numpy array.
* Deprecated functions `skbio.draw.boxplots` and `skbio.draw.grouped_distributions` have been removed from scikit-bio as scheduled. These functions generated plots that were not specific to bioinformatics. These types of plots can be generated with seaborn or another general-purpose plotting package.
* Deprecated function `skbio.stats.power.bootstrap_power_curve` has been removed from scikit-bio as scheduled. Use `skbio.stats.power.subsample_power` or `skbio.stats.power.subsample_paired_power` followed by `skbio.stats.power.confidence_bound`.
* Deprecated function `skbio.stats.spatial.procrustes` has been removed from scikit-bio as scheduled in favor of `scipy.spatial.procrustes`.
* Deprecated class `skbio.tree.CompressedTrie` and function `skbio.tree.fasta_to_pairlist` have been removed from scikit-bio as scheduled in favor of existing general-purpose Python trie packages.
* Deprecated function `skbio.util.flatten` has been removed from scikit-bio as scheduled in favor of solutions available in the Python standard library (see [here](http://stackoverflow.com/a/952952/3639023) and [here](http://stackoverflow.com/a/406199/3639023) for examples).
* Pairwise alignment functions in `skbio.alignment` now return a tuple containing the `TabularMSA` alignment, alignment score, and start/end positions (see the sketch after this list). The returned `TabularMSA`'s `index` is always the default integer index; sequence IDs are no longer propagated to the MSA. Additionally, the pairwise alignment functions now accept the following input types to align:
- `local_pairwise_align_nucleotide`: `DNA` or `RNA`
- `local_pairwise_align_protein`: `Protein`
- `local_pairwise_align`: `IUPACSequence`
- `global_pairwise_align_nucleotide`: `DNA`, `RNA`, or `TabularMSA[DNA|RNA]`
- `global_pairwise_align_protein`: `Protein` or `TabularMSA[Protein]`
- `global_pairwise_align`: `IUPACSequence` or `TabularMSA`
- `local_pairwise_align_ssw`: `DNA`, `RNA`, or `Protein`. Additionally, this function now overrides the `protein` kwarg based on input type. `constructor` parameter was removed because the function now determines the return type based on input type.
* Removed `skbio.alignment.SequenceCollection` in favor of using a list or other standard library containers to store scikit-bio sequence objects (most `SequenceCollection` operations were simple list comprehensions). Use `DistanceMatrix.from_iterable` instead of `SequenceCollection.distances` (pass `key="id"` to exactly match original behavior).
* Removed `skbio.alignment.Alignment` in favor of `skbio.alignment.TabularMSA`.
* Removed `skbio.alignment.SequenceCollectionError` and `skbio.alignment.AlignmentError` exceptions as their corresponding classes no longer exist.
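A minimal sketch of the new pairwise-aligner return value (alignment, score, and start/end positions); the pure-Python aligners still emit an ``EfficiencyWarning`` when called:
```python
from skbio import DNA
from skbio.alignment import global_pairwise_align_nucleotide

alignment, score, start_end_positions = global_pairwise_align_nucleotide(
    DNA('GACCTTGACCAGGTACC'),
    DNA('GAACTTTGACGTAAC'))

alignment            # TabularMSA[DNA] with the default integer index
score                # alignment score as a float
start_end_positions  # list of (start, end) tuples, one per input sequence
```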
### Bug Fixes
* ``Sequence`` objects now handle slicing of empty positional metadata correctly. Any metadata that is empty will no longer be propagated by the internal ``_to`` constructor. ([#1133](https://github.com/biocore/scikit-bio/issues/1133))
* ``DissimilarityMatrix.plot()`` no longer leaves a white border around the
heatmap it plots (PR #1070).
* ``TreeNode.root_at_midpoint`` no longer fails when a node with two equal length child branches exists in the tree. ([#1077](https://github.com/biocore/scikit-bio/issues/1077))
* ``TreeNode._set_max_distance``, as called through ``TreeNode.get_max_distance`` or ``TreeNode.root_at_midpoint``, would store distance information as ``list``s in the attribute ``MaxDistTips`` on each node in the tree; however, these distances were only valid for the node in which the call to ``_set_max_distance`` was made. The values contained in ``MaxDistTips`` are now correct across the tree following a call to ``get_max_distance``. The scope of impact of this bug is limited to users that were interacting directly with ``MaxDistTips`` on descendant nodes; this bug does not impact any known method within scikit-bio. ([#1223](https://github.com/biocore/scikit-bio/issues/1223))
* Added missing `nose` dependency to setup.py's `install_requires`. ([#1214](https://github.com/biocore/scikit-bio/issues/1214))
* Fixed issue that resulted in legends of ``OrdinationResult`` plots sometimes being truncated. ([#1210](https://github.com/biocore/scikit-bio/issues/1210))
### Deprecated functionality [stable]
* `skbio.Sequence.copy` has been deprecated in favor of `copy.copy(seq)` and `copy.deepcopy(seq)`.
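A minimal sketch of the recommended replacement:
```python
import copy
from skbio import Sequence

seq = Sequence('ACGT', metadata={'id': 'seq1'})

shallow = copy.copy(seq)     # replaces seq.copy()
deep = copy.deepcopy(seq)    # replaces seq.copy(deep=True)
```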
### Miscellaneous
* Doctests are now written in Python 3.
* ``make test`` now validates MANIFEST.in using [check-manifest](https://github.com/mgedmin/check-manifest). ([#461](https://github.com/biocore/scikit-bio/issues/461))
* Many new alpha diversity equations added to ``skbio.diversity.alpha`` documentation. ([#321](https://github.com/biocore/scikit-bio/issues/321))
* Order of ``lowercase`` and ``validate`` keywords swapped in ``DNA``, ``RNA``, and ``Protein``.
## Version 0.4.0 (2015-07-08)
Initial beta release. In addition to the changes detailed below, the following
subpackages have been mostly or entirely rewritten and most of their APIs are
substantially different (and improved!):
* `skbio.sequence`
* `skbio.io`
The APIs of these subpackages are now stable, and all others are experimental. See the [API stability docs](https://github.com/biocore/scikit-bio/tree/0.4.0/doc/source/user/api_stability.rst) for more details, including what we mean by *stable* and *experimental* in this context. We recognize that this is a lot of backward-incompatible changes. To avoid these types of changes being a surprise to our users, our public APIs are now decorated to make it clear to developers when an API can be relied upon (stable) and when it may be subject to change (experimental).
### Features
* Added `skbio.stats.composition` for analyzing data made up of proportions (see the sketch after this list)
* Added new ``skbio.stats.evolve`` subpackage for evolutionary statistics. Currently contains a single function, ``hommola_cospeciation``, which implements a permutation-based test of correlation between two distance matrices.
* Added support for ``skbio.io.util.open_file`` and ``skbio.io.util.open_files`` to pull files from HTTP and HTTPS URLs. This behavior propagates to the I/O registry.
* FASTA/QUAL (``skbio.io.format.fasta``) and FASTQ (``skbio.io.format.fastq``) readers now allow blank or whitespace-only lines at the beginning of the file, between records, or at the end of the file. A blank or whitespace-only line in any other location will continue to raise an error [#781](https://github.com/biocore/scikit-bio/issues/781).
* scikit-bio now ignores leading and trailing whitespace characters on each line while reading FASTA/QUAL and FASTQ files.
* Added `ratio` parameter to `skbio.stats.power.subsample_power`. This allows the user to calculate power on groups of uneven size (for example, drawing twice as many samples from Group B as from Group A). If `ratio` is not set, group sizes will remain equal across all groups.
* Power calculations (`skbio.stats.power.subsample_power` and `skbio.stats.power.subsample_paired_power`) can use test functions that return multiple p values, like some multivariate linear regression models. Previously, the power calculations required the test to return a single p value.
* Added ``skbio.util.assert_data_frame_almost_equal`` function for comparing ``pd.DataFrame`` objects in unit tests.
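A minimal sketch of the new ``skbio.stats.composition`` module (first item in this list), closing a count vector to proportions and applying the centered log-ratio transform:
```python
import numpy as np
from skbio.stats.composition import closure, clr

counts = np.array([2, 2, 6])

proportions = closure(counts)   # array([0.2, 0.2, 0.6])
clr(proportions)                # centered log-ratio transform of the composition
```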
### Performance enhancements
* The speed of quality score decoding has been significantly improved (~2x) when reading `fastq` files.
* The speed of `NucleotideSequence.reverse_complement` has been improved (~6x).
### Bug fixes
* Changed `Sequence.distance` to raise an error any time two sequences are passed of different lengths regardless of the `distance_fn` being passed. [(#514)](https://github.com/biocore/scikit-bio/issues/514)
* Fixed issue with ``TreeNode.extend`` where if given the children of another ``TreeNode`` object (``tree.children``), both trees would be left in an incorrect and unpredictable state. ([#889](https://github.com/biocore/scikit-bio/issues/889))
* Changed the way power was calculated in `subsample_paired_power` to move the subsample selection before the test is performed. This increases the number of Monte Carlo simulations performed during power estimation, and improves the accuracy of the returned estimate. Previous power estimates from `subsample_paired_power` should be disregarded and re-calculated. ([#910](https://github.com/biocore/scikit-bio/issues/910))
* Fixed issue where `randdm` was attempting to create asymmetric distance matrices. This was causing an error to be raised by the `DistanceMatrix` constructor inside of the `randdm` function, so that `randdm` would fail when attempting to create large distance matrices. ([#943](https://github.com/biocore/scikit-bio/issues/943))
### Deprecated functionality
* Deprecated `skbio.util.flatten`. This function will be removed in scikit-bio 0.3.1. Please use standard Python library functionality
described here [Making a flat list out of lists of lists](http://stackoverflow.com/a/952952/3639023), [Flattening a shallow list](http://stackoverflow.com/a/406199/3639023) ([#833](https://github.com/biocore/scikit-bio/issues/833))
* Deprecated `skbio.stats.power.bootstrap_power_curve`; it will be removed in scikit-bio 0.4.1. It is deprecated in favor of using ``subsample_power`` or ``subsample_paired_power`` to calculate a power matrix, and then using ``confidence_bound`` to calculate the average and confidence intervals.
### Backward-incompatible changes
* Removed the following deprecated functionality:
- `skbio.parse` subpackage, including `SequenceIterator`, `FastaIterator`, `FastqIterator`, `load`, `parse_fasta`, `parse_fastq`, `parse_qual`, `write_clustal`, `parse_clustal`, and `FastqParseError`; please use `skbio.io` instead.
- `skbio.format` subpackage, including `fasta_from_sequence`, `fasta_from_alignment`, and `format_fastq_record`; please use `skbio.io` instead.
- `skbio.alignment.SequenceCollection.int_map`; please use `SequenceCollection.update_ids` instead.
- `skbio.alignment.SequenceCollection` methods `to_fasta` and `toFasta`; please use `SequenceCollection.write` instead.
- `constructor` parameter in `skbio.alignment.Alignment.majority_consensus`; please convert returned biological sequence object manually as desired (e.g., `str(seq)`).
- `skbio.alignment.Alignment.to_phylip`; please use `Alignment.write` instead.
- `skbio.sequence.BiologicalSequence.to_fasta`; please use `BiologicalSequence.write` instead.
- `skbio.tree.TreeNode` methods `from_newick`, `from_file`, and `to_newick`; please use `TreeNode.read` and `TreeNode.write` instead.
- `skbio.stats.distance.DissimilarityMatrix` methods `from_file` and `to_file`; please use `DissimilarityMatrix.read` and `DissimilarityMatrix.write` instead.
- `skbio.stats.ordination.OrdinationResults` methods `from_file` and `to_file`; please use `OrdinationResults.read` and `OrdinationResults.write` instead.
- `skbio.stats.p_value_to_str`; there is no replacement.
- `skbio.stats.subsample`; please use `skbio.stats.subsample_counts` instead.
- `skbio.stats.distance.ANOSIM`; please use `skbio.stats.distance.anosim` instead.
- `skbio.stats.distance.PERMANOVA`; please use `skbio.stats.distance.permanova` instead.
- `skbio.stats.distance.CategoricalStatsResults`; there is no replacement, please use `skbio.stats.distance.anosim` or `skbio.stats.distance.permanova`, which will return a `pandas.Series` object.
* `skbio.alignment.Alignment.majority_consensus` now returns `BiologicalSequence('')` if the alignment is empty. Previously, `''` was returned.
* `min_observations` was removed from `skbio.stats.power.subsample_power` and `skbio.stats.power.subsample_paired_power`. The minimum number of samples for subsampling depends on the data set and statistical tests. Having a default parameter placed unnecessary limitations on the technique.
### Miscellaneous
* Changed testing procedures
- Developers should now use `make test`
- Users can use `python -m skbio.test`
- Added `skbio.util._testing.TestRunner` (available through `skbio.util.TestRunner`). Used to provide a `test` method for each module init file. This class represents a unified testing path which wraps all `skbio` testing functionality.
- Autodetect Python version and disable doctests for Python 3.
* `numpy` is no longer required to be installed before installing scikit-bio!
* Upgraded checklist.py to check source files non-conforming to [new header style](http://scikit-bio.org/docs/latest/development/new_module.html). ([#855](https://github.com/biocore/scikit-bio/issues/855))
* Updated to use `natsort` >= 4.0.0.
* The method of subsampling was changed for ``skbio.stats.power.subsample_paired_power``. Rather than drawing a paired sample for the run and then subsampling for each count, the subsample is now drawn for each sample and each run. In test data, this did not significantly alter the power results.
* checklist.py now enforces `__future__` imports in .py files.
## Version 0.2.3 (2015-02-13)
### Features
* Modified ``skbio.stats.distance.pwmantel`` to accept a list of filepaths. This is useful because it reduces memory consumption by loading only two matrices at a time, rather than requiring all distance matrices to be loaded into memory.
* Added ``skbio.util.find_duplicates`` for finding duplicate elements in an iterable.
### Bug fixes
* Fixed floating point precision bugs in ``Alignment.position_frequencies``, ``Alignment.position_entropies``, ``Alignment.omit_gap_positions``, ``Alignment.omit_gap_sequences``, ``BiologicalSequence.k_word_frequencies``, and ``SequenceCollection.k_word_frequencies`` ([#801](https://github.com/biocore/scikit-bio/issues/801)).
### Backward-incompatible changes
* Removed ``feature_types`` attribute from ``BiologicalSequence`` and all subclasses ([#797](https://github.com/biocore/scikit-bio/pull/797)).
* Removed ``find_features`` method from ``BiologicalSequence`` and ``ProteinSequence`` ([#797](https://github.com/biocore/scikit-bio/pull/797)).
* ``BiologicalSequence.k_word_frequencies`` now returns a ``collections.defaultdict`` of type ``float`` instead of type ``int``. This only affects the "default" case, when a key isn't present in the dictionary. Previous behavior would return ``0`` as an ``int``, while the new behavior is to return ``0.0`` as a ``float``. This change also affects the ``defaultdict``s that are returned by ``SequenceCollection.k_word_frequencies``.
### Miscellaneous
* ``DissimilarityMatrix`` and ``DistanceMatrix`` now report duplicate IDs in the ``DissimilarityMatrixError`` message that can be raised during validation.
## Version 0.2.2 (2014-12-04)
### Features
* Added ``plot`` method to ``skbio.stats.distance.DissimilarityMatrix`` for creating basic heatmaps of a dissimilarity/distance matrix (see [#684](https://github.com/biocore/scikit-bio/issues/684)). Also added ``_repr_png_`` and ``_repr_svg_`` methods for automatic display in the IPython Notebook, with ``png`` and ``svg`` properties for direct access.
* Added `__str__` method to `skbio.stats.ordination.OrdinationResults`.
* Added ``skbio.stats.distance.anosim`` and ``skbio.stats.distance.permanova`` functions, which replace the ``skbio.stats.distance.ANOSIM`` and ``skbio.stats.distance.PERMANOVA`` classes. These new functions provide simpler procedural interfaces to running these statistical methods. They also provide more convenient access to results by returning a ``pandas.Series`` instead of a ``CategoricalStatsResults`` object. These functions have more extensive documentation than their previous versions. If significance tests are suppressed, p-values are returned as ``np.nan`` instead of ``None`` for consistency with other statistical methods in scikit-bio. [#754](https://github.com/biocore/scikit-bio/issues/754)
* Added `skbio.stats.power` for performing empirical power analysis. The module uses existing datasets and iteratively draws samples to estimate the number of samples needed to see a significant difference for a given critical value.
* Added `skbio.stats.isubsample` for subsampling from an unknown number of values. This method supports subsampling from multiple partitions and does not require that all items be stored in memory, requiring approximately `O(N*M)` space where `N` is the number of partitions and `M` is the maximum subsample size.
* Added ``skbio.stats.subsample_counts``, which replaces ``skbio.stats.subsample``. See deprecation section below for more details ([#770](https://github.com/biocore/scikit-bio/issues/770)).
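A minimal sketch of ``skbio.stats.subsample_counts`` (drawing a fixed number of observations, without replacement, from a vector of counts):
```python
from skbio.stats import subsample_counts

counts = [4, 0, 3, 2, 1]                  # e.g., per-OTU observation counts
subsampled = subsample_counts(counts, 5)  # randomly draw 5 observations without replacement
subsampled.sum()                          # 5
```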
### Bug fixes
* Fixed issue where SSW wouldn't compile on i686 architectures ([#409](https://github.com/biocore/scikit-bio/issues/409)).
### Deprecated functionality
* Deprecated ``skbio.stats.p_value_to_str``. This function will be removed in scikit-bio 0.3.0. Permutation-based p-values in scikit-bio are calculated as ``(num_extreme + 1) / (num_permutations + 1)``, so it is impossible to obtain a p-value of zero. This function historically existed for correcting the number of digits displayed when obtaining a p-value of zero. Since this is no longer possible, this functionality will be removed.
* Deprecated ``skbio.stats.distance.ANOSIM`` and ``skbio.stats.distance.PERMANOVA`` in favor of ``skbio.stats.distance.anosim`` and ``skbio.stats.distance.permanova``, respectively.
* Deprecated ``skbio.stats.distance.CategoricalStatsResults`` in favor of using ``pandas.Series`` to store statistical method results. ``anosim`` and ``permanova`` return ``pandas.Series`` instead of ``CategoricalStatsResults``.
* Deprecated ``skbio.stats.subsample`` in favor of ``skbio.stats.subsample_counts``, which provides an identical interface; only the function name has changed. ``skbio.stats.subsample`` will be removed in scikit-bio 0.3.0.
### Backward-incompatible changes
* Deprecation warnings are now raised using ``DeprecationWarning`` instead of ``UserWarning`` ([#774](https://github.com/biocore/scikit-bio/issues/774)).
### Miscellaneous
* The ``pandas.DataFrame`` returned by ``skbio.stats.distance.pwmantel`` now stores p-values as floats and does not convert them to strings with a specific number of digits. p-values that were previously stored as "N/A" are now stored as ``np.nan`` for consistency with other statistical methods in scikit-bio. See note in "Deprecated functionality" above regarding ``p_value_to_str`` for details.
* scikit-bio now supports versions of IPython < 2.0.0 ([#767](https://github.com/biocore/scikit-bio/issues/767)).
## Version 0.2.1 (2014-10-27)
This is an alpha release of scikit-bio. At this stage, major backwards-incompatible API changes can and will happen. Unified I/O with the scikit-bio I/O registry was the focus of this release.
### Features
* Added ``strict`` and ``lookup`` optional parameters to ``skbio.stats.distance.mantel`` for handling reordering and matching of IDs when provided ``DistanceMatrix`` instances as input (these parameters were previously only available in ``skbio.stats.distance.pwmantel``).
* ``skbio.stats.distance.pwmantel`` now accepts an iterable of ``array_like`` objects. Previously, only ``DistanceMatrix`` instances were allowed.
* Added ``plot`` method to ``skbio.stats.ordination.OrdinationResults`` for creating basic 3-D matplotlib scatterplots of ordination results, optionally colored by metadata in a ``pandas.DataFrame`` (see [#518](https://github.com/biocore/scikit-bio/issues/518)). Also added ``_repr_png_`` and ``_repr_svg_`` methods for automatic display in the IPython Notebook, with ``png`` and ``svg`` properties for direct access.
* Added ``skbio.stats.ordination.assert_ordination_results_equal`` for comparing ``OrdinationResults`` objects for equality in unit tests.
* ``BiologicalSequence`` (and its subclasses) now optionally store Phred quality scores. A biological sequence's quality scores are stored as a 1-D ``numpy.ndarray`` of nonnegative integers that is the same length as the biological sequence. Quality scores can be provided upon object instantiation via the keyword argument ``quality``, and can be retrieved via the ``BiologicalSequence.quality`` property. ``BiologicalSequence.has_quality`` is also provided for determining whether a biological sequence has quality scores or not. See [#616](https://github.com/biocore/scikit-bio/issues/616) for more details.
* Added ``BiologicalSequence.sequence`` property for retrieving the underlying string representing the sequence characters. This was previously (and still is) accessible via ``BiologicalSequence.__str__``. It is provided via a property for convenience and explicitness.
* Added ``BiologicalSequence.equals`` for full control over equality testing of biological sequences. By default, biological sequences must have the same type, underlying sequence of characters, identifier, description, and quality scores to compare equal. These properties can be ignored via the keyword argument ``ignore``. The behavior of ``BiologicalSequence.__eq__``/``__ne__`` remains unchanged (only type and underlying sequence of characters are compared).
* Added ``BiologicalSequence.copy`` for creating a copy of a biological sequence, optionally with one or more attributes updated.
* ``BiologicalSequence.__getitem__`` now supports specifying a sequence of indices to take from the biological sequence.
* Methods to read and write taxonomies are now available under ``skbio.tree.TreeNode.from_taxonomy`` and ``skbio.tree.TreeNode.to_taxonomy`` respectively.
* Added ``SequenceCollection.update_ids``, which provides a flexible way of updating sequence IDs on a ``SequenceCollection`` or ``Alignment`` (note that a new object is returned, since instances of these classes are immutable). Deprecated ``SequenceCollection.int_map`` in favor of this new method; it will be removed in scikit-bio 0.3.0.
* Added ``skbio.util.cardinal_to_ordinal`` for converting a cardinal number to ordinal string (e.g., useful for error messages).
* New I/O Registry: supports multiple file formats, automatic file format detection when reading, unified procedural ``skbio.io.read`` and ``skbio.io.write`` in addition to OOP interfaces (``read/write`` methods) on the below objects. See ``skbio.io`` for more details. A short usage sketch appears after this list.
- Added "clustal" format support:
* Has sniffer
* Readers: ``Alignment``
* Writers: ``Alignment``
- Added "lsmat" format support:
* Has sniffer
* Readers: ``DissimilarityMatrix``, ``DistanceMatrix``
* Writers: ``DissimilarityMatrix``, ``DistanceMatrix``
- Added "ordination" format support:
* Has sniffer
* Readers: ``OrdinationResults``
* Writers: ``OrdinationResults``
- Added "newick" format support:
* Has sniffer
* Readers: ``TreeNode``
* Writers: ``TreeNode``
- Added "phylip" format support:
* No sniffer
* Readers: None
* Writers: ``Alignment``
- Added "qseq" format support:
* Has sniffer
* Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
* Writers: None
- Added "fasta"/QUAL format support:
* Has sniffer
* Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``Alignment``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
* Writers: same as readers
- Added "fastq" format support:
* Has sniffer
* Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``Alignment``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
* Writers: same as readers
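A minimal sketch of the unified registry interface introduced above, assuming a hypothetical FASTA file ``seqs.fna``:
```python
import skbio.io
from skbio import DNASequence

# Procedural interface; the format can also be auto-detected by the sniffers.
for seq in skbio.io.read('seqs.fna', format='fasta'):
    print(seq.id)

# Object-oriented interface on the objects themselves.
seq = DNASequence.read('seqs.fna', format='fasta')
```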
### Bug fixes
* Removed ``constructor`` parameter from ``Alignment.k_word_frequencies``, ``BiologicalSequence.k_words``, ``BiologicalSequence.k_word_counts``, and ``BiologicalSequence.k_word_frequencies`` as it had no effect (it was never hooked up in the underlying code). ``BiologicalSequence.k_words`` now returns a generator of ``BiologicalSequence`` objects instead of strings.
* Modified the ``Alignment`` constructor to verify that all sequences have the same length, if not, raise an ``AlignmentError`` exception. Updated the method ``Alignment.subalignment`` to calculate the indices only once now that identical sequence length is guaranteed.
### Deprecated functionality
* Deprecated ``constructor`` parameter in ``Alignment.majority_consensus`` in favor of having users call ``str`` on the returned ``BiologicalSequence``. This parameter will be removed in scikit-bio 0.3.0.
* Existing I/O functionality deprecated in favor of I/O registry, old functionality will be removed in scikit-bio 0.3.0. All functionality can be found at ``skbio.io.read``, ``skbio.io.write``, and the methods listed below:
* Deprecated the following "clustal" readers/writers:
- ``write_clustal`` -> ``Alignment.write``
- ``parse_clustal`` -> ``Alignment.read``
* Deprecated the following distance matrix format ("lsmat") readers/writers:
- ``DissimilarityMatrix.from_file`` -> ``DissimilarityMatrix.read``
- ``DissimilarityMatrix.to_file`` -> ``DissimilarityMatrix.write``
- ``DistanceMatrix.from_file`` -> ``DistanceMatrix.read``
- ``DistanceMatrix.to_file`` -> ``DistanceMatrix.write``
* Deprecated the following ordination format ("ordination") readers/writers:
- ``OrdinationResults.from_file`` -> ``OrdinationResults.read``
- ``OrdinationResults.to_file`` -> ``OrdinationResults.write``
* Deprecated the following "newick" readers/writers:
- ``TreeNode.from_file`` -> ``TreeNode.read``
- ``TreeNode.from_newick`` -> ``TreeNode.read``
- ``TreeNode.to_newick`` -> ``TreeNode.write``
* Deprecated the following "phylip" writers:
- ``Alignment.to_phylip`` -> ``Alignment.write``
* Deprecated the following "fasta"/QUAL readers/writers:
- ``SequenceCollection.from_fasta_records`` -> ``SequenceCollection.read``
- ``SequenceCollection.to_fasta`` -> ``SequenceCollection.write``
- ``fasta_from_sequences`` -> ``skbio.io.write(obj, into=<file>, format='fasta')``
- ``fasta_from_alignment`` -> ``Alignment.write``
- ``parse_fasta`` -> ``skbio.io.read(<fasta>, format='fasta')``
- ``parse_qual`` -> ``skbio.io.read(<fasta>, format='fasta', qual=<file>)``
- ``BiologicalSequence.to_fasta`` -> ``BiologicalSequence.write``
* Deprecated the following "fastq" readers/writers:
- ``parse_fastq`` -> ``skbio.io.read(<fastq>, format='fastq')``
- ``format_fastq_record`` -> ``skbio.io.write(<fastq>, format='fastq')``
### Backward-incompatible changes
* ``skbio.stats.distance.mantel`` now returns a 3-element tuple containing correlation coefficient, p-value, and the number of matching rows/cols in the distance matrices (``n``). The return value was previously a 2-element tuple containing only the correlation coefficient and p-value.
* ``skbio.stats.distance.mantel`` reorders input ``DistanceMatrix`` instances based on matching IDs (see optional parameters ``strict`` and ``lookup`` for controlling this behavior). In the past, ``DistanceMatrix`` instances were treated the same as ``array_like`` input and no reordering took place, regardless of ID (mis)matches. ``array_like`` input behavior remains the same.
* If mismatched types are provided to ``skbio.stats.distance.mantel`` (e.g., a ``DistanceMatrix`` and ``array_like``), a ``TypeError`` will be raised.
### Miscellaneous
* Added git timestamp checking to checklist.py, ensuring that when changes are made to Cython (.pyx) files, their corresponding generated C files are also updated.
* Fixed performance bug when instantiating ``BiologicalSequence`` objects. The previous runtime scaled linearly with sequence length; it is now constant time when the sequence is already a string. See [#623](https://github.com/biocore/scikit-bio/issues/623) for details.
* IPython and six are now required dependencies.
## Version 0.2.0 (2014-08-07)
This is an initial alpha release of scikit-bio. At this stage, major backwards-incompatible API changes can and will happen. Many backwards-incompatible API changes were made since the previous release.
### Features
* Added ability to compute distances between sequences in a ``SequenceCollection`` object ([#509](https://github.com/biocore/scikit-bio/issues/509)), and expanded ``Alignment.distance`` to allow the user to pass a function for computing distances (the default distance metric is still ``scipy.spatial.distance.hamming``) ([#194](https://github.com/biocore/scikit-bio/issues/194)).
* Added functionality to not penalize terminal gaps in global alignment. This functionality results in more biologically relevant global alignments (see [#537](https://github.com/biocore/scikit-bio/issues/537) for discussion of the issue) and is now the default behavior for global alignment.
* The python global aligners (``global_pairwise_align``, ``global_pairwise_align_nucleotide``, and ``global_pairwise_align_protein``) now support aligning pairs of sequences, pairs of alignments, and a sequence and an alignment (see [#550](https://github.com/biocore/scikit-bio/issues/550)). This functionality supports progressive multiple sequence alignment, among other things such as adding a sequence to an existing alignment.
* Added ``StockholmAlignment.to_file`` for writing Stockholm-formatted files.
* Added ``strict=True`` optional parameter to ``DissimilarityMatrix.filter``.
* Added ``TreeNode.find_all`` for finding all tree nodes that match a given name.
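A minimal sketch of ``TreeNode.find_all`` (using the current reader API for brevity; in 0.2.0 the equivalent constructor was ``TreeNode.from_newick``):
```python
import io
from skbio import TreeNode

tree = TreeNode.read(io.StringIO("((a,b)c,(d,e)c)root;"))
[node.name for node in tree.find_all('c')]   # ['c', 'c'] -- both internal nodes named 'c'
```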
### Bug fixes
* Fixed bug that resulted in a ``ValueError`` from ``local_align_pairwise_nucleotide`` (see [#504](https://github.com/biocore/scikit-bio/issues/504)) under many circumstances. This would not generate incorrect results, but would cause the code to fail.
### Backward-incompatible changes
* Removed ``skbio.math``, leaving ``stats`` and ``diversity`` to become top level packages. For example, instead of ``from skbio.math.stats.ordination import PCoA`` you would now import ``from skbio.stats.ordination import PCoA``.
* The module ``skbio.math.gradient`` as well as the contents of ``skbio.math.subsample`` and ``skbio.math.stats.misc`` are now found in ``skbio.stats``. As an example, to import subsample: ``from skbio.stats import subsample``; to import everything from gradient: ``from skbio.stats.gradient import *``.
* The contents of ``skbio.math.stats.ordination.utils`` are now in ``skbio.stats.ordination``.
* Removed ``skbio.app`` subpackage (i.e., the *application controller framework*) as this code has been ported to the standalone [burrito](https://github.com/biocore/burrito) Python package. This code was not specific to bioinformatics and is useful for wrapping command-line applications in general.
* Removed ``skbio.core``, leaving ``alignment``, ``genetic_code``, ``sequence``, ``tree``, and ``workflow`` to become top level packages. For example, instead of ``from skbio.core.sequence import DNA`` you would now import ``from skbio.sequence import DNA``.
* Removed ``skbio.util.exception`` and ``skbio.util.warning`` (see [#577](https://github.com/biocore/scikit-bio/issues/577) for the reasoning behind this change). The exceptions/warnings were moved to the following locations:
- ``FileFormatError``, ``RecordError``, ``FieldError``, and ``EfficiencyWarning`` have been moved to ``skbio.util``
- ``BiologicalSequenceError`` has been moved to ``skbio.sequence``
- ``SequenceCollectionError`` and ``StockholmParseError`` have been moved to ``skbio.alignment``
- ``DissimilarityMatrixError``, ``DistanceMatrixError``, ``DissimilarityMatrixFormatError``, and ``MissingIDError`` have been moved to ``skbio.stats.distance``
- ``TreeError``, ``NoLengthError``, ``DuplicateNodeError``, ``MissingNodeError``, and ``NoParentError`` have been moved to ``skbio.tree``
- ``FastqParseError`` has been moved to ``skbio.parse.sequences``
- ``GeneticCodeError``, ``GeneticCodeInitError``, and ``InvalidCodonError`` have been moved to ``skbio.genetic_code``
* The contents of ``skbio.genetic_code`` (formerly ``skbio.core.genetic_code``) are now in ``skbio.sequence``. The ``GeneticCodes`` dictionary is now a function ``genetic_code``. The functionality is the same, except that because this is now a function rather than a dict, retrieving a genetic code is done using a function call rather than a lookup (so, for example, ``GeneticCodes[2]`` becomes ``genetic_code(2)``).
* Many submodules have been made private with the intention of simplifying imports for users. See [#562](https://github.com/biocore/scikit-bio/issues/562) for discussion of this change. The following list contains the previous module name and where imports from that module should now come from.
- ``skbio.alignment.ssw`` to ``skbio.alignment``
- ``skbio.alignment.alignment`` to ``skbio.alignment``
- ``skbio.alignment.pairwise`` to ``skbio.alignment``
- ``skbio.diversity.alpha.base`` to ``skbio.diversity.alpha``
- ``skbio.diversity.alpha.gini`` to ``skbio.diversity.alpha``
- ``skbio.diversity.alpha.lladser`` to ``skbio.diversity.alpha``
- ``skbio.diversity.beta.base`` to ``skbio.diversity.beta``
- ``skbio.draw.distributions`` to ``skbio.draw``
- ``skbio.stats.distance.anosim`` to ``skbio.stats.distance``
- ``skbio.stats.distance.base`` to ``skbio.stats.distance``
- ``skbio.stats.distance.permanova`` to ``skbio.stats.distance``
- ``skbio.distance`` to ``skbio.stats.distance``
- ``skbio.stats.ordination.base`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.canonical_correspondence_analysis`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.correspondence_analysis`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.principal_coordinate_analysis`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.redundancy_analysis`` to ``skbio.stats.ordination``
- ``skbio.tree.tree`` to ``skbio.tree``
- ``skbio.tree.trie`` to ``skbio.tree``
- ``skbio.util.misc`` to ``skbio.util``
- ``skbio.util.testing`` to ``skbio.util``
- ``skbio.util.exception`` to ``skbio.util``
- ``skbio.util.warning`` to ``skbio.util``
* Moved ``skbio.distance`` contents into ``skbio.stats.distance``.
### Miscellaneous
* Relaxed requirement in ``BiologicalSequence.distance`` that sequences being compared are of equal length. This is relevant for Hamming distance, so the check is still performed in that case, but other distance metrics may not have that requirement. See [#507](https://github.com/biocore/scikit-bio/issues/507).
* Renamed ``powertrip.py`` repo-checking script to ``checklist.py`` for clarity.
* ``checklist.py`` now ensures that all unit tests import from a minimally deep API. For example, it will produce an error if ``skbio.core.distance.DistanceMatrix`` is used over ``skbio.DistanceMatrix``.
* Extra dimension is no longer calculated in ``skbio.stats.spatial.procrustes``.
* Expanded documentation in various subpackages.
* Added new scikit-bio logo. Thanks [Alina Prassas](http://cargocollective.com/alinaprassas)!
## Version 0.1.4 (2014-06-25)
This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
### Features
* Added Python implementations of Smith-Waterman and Needleman-Wunsch alignment as ``skbio.core.alignment.pairwise.local_pairwise_align`` and ``skbio.core.alignment.pairwise.global_pairwise_align``. These are much slower than native C implementations (e.g., ``skbio.core.alignment.local_pairwise_align_ssw``) and as a result raise an ``EfficiencyWarning`` when called, but are included because they serve as useful educational examples and are simple to experiment with.
* Added ``skbio.core.diversity.beta.pw_distances`` and ``skbio.core.diversity.beta.pw_distances_from_table``. These provide convenient access to the ``scipy.spatial.distance.pdist`` *beta diversity* metrics from within scikit-bio. The ``skbio.core.diversity.beta.pw_distances_from_table`` function will only be available temporarily, until the ``biom.table.Table`` object is merged into scikit-bio (see [#489](https://github.com/biocore/scikit-bio/issues/489)), at which point ``skbio.core.diversity.beta.pw_distances`` will be updated to use that.
* Added ``skbio.core.alignment.StockholmAlignment``, which provides support for parsing [Stockholm-formatted alignment files](http://sonnhammer.sbc.su.se/Stockholm.html) and working with those alignments in the context of RNA secondary structural information.
* Added ``skbio.core.tree.majority_rule`` function for computing consensus trees from a list of trees.
### Backward-incompatible changes
* Function ``skbio.core.alignment.align_striped_smith_waterman`` renamed to ``local_pairwise_align_ssw`` and now returns an ``Alignment`` object instead of an ``AlignmentStructure``
* The following keyword-arguments for ``StripedSmithWaterman`` and ``local_pairwise_align_ssw`` have been renamed:
* ``gap_open`` -> ``gap_open_penalty``
* ``gap_extend`` -> ``gap_extend_penalty``
* ``match`` -> ``match_score``
* ``mismatch`` -> ``mismatch_score``
* Removed ``skbio.util.sort`` module in favor of [natsort](https://pypi.python.org/pypi/natsort) package.
### Miscellaneous
* Added powertrip.py script to perform basic sanity-checking of the repo based on recurring issues that weren't being caught until release time; added to Travis build.
* Added RELEASE.md with release instructions.
* Added intersphinx mappings to docs so that "See Also" references to numpy, scipy, matplotlib, and pandas are hyperlinks.
* The following classes are no longer ``namedtuple`` subclasses (see [#359](https://github.com/biocore/scikit-bio/issues/359) for the rationale):
* ``skbio.math.stats.ordination.OrdinationResults``
* ``skbio.math.gradient.GroupResults``
* ``skbio.math.gradient.CategoryResults``
* ``skbio.math.gradient.GradientANOVAResults``
* Added coding guidelines draft.
* Added new alpha diversity formulas to the ``skbio.math.diversity.alpha`` documentation.
## Version 0.1.3 (2014-06-12)
This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
### Features
* Added ``enforce_qual_range`` parameter to ``parse_fastq`` (on by default, maintaining backward compatibility). This allows disabling of the quality score range-checking.
* Added ``skbio.core.tree.nj``, which applies neighbor-joining for phylogenetic reconstruction (see the sketch after this list).
* Added ``bioenv``, ``mantel``, and ``pwmantel`` distance-based statistics to ``skbio.math.stats.distance`` subpackage.
* Added ``skbio.math.stats.misc`` module for miscellaneous stats utility functions.
* IDs are now optional when constructing a ``DissimilarityMatrix`` or ``DistanceMatrix`` (monotonically-increasing integers cast as strings are automatically used).
* Added ``DistanceMatrix.permute`` method for randomly permuting rows and columns of a distance matrix.
* Added the following methods to ``DissimilarityMatrix``: ``filter``, ``index``, and ``__contains__`` for ID-based filtering, index lookup, and membership testing, respectively.
* Added ``ignore_comment`` parameter to ``parse_fasta`` (off by default, maintaining backward compatibility). This handles stripping the comment field from the header line (i.e., all characters beginning with the first space) before returning the label.
* Added imports of ``BiologicalSequence``, ``NucleotideSequence``, ``DNA``, ``DNASequence``, ``RNA``, ``RNASequence``, ``Protein``, ``ProteinSequence``, ``DistanceMatrix``, ``align_striped_smith_waterman``, ``SequenceCollection``, ``Alignment``, ``TreeNode``, ``nj``, ``parse_fasta``, ``parse_fastq``, ``parse_qual``, ``FastaIterator``, ``FastqIterator``, ``SequenceIterator`` in ``skbio/__init__.py`` for convenient importing. For example, it's now possible to ``from skbio import Alignment``, rather than ``from skbio.core.alignment import Alignment``.
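A minimal sketch of neighbor joining with ``nj`` (importable directly from ``skbio`` per the item above), assuming a small toy distance matrix:
```python
from skbio import DistanceMatrix, nj

dm = DistanceMatrix([[0, 5, 9, 9],
                     [5, 0, 10, 10],
                     [9, 10, 0, 8],
                     [9, 10, 8, 0]],
                    ids=['a', 'b', 'c', 'd'])

tree = nj(dm)
print(tree.ascii_art())
```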
### Bug fixes
* Fixed a couple of unit tests that could fail stochastically.
* Added missing ``__init__.py`` files to a couple of test directories so that these tests won't be skipped.
* ``parse_fastq`` now raises an error on dangling records.
* Fixed several warnings that were raised while running the test suite with Python 3.4.
### Backward-incompatible changes
* Functionality imported from ``skbio.core.ssw`` must now be imported from ``skbio.core.alignment`` instead.
### Miscellaneous
* Code is now flake8-compliant; added flake8 checking to Travis build.
* Various additions and improvements to documentation (API, installation instructions, developer instructions, etc.).
* ``__future__`` imports are now standardized across the codebase.
* New website front page and styling changes throughout. Moved docs site to its own versioned subdirectories.
* Reorganized alignment data structures and algorithms (e.g., SSW code, ``Alignment`` class, etc.) into an ``skbio.core.alignment`` subpackage.
## Version 0.1.1 (2014-05-16)
Fixes to setup.py. This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
## Version 0.1.0 (2014-05-15)
Initial pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/CHANGELOG.md | CHANGELOG.md | # scikit-bio changelog
## Version 0.5.9
### Features
* Added variance log-ratio estimators `skbio.stats.composition.vlr` and `skbio.stats.composition.pairwise_vlr` (see the sketch after this list). ([#1803](https://github.com/biocore/scikit-bio/pull/1803))
* Added `skbio.stats.composition.tree_basis` to construct ILR bases from `TreeNode` objects. ([#1862](https://github.com/biocore/scikit-bio/pull/1862))
* `IntervalMetadata.query` now defaults to obtaining all results, see [#1817](https://github.com/biocore/scikit-bio/issues/1817).
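A minimal, hedged sketch of the variance log-ratio estimator mentioned above: `vlr(x, y)` computes the variance of `log(x/y)`, so two exactly proportional vectors give 0.0:
```python
import numpy as np
from skbio.stats.composition import vlr

x = np.array([1.0, 2.0, 4.0, 8.0])
y = np.array([2.0, 4.0, 8.0, 16.0])

vlr(x, y)   # 0.0 -- log(x/y) is constant because x and y are proportional
```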
### Backward-incompatible changes [experimental]
* With the introduction of the `tree_basis` object, the ILR bases are now represented in log-odds coordinates rather than in probabilities to minimize issues with numerical stability. Furthermore, the `ilr` and `ilr_inv` functions now takes the `basis` input parameter in terms of log-odds coordinates. This affects the `skbio.stats.composition.sbp_basis` as well. ([#1862](https://github.com/biocore/scikit-bio/pull/1862))
### Important
* Complex multiple axis indexing operations with `TabularMSA` have been removed from testing due to incompatibilities with modern versions of Pandas. ([#1851](https://github.com/biocore/scikit-bio/pull/1851))
* Pinning `scipy <= 1.10.1` ([#1867](https://github.com/biocore/scikit-bio/pull/1867))
### Bug fixes
* Fixed a bug that caused build failure on the ARM64 microarchitecture due to floating-point number handling. ([#1859](https://github.com/biocore/scikit-bio/pull/1859))
* The Gini index is no longer allowed to go below 0.0, see [#1844](https://github.com/biocore/scikit-bio/issues/1844).
* Fixed bug [#1847](https://github.com/biocore/scikit-bio/issues/1847) in which the edge from the root was inadvertently included in the calculation for `descending_branch_length`.
### Miscellaneous
* Replaced dependencies `CacheControl` and `lockfile` with `requests` to avoid a dependency inconsistency issue of the former. (See [#1863](https://github.com/biocore/scikit-bio/pull/1863), merged in [#1859](https://github.com/biocore/scikit-bio/pull/1859))
* Updated installation instructions for developers in `CONTRIBUTING.md` ([#1860](https://github.com/biocore/scikit-bio/pull/1860))
## Version 0.5.8
### Features
* Added NCBI taxonomy database dump format (`taxdump`) ([#1810](https://github.com/biocore/scikit-bio/pull/1810)).
* Added `TreeNode.from_taxdump` for converting taxdump into a tree ([#1810](https://github.com/biocore/scikit-bio/pull/1810)).
* scikit-learn has been removed as a dependency. This was a fairly heavy-weight dependency that was providing minor functionality to scikit-bio. The critical components have been implemented in scikit-bio directly, and the non-critical components are listed under "Backward-incompatible changes [experimental]".
* Python 3.11 is now supported.
### Backward-incompatible changes [experimental]
* With the removal of the scikit-learn dependency, three beta diversity metric names can no longer be specified. These are `wminkowski`, `nan_euclidean`, and `haversine`. On testing, `wminkowski` and `haversine` did not work through `skbio.diversity.beta_diversity` (or `sklearn.metrics.pairwise_distances`). The former was deprecated in favor of calling `minkowski` with a vector of weights provided as kwarg `w` (example below), and the latter does not work with data of this shape. `nan_euclidean` can still be accessed from scikit-learn directly if needed, if a user installs scikit-learn in their environment (example below).
```python
counts = [[23, 64, 14, 0, 0, 3, 1],
[0, 3, 35, 42, 0, 12, 1],
[0, 5, 5, 0, 40, 40, 0],
[44, 35, 9, 0, 1, 0, 0],
[0, 2, 8, 0, 35, 45, 1],
[0, 0, 25, 35, 0, 19, 0],
[88, 31, 0, 5, 5, 5, 5],
[44, 39, 0, 0, 0, 0, 0]]
# new mechanism of accessing wminkowski
from skbio.diversity import beta_diversity
beta_diversity("minkowski", counts, w=[1,1,1,1,1,1,2])
# accessing nan_euclidean through scikit-learn directly
import skbio
from sklearn.metrics import pairwise_distances
sklearn_dm = pairwise_distances(counts, metric="nan_euclidean")
skbio_dm = skbio.DistanceMatrix(sklearn_dm)
```
### Deprecated functionality [experimental]
* `skbio.alignment.local_pairwise_align_ssw` has been deprecated ([#1814](https://github.com/biocore/scikit-bio/issues/1814)) and will be removed or replaced in scikit-bio 0.6.0.
### Bug fixes
* Use `oldest-supported-numpy` as build dependency. This fixes problems with environments that use an older version of numpy than the one used to build scikit-bio ([#1813](https://github.com/biocore/scikit-bio/pull/1813)).
## Version 0.5.7
### Features
* Introduce support for Python 3.10 ([#1801](https://github.com/biocore/scikit-bio/pull/1801)).
* Tentative support for Apple M1 ([#1709](https://github.com/biocore/scikit-bio/pull/1709)).
* Added support for reading and writing a binary distance matrix object format. ([#1716](https://github.com/biocore/scikit-bio/pull/1716))
* Added support for `np.float32` with `DissimilarityMatrix` objects.
* Added support for the `method` and `number_of_dimensions` parameters to `permdisp`, reducing the runtime by 100x at 4000 samples ([#1769](https://github.com/biocore/scikit-bio/pull/1769)).
* `OrdinationResults` objects are now accepted as input for `permdisp`.
### Performance enhancements
* Avoid an implicit data copy on construction of `DissimilarityMatrix` objects.
* Avoid validation on copy of `DissimilarityMatrix` and `DistanceMatrix` objects, see [PR #1747](https://github.com/biocore/scikit-bio/pull/1747)
* Use an optimized version of symmetry check in DistanceMatrix, see [PR #1747](https://github.com/biocore/scikit-bio/pull/1747)
* Avoid performing filtering when ids are identical, see [PR #1752](https://github.com/biocore/scikit-bio/pull/1752)
* `center_distance_matrix` has been re-implemented in Cython for both speed and memory use. This indirectly speeds up `pcoa`, see [PR #1749](https://github.com/biocore/scikit-bio/pull/1749).
* Use a memory-optimized version of permute in DistanceMatrix, see [PR #1756](https://github.com/biocore/scikit-bio/pull/1756).
* Refactor pearson and spearman skbio.stats.distance.mantel implementations to drastically improve memory locality. Also cache intermediate results that are invariant across permutations, see [PR #1756](https://github.com/biocore/scikit-bio/pull/1756).
* Refactor permanova to remove intermediate buffers and cythonize the internals, see [PR #1768](https://github.com/biocore/scikit-bio/pull/1768).
### Bug fixes
* Fixed Windows and 32-bit incompatibility in `unweighted_unifrac`.
### Miscellaneous
* Python 3.6 has been removed from our testing matrix.
* Specify build dependencies in pyproject.toml. This allows the package to be installed without having to first manually install numpy.
* Update hdmedians package to a version which doesn't require an initial manual numpy install.
* Now buildable on non-x86 platforms due to use of the [SIMD Everywhere](https://github.com/simd-everywhere/simde) library.
* Regenerate Cython wrapper by default to avoid incompatibilities with installed CPython.
* Update documentation for the `skbio.stats.composition.ancom` function. ([#1741](https://github.com/biocore/scikit-bio/pull/1741))
## Version 0.5.6
### Features
* Added option to return a capture group compiled regex pattern to any class inheriting ``GrammaredSequence`` through the ``to_regex`` method. ([#1431](https://github.com/biocore/scikit-bio/issues/1431))
* Added `Dissimilarity.within` and `.between` to obtain the respective distances and express them as a `DataFrame`. ([#1662](https://github.com/biocore/scikit-bio/pull/1662))
* Added Kendall Tau as possible correlation method in the `skbio.stats.distance.mantel` function ([#1675](https://github.com/biocore/scikit-bio/issues/1675)).
* Added support for IUPAC amino acid codes U (selenocysteine), O (pyrrolysine), and J (leucine or isoleucine). ([#1576](https://github.com/biocore/scikit-bio/issues/1576))
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
* Changed `skbio.tree.TreeNode.support` from a method to a property.
* Added `assign_supports` method to `skbio.tree.TreeNode` to extract branch support values from node labels.
* Modified the way a node's label is printed: `support:name` if both exist, or `support` or `name` if either exists.
### Performance enhancements
### Bug fixes
* Require `Sphinx <= 3.0`. Newer Sphinx versions caused build errors. [#1719](https://github.com/biocore/scikit-bio/pull/1719)
* `skbio.stats.ordination` tests have been relaxed. ([#1713](https://github.com/biocore/scikit-bio/issues/1713))
* Fixes build errors for newer versions of NumPy, Pandas, and SciPy.
* Corrected a critical bug in `skbio.alignment.StripedSmithWaterman`/`skbio.alignment.local_pairwise_align_ssw` which would cause the formatting of the aligned sequences to misplace gap characters by the number of gap characters present in the opposing aligned sequence up to that point. This was caused by a faulty implementation of CIGAR string parsing, see [#1679](https://github.com/biocore/scikit-bio/pull/1679) for full details.
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
* `skbio.diversity.beta_diversity` now accepts a pandas DataFrame as input.
* Avoid pandas 1.0.0 import warning ([#1688](https://github.com/biocore/scikit-bio/issues/1688))
* Added support for Python 3.8 and dropped support for Python 3.5.
* This version now depends on `scipy >= 1.3` and `pandas >= 1.0`.
## Version 0.5.5 (2018-12-10)
### Features
* `skbio.stats.composition` now has methods to compute the additive log-ratio transformation and its inverse (`alr`, `alr_inv`), as well as a method to build a basis from a sequential binary partition (`sbp_basis`).
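A small sketch of the round trip through the new transforms (the composition below is arbitrary and the default denominator component is assumed):
```python
import numpy as np
from skbio.stats.composition import alr, alr_inv

x = np.array([0.1, 0.3, 0.4, 0.2])  # a composition summing to 1
y = alr(x)            # additive log-ratio transform
x_back = alr_inv(y)   # inverse transform recovers the original composition
```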
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
### Bug fixes
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
* Python 3.6 and 3.7 are now supported
* A pytest runner is shipped with every installation ([#1633](https://github.com/biocore/scikit-bio/pull/1633))
* The nosetest framework has been replaced in favor of pytest ([#1624](https://github.com/biocore/scikit-bio/pull/1624))
* The numpy docs are deprecated in favor of [Napoleon](http://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html) ([#1629](https://github.com/biocore/scikit-bio/pull/1629))
* This version is now compatible with NumPy >= 1.9.2 and Pandas >= 0.23. ([#1627](https://github.com/biocore/scikit-bio/pull/1627))
## Version 0.5.4 (2018-08-23)
### Features
* Added `FSVD`, an alternative fast heuristic method to perform Principal Coordinates Analysis, to `skbio.stats.ordination.pcoa`.
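A minimal sketch of requesting the heuristic (the random matrix below is only a placeholder; `method="fsvd"` and `number_of_dimensions` are assumed from this feature's description):
```python
import numpy as np
from skbio import DistanceMatrix
from skbio.stats.ordination import pcoa

# Build a small symmetric, hollow placeholder distance matrix
data = np.random.rand(6, 6)
dm = DistanceMatrix((data + data.T) / 2 * (1 - np.eye(6)))

# FSVD approximates the full eigendecomposition; keep only three axes
ordination = pcoa(dm, method="fsvd", number_of_dimensions=3)
```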
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
* Added optimized utility methods `f_matrix_inplace` and `e_matrix_inplace` which perform `f_matrix` and `e_matrix` computations in-place and are used by the new `center_distance_matrix` method in `skbio.stats.ordination`.
### Bug fixes
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
## Version 0.5.3 (2018-08-07)
### Features
* Added `unpack` and `unpack_by_func` methods to `skbio.tree.TreeNode` to unpack one or multiple internal nodes. The `unpack` operation removes an internal node and regrafts its children to its parent while retaining the overall length (see the example below). ([#1572](https://github.com/biocore/scikit-bio/pull/1572))
* Added `support` to `skbio.tree.TreeNode` to return the support value of a node.
* Added `permdisp` to `skbio.stats.distance` to test for the homogeneity of groups. ([#1228](https://github.com/biocore/scikit-bio/issues/1228)).
* Added `pcoa_biplot` to `skbio.stats.ordination` to project descriptors into a PCoA plot.
* Pinned pandas to 0.22.0 due to https://github.com/pandas-dev/pandas/issues/20527
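A sketch of unpacking a single internal node, assuming the node is looked up by name with `find`:
```python
from skbio import TreeNode

tree = TreeNode.read(["((a:1,b:2)x:1,c:4)root;"])
# Unpacking 'x' regrafts its children onto the root while keeping the
# total branch lengths: 'a' ends up with length 2 and 'b' with length 3.
tree.find('x').unpack()
print(tree.ascii_art())
```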
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
### Bug fixes
* Relaxing type checking in diversity calculations. ([#1583](https://github.com/biocore/scikit-bio/issues/1583)).
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
## Version 0.5.2 (2018-04-18)
### Features
* Added ``skbio.io.format.embl`` for reading and writing EMBL files for ``DNA``, ``RNA`` and ``Sequence`` classes.
* Removing ValueError check in `skbio.stats._subsample.subsample_counts` when `replace=True` and `n` is greater than the number of items in counts. [#1527](https://github.com/biocore/scikit-bio/pull/1527)
* Added ``skbio.io.format.gff3`` for reading and writing GFF3 files for ``DNA``, ``Sequence``, and ``IntervalMetadata`` classes. ([#1450](https://github.com/biocore/scikit-bio/pull/1450))
* `skbio.metadata.IntervalMetadata` constructor has a new keyword argument, `copy_from`, for creating an `IntervalMetadata` object from an existing `IntervalMetadata` object with specified `upper_bound`.
* `skbio.metadata.IntervalMetadata` constructor allows `None` as a valid value for `upper_bound`. An `upper_bound` of `None` means that the `IntervalMetadata` object has no upper bound.
* `skbio.metadata.IntervalMetadata.drop` has a new boolean parameter `negate` to indicate whether to drop or keep the specified `Interval` objects.
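A sketch of the new `negate` behavior (the interval values are illustrative):
```python
from skbio.metadata import IntervalMetadata

im = IntervalMetadata(9)
keep = im.add(bounds=[(0, 2)], metadata={'gene': 'sagA'})
im.add(bounds=[(4, 7)], metadata={'gene': 'sagB'})

# negate=True inverts the selection: every interval *except* `keep` is dropped
im.drop([keep], negate=True)
```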
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
### Performance enhancements
* `skbio.tree.nj` wall-clock runtime was decreased by 99% for a 500x500 distance matrix and 93% for a 100x100 distance matrix. ([#1512](https://github.com/biocore/scikit-bio/pull/1512), [#1513](https://github.com/biocore/scikit-bio/pull/1513))
### Bug fixes
* The `include_self` parameter was not being honored in `skbio.TreeNode.tips`. The scope of this bug was that if `TreeNode.tips` was called on a tip, it would always result in an empty `list` when unrolled.
* In `skbio.stats.ordination.ca`, `proportion_explained` was missing in the returned `OrdinationResults` object. ([#1345](https://github.com/biocore/scikit-bio/issues/1345))
* `skbio.diversity.beta_diversity` now handles qualitative metrics as expected such that `beta_diversity('jaccard', mat) == beta_diversity('jaccard', mat > 0)`. Please see [#1549](https://github.com/biocore/scikit-bio/issues/1549) for further detail.
* `skbio.stats.ordination.rda` The occasional column mismatch in output `biplot_scores` is fixed ([#1519](https://github.com/biocore/scikit-bio/issues/1519)).
### Deprecated functionality [stable]
### Deprecated functionality [experimental]
### Miscellaneous
* scikit-bio now depends on pandas >= 0.19.2, and is compatible with newer pandas versions (e.g. 0.20.3) that were previously incompatible.
* scikit-bio now depends on `numpy >= 1.9.2, < 1.14.0` for compatibility with Python 3.4, 3.5, and 3.6 and the available numpy conda packages in `defaults` and `conda-forge` channels.
* Added support for running tests from `setup.py`. Both `python setup.py nosetests` and `python setup.py test` are now supported, however `python setup.py test` will only run a subset of the full test suite. ([#1341](https://github.com/biocore/scikit-bio/issues/1341))
## Version 0.5.1 (2016-11-12)
### Features
* Added `IntervalMetadata` and `Interval` classes in `skbio.metadata` to store, query, and manipulate information of a sub-region of a sequence. ([#1414](https://github.com/biocore/scikit-bio/issues/1414))
* `Sequence` and its child classes (including `GrammaredSequence`, `RNA`, `DNA`, `Protein`) now accept `IntervalMetadata` in their constructor API. Some of their relevant methods are also updated accordingly. ([#1430](https://github.com/biocore/scikit-bio/pull/1430))
* GenBank parser now reads and writes `Sequence` or its subclass objects with `IntervalMetadata`. ([#1440](https://github.com/biocore/scikit-bio/pull/1440))
* `DissimilarityMatrix` now has a new constructor method called `from_iterable`. ([#1343](https://github.com/biocore/scikit-bio/issues/1343)).
* `DissimilarityMatrix` now allows non-hollow matrices. ([#1343](https://github.com/biocore/scikit-bio/issues/1343)).
* `DistanceMatrix.from_iterable` now accepts a `validate=True` parameter. ([#1343](https://github.com/biocore/scikit-bio/issues/1343)).
* ``DistanceMatrix`` now has a new method called ``to_series`` to create a ``pandas.Series`` from a ``DistanceMatrix`` (see the example below) ([#1397](https://github.com/biocore/scikit-bio/issues/1397)).
* Added parallel beta diversity calculation support via `skbio.diversity.block_beta_diversity`. The issue and idea are discussed in [#1181](https://github.com/biocore/scikit-bio/issues/1181), while the actual code changes are in [#1352](https://github.com/biocore/scikit-bio/pull/1352).
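For example, a small sketch of `to_series` (the distances are made up):
```python
from skbio import DistanceMatrix

dm = DistanceMatrix([[0.0, 1.0, 2.0],
                     [1.0, 0.0, 3.0],
                     [2.0, 3.0, 0.0]], ids=['a', 'b', 'c'])

s = dm.to_series()
# s is a pandas.Series indexed by pairs of ids, e.g. s[('a', 'b')] == 1.0
```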
### Backward-incompatible changes [stable]
* The constructor API for `Sequence` and its child classes (including `GrammaredSequence`, `RNA`, `DNA`, `Protein`) has changed from `(sequence, metadata=None, positional_metadata=None, lowercase=False)` to `(sequence, metadata=None, positional_metadata=None, interval_metadata=None, lowercase=False)`
The changes are made to allow these classes to adopt the `IntervalMetadata` object for interval features on the sequence. The `interval_metadata` parameter is added immediately after `positional_metadata` instead of appended to the end, because it is more natural and logical and, more importantly, because it is unlikely in practice to break user code. A user's code would break only if they had supplied `metadata`, `positional_metadata`, and `lowercase` parameters positionally. In the unlikely event that this happens, users will get an error telling them a bool isn't a valid `IntervalMetadata` type, so it won't silently produce buggy behavior.
### Backward-incompatible changes [experimental]
* Modifying basis handling in `skbio.stats.composition.ilr_inv` prior to checking for orthogonality. Now the basis is strictly assumed to be in the Aitchison simplex.
* `DistanceMatrix.from_iterable` default behavior is now to validate the matrix by computing all pairwise distances. Pass `validate=False` to get the previous behavior (no validation, but faster execution). ([#1343](https://github.com/biocore/scikit-bio/issues/1343))
* GenBank I/O now parses sequence features into the `interval_metadata` attribute instead of `positional_metadata`. The `FEATURES` key is also removed from the `metadata` attribute.
### Performance enhancements
* `TreeNode.shear` was rewritten for approximately a 25% performance increase. ([#1399](https://github.com/biocore/scikit-bio/pull/1399))
* `IntervalMetadata` allows a dramatic decrease in memory usage when reading GenBank files of feature-rich sequences. ([#1159](https://github.com/biocore/scikit-bio/issues/1159))
### Bug fixes
* `skbio.tree.TreeNode.prune` and implicitly `skbio.tree.TreeNode.shear` were not handling a situation in which a parent was validly removed during pruning operations, as may happen if the resulting subtree does not include the root. Previously, an `AttributeError` would be raised as `parent` would be `None` in this situation.
* numpy linking was fixed for installation under El Capitan.
* A bug was introduced in #1398 into `TreeNode.prune` and fixed in #1416 in which, under the special case of a single descendant existing from the root, the resulting children's parent references were not updated. The cause of the bug was a call made to `self.children.extend` as opposed to `self.extend`, where the former is a `list.extend` without knowledge of the tree, while the latter is `TreeNode.extend`, which is able to adjust references to `self.parent`.
### Miscellaneous
* Removed deprecated functions from `skbio.util`: `is_casava_v180_or_later`, `remove_files`, and `create_dir`.
* Removed deprecated `skbio.Sequence.copy` method.
## Version 0.5.0 (2016-06-14)
**IMPORTANT**: scikit-bio is no longer compatible with Python 2. scikit-bio is compatible with Python 3.4 and later.
### Features
* Added more descriptive error message to `skbio.io.registry` when attempting to read without specifying `into` and when there is no generator reader. ([#1326](https://github.com/biocore/scikit-bio/issues/1326))
* Added support for reference tags to `skbio.io.format.stockholm` reader and writer. ([#1348](https://github.com/biocore/scikit-bio/issues/1348))
* Expanded error message in `skbio.io.format.stockholm` reader when `constructor` is not passed, in order to provide better explanation to user. ([#1327](https://github.com/biocore/scikit-bio/issues/1327))
* Added `skbio.sequence.distance.kmer_distance` for computing the kmer distance between two sequences. ([#913](https://github.com/biocore/scikit-bio/issues/913))
* Added `skbio.sequence.Sequence.replace` for assigning a character to positions in a `Sequence`. ([#1222](https://github.com/biocore/scikit-bio/issues/1222))
* Added support for `pandas.RangeIndex`, lowering the memory footprint of default integer index objects. `Sequence.positional_metadata` and `TabularMSA.positional_metadata` now use `pd.RangeIndex` as the positional metadata index. `TabularMSA` now uses `pd.RangeIndex` as the default index. Usage of `pd.RangeIndex` over the previous `pd.Int64Index` [should be transparent](http://pandas.pydata.org/pandas-docs/version/0.18.0/whatsnew.html#range-index), so these changes should be non-breaking to users. scikit-bio now depends on pandas >= 0.18.0 ([#1308](https://github.com/biocore/scikit-bio/issues/1308))
* Added `reset_index=False` parameter to `TabularMSA.append` and `TabularMSA.extend` for resetting the MSA's index to the default index after appending/extending (see the example below).
* Added support for partial pairwise calculations via `skbio.diversity.partial_beta_diversity`. ([#1221](https://github.com/biocore/scikit-bio/issues/1221), [#1337](https://github.com/biocore/scikit-bio/pull/1337)). This function is immediately deprecated as its return type will change in the future and should be used with caution in its present form (see the function's documentation for details).
* `TemporaryFile` and `NamedTemporaryFile` are now supported IO sources for `skbio.io` and related functionality. ([#1291](https://github.com/biocore/scikit-bio/issues/1291))
* Added `tree_node_class=TreeNode` parameter to `skbio.tree.majority_rule` to support returning consensus trees of type `TreeNode` (the default) or a type that has the same interface as `TreeNode` (e.g. `TreeNode` subclasses) ([#1193](https://github.com/biocore/scikit-bio/pull/1193))
* `TreeNode.from_linkage_matrix` and `TreeNode.from_taxonomy` now support constructing `TreeNode` subclasses. `TreeNode.bifurcate` now supports `TreeNode` subclasses ([#1193](https://github.com/biocore/scikit-bio/pull/1193))
* The `ignore_metadata` keyword has been added to `TabularMSA.iter_positions` to improve performance when metadata is not necessary.
* Pairwise aligners in `skbio.alignment` now propagate per-sequence `metadata` objects (this does not include `positional_metadata`).
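A sketch of `reset_index` on append (the sequences are arbitrary):
```python
from skbio import DNA, TabularMSA

msa = TabularMSA([DNA('ACGT'), DNA('AC-T')], index=['seq1', 'seq2'])
msa.append(DNA('ACTT'), reset_index=True)
# The MSA's index is reset to the default integer index 0..2
```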
### Backward-incompatible changes [stable]
### Backward-incompatible changes [experimental]
* `TabularMSA.append` and `TabularMSA.extend` now require one of `minter`, `index`, or `reset_index` to be provided when incorporating new sequences into an MSA. Previous behavior was to auto-increment the index labels if `minter` and `index` weren't provided and the MSA had a default integer index, otherwise error. Use `reset_index=True` to obtain the previous behavior in a more explicit way.
* `skbio.stats.composition.ancom` now returns two `pd.DataFrame` objects, where it previously returned one. The first contains the ANCOM test results, as before, and the second contains percentile abundances of each feature in each group. The specific percentiles that are computed and returned are controlled by the new `percentiles` parameter to `skbio.stats.composition.ancom` (see the example below). In the future, this second `pd.DataFrame` will not be returned by this function, but will be available through the [contingency table API](https://github.com/biocore/scikit-bio/issues/848). ([#1293](https://github.com/biocore/scikit-bio/issues/1293))
* `skbio.stats.composition.ancom` now performs multiple comparisons correction by default. The previous behavior of not performing multiple comparisons correction can be achieved by passing ``multiple_comparisons_correction=None``.
* The ``reject`` column in the first ``pd.DataFrame`` returned from `skbio.stats.composition.ancom` has been renamed ``Reject null hypothesis`` for clarity. ([#1375](https://github.com/biocore/scikit-bio/issues/1375))
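A minimal sketch of the new two-DataFrame return value (the toy table and grouping below are made up; a real analysis needs an appropriate zero-handling strategy and more samples):
```python
import pandas as pd
from skbio.stats.composition import ancom

table = pd.DataFrame([[12, 11, 10, 10],
                      [9, 11, 12, 10],
                      [1, 11, 10, 11],
                      [2, 11, 10, 10]],
                     index=['s1', 's2', 's3', 's4'],
                     columns=['b1', 'b2', 'b3', 'b4'])
grouping = pd.Series(['treatment', 'treatment', 'placebo', 'placebo'],
                     index=['s1', 's2', 's3', 's4'])

# First DataFrame holds the test results, second the percentile abundances
results, percentile_abundances = ancom(table, grouping)
```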
### Bug fixes
* Fixed row and column names of `biplot_scores` in the `OrdinationResults` object from `skbio.stats.ordination`. This fix affects the `cca` and `rda` methods. ([#1322](https://github.com/biocore/scikit-bio/issues/1322))
* Fixed bug when using `skbio.io.format.stockholm` reader on file with multi-line tree with no id. Previously this raised an `AttributeError`, now it correctly handles this type of tree. ([#1334](https://github.com/biocore/scikit-bio/issues/1334))
* Fixed bug when reading Stockholm files with GF or GS features split over multiple lines. Previously, the feature text was simply concatenated because it was assumed to have trailing whitespace. There are examples of Stockholm files with and without trailing whitespace for multi-line features, so the `skbio.io.format.stockholm` reader now adds a single space when concatenating feature text without trailing whitespace to avoid joining words together. Multi-line trees stored as GF metadata are concatenated as they appear in the file; a space is not added when concatenating. ([#1328](https://github.com/biocore/scikit-bio/issues/1328))
* Fixed bug when using `Sequence.iter_kmers` on empty `Sequence` object. Previously this raised a `ValueError`, now it returns
an empty generator.
* Fixed minor bug where adding sequences to an empty `TabularMSA` with MSA-wide `positional_metadata` would result in a `TabularMSA` object in an inconsistent state. This could happen using `TabularMSA.append` or `TabularMSA.extend`. This bug only affects a `TabularMSA` object *without* sequences that has MSA-wide `positional_metadata` (for example, `TabularMSA([], positional_metadata={'column': []})`).
* `TreeNode.distance` now handles the situation in which `self` or `other` are ancestors. Previously, a node further up the tree was used, resulting in inflated distances. ([#807](https://github.com/biocore/scikit-bio/issues/807))
* `TreeNode.prune` can now handle a root with a single descendant. Previously, the possibility of the root having a single descendant was not considered. ([#1247](https://github.com/biocore/scikit-bio/issues/1247))
* Providing the `format` keyword to `skbio.io.read` when creating a generator with an empty file will now return an empty generator instead of raising `StopIteration`. ([#1313](https://github.com/biocore/scikit-bio/issues/1313))
* `OrdinationResults` is now importable from `skbio` and `skbio.stats.ordination` and correctly linked from the documentation ([#1205](https://github.com/biocore/scikit-bio/issues/1205))
* Fixed performance bug in pairwise aligners resulting in 100x worse performance than in 0.2.4.
### Deprecated functionality [stable]
* Deprecated use of the term "non-degenerate", in favor of "definite". `GrammaredSequence.nondegenerate_chars`, `GrammaredSequence.nondegenerates`, and `GrammaredSequence.has_nondegenerates` have been renamed to `GrammaredSequence.definite_chars`, `GrammaredSequence.definites`, and `GrammaredSequence.has_definites`, respectively. The old names will be removed in scikit-bio 0.5.2. Relevant affected public classes include `GrammaredSequence`, `DNA`, `RNA`, and `Protein`.
### Deprecated functionality [experimental]
* Deprecated function `skbio.util.create_dir`. This function will be removed in scikit-bio 0.5.1. Please use the Python standard library
functionality described [here](https://docs.python.org/2/library/os.html#os.makedirs). ([#833](https://github.com/biocore/scikit-bio/issues/833))
* Deprecated function `skbio.util.remove_files`. This function will be removed in scikit-bio 0.5.1. Please use the Python standard library
functionality described [here](https://docs.python.org/2/library/os.html#os.remove). ([#833](https://github.com/biocore/scikit-bio/issues/833))
* Deprecated function `skbio.util.is_casava_v180_or_later`. This function will be removed in 0.5.1. Functionality moved to FASTQ sniffer.
([#833](https://github.com/biocore/scikit-bio/issues/833))
### Miscellaneous
* When installing scikit-bio via `pip`, numpy must now be installed first ([#1296](https://github.com/biocore/scikit-bio/issues/1296))
## Version 0.4.2 (2016-02-17)
Minor maintenance release. **This is the last Python 2.7 compatible release. Future scikit-bio releases will only support Python 3.**
### Features
* Added `skbio.tree.TreeNode.bifurcate` for converting multifurcating trees into bifurcating trees. ([#896](https://github.com/biocore/scikit-bio/issues/896))
* Added `skbio.io.format.stockholm` for reading Stockholm files into a `TabularMSA` and writing from a `TabularMSA`. ([#967](https://github.com/biocore/scikit-bio/issues/967))
* scikit-bio `Sequence` objects have better compatibility with numpy. For example, calling `np.asarray(sequence)` now converts the sequence to a numpy array of characters (the same as calling `sequence.values`).
* Added `skbio.sequence.distance` subpackage for computing distances between scikit-bio `Sequence` objects ([#913](https://github.com/biocore/scikit-bio/issues/913))
* Added ``skbio.sequence.GrammaredSequence``, which can be inherited from to create grammared sequences with custom alphabets (e.g., for use with TabularMSA); see the sketch below. ([#1175](https://github.com/biocore/scikit-bio/issues/1175))
* Added ``skbio.util.classproperty`` decorator
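A sketch of subclassing ``GrammaredSequence`` with a custom alphabet via the ``classproperty`` decorator (the alphabet is invented, and the property names follow the later, post-0.5.0 naming):
```python
from skbio.sequence import GrammaredSequence
from skbio.util import classproperty

class CustomSequence(GrammaredSequence):
    @classproperty
    def degenerate_map(cls):
        return {"X": set("AB")}

    @classproperty
    def definite_chars(cls):
        return set("ABC")

    @classproperty
    def default_gap_char(cls):
        return '-'

    @classproperty
    def gap_chars(cls):
        return set('-.')

seq = CustomSequence('ABABAXC-')
```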
### Backward-incompatible changes [stable]
* When sniffing or reading a file (`skbio.io.sniff`, `skbio.io.read`, or the object-oriented `.read()` interface), passing `newline` as a keyword argument to `skbio.io.open` now raises a `TypeError`. This backward-incompatible change to a stable API is necessary because it fixes a bug (more details in bug fix section below).
* When reading a FASTQ or QSEQ file and passing `variant='solexa'`, `ValueError` is now raised instead of `NotImplementedError`. This backward-incompatible change to a stable API is necessary to avoid creating a spin-locked process due to [a bug in Python](https://bugs.python.org/issue25786). See [#1256](https://github.com/biocore/scikit-bio/issues/1256) for details. This change is temporary and will be reverted to `NotImplementedError` when the bug is fixed in Python.
### Backward-incompatible changes [experimental]
* `skbio.io.format.genbank`: When reading GenBank files, the date field of the LOCUS line is no longer parsed into a `datetime.datetime` object and is left as a string. When writing GenBank files, the locus date metadata is expected to be a string instead of a `datetime.datetime` object ([#1153](https://github.com/biocore/scikit-bio/issues/1153))
* `Sequence.distance` now converts the input sequence (`other`) to its type before passing both sequences to `metric`. Previous behavior was to always convert to `Sequence`.
### Bug fixes
* Fixed bug when using `Sequence.distance` or `DistanceMatrix.from_iterable` to compute distances between `Sequence` objects with differing `metadata`/`positional_metadata` and passing `metric=scipy.spatial.distance.hamming` ([#1254](https://github.com/biocore/scikit-bio/issues/1254))
* Fixed performance bug when computing Hamming distances between `Sequence` objects in `DistanceMatrix.from_iterable` ([#1250](https://github.com/biocore/scikit-bio/issues/1250))
* Changed `skbio.stats.composition.multiplicative_replacement` to raise an error whenever a large value of `delta` is chosen ([#1241](https://github.com/biocore/scikit-bio/issues/1241))
* When sniffing or reading a file (`skbio.io.sniff`, `skbio.io.read`, or the object-oriented `.read()` interface), passing `newline` as a keyword argument to `skbio.io.open` now raises a `TypeError`. The file format's `newline` character will be used when opening the file. Previous behavior allowed overriding the format's `newline` character but this could cause issues with readers that assume newline characters are those defined by the file format (which is an entirely reasonable assumption). This bug is very unlikely to have surfaced in practice as the default `newline` behavior is *universal newlines mode*.
* DNA, RNA, and Protein are no longer inheritable because they assume an IUPAC alphabet.
* `DistanceMatrix` constructor provides more informative error message when data contains NaNs ([#1276](https://github.com/biocore/scikit-bio/issues/1276))
### Miscellaneous
* Warnings raised by scikit-bio now share a common subclass ``skbio.util.SkbioWarning``.
## Version 0.4.1 (2015-12-09)
### Features
* The ``TabularMSA`` object was added to represent and operate on tabular multiple sequence alignments. This satisfies [RFC 1](https://github.com/biocore/scikit-bio-rfcs/blob/master/active/001-tabular-msa.md). See the ``TabularMSA`` docs for full details.
* Added phylogenetic diversity metrics, including weighted UniFrac, unweighted UniFrac, and Faith's Phylogenetic Diversity. These are accessible as ``skbio.diversity.beta.unweighted_unifrac``, ``skbio.diversity.beta.weighted_unifrac``, and ``skbio.diversity.alpha.faith_pd``, respectively.
* Addition of the function ``skbio.diversity.alpha_diversity`` to support applying an alpha diversity metric to multiple samples in one call.
* Addition of the functions ``skbio.diversity.get_alpha_diversity_metrics`` and ``skbio.diversity.get_beta_diversity_metrics`` to support discovery of the alpha and beta diversity metrics implemented in scikit-bio.
* Added `skbio.stats.composition.ancom` function, a test for OTU differential abundance across sample categories. ([#1054](https://github.com/biocore/scikit-bio/issues/1054))
* Added `skbio.io.format.blast7` for reading BLAST+ output format 7 or BLAST output format 9 files into a `pd.DataFrame`. ([#1110](https://github.com/biocore/scikit-bio/issues/1110))
* Added `skbio.DissimilarityMatrix.to_data_frame` method for creating a ``pandas.DataFrame`` from a `DissimilarityMatrix` or `DistanceMatrix`. ([#757](https://github.com/biocore/scikit-bio/issues/757))
* Added support for one-dimensional vector of dissimilarities in `skbio.stats.distance.DissimilarityMatrix`
constructor. ([#624](https://github.com/biocore/scikit-bio/issues/624))
* Added `skbio.io.format.blast6` for reading BLAST+ output format 6 or BLAST output format 8 files into a `pd.DataFrame`. ([#1110](https://github.com/biocore/scikit-bio/issues/1110))
* Added `inner`, `ilr`, `ilr_inv` and `clr_inv` to ``skbio.stats.composition``, which enable linear transformations on compositions. ([#892](https://github.com/biocore/scikit-bio/issues/892))
* Added ``skbio.diversity.alpha.pielou_e`` function as an evenness metric of alpha diversity. ([#1068](https://github.com/biocore/scikit-bio/issues/1068))
* Added `to_regex` method to `skbio.sequence._iupac_sequence` ABC - it returns a regex object that matches all non-degenerate versions of the sequence.
* Added ``skbio.util.assert_ordination_results_equal`` function for comparing ``OrdinationResults`` objects in unit tests.
* Added ``skbio.io.format.genbank`` for reading and writing GenBank/GenPept for ``DNA``, ``RNA``, ``Protein`` and ``Sequence`` classes.
* Added ``skbio.util.RepresentationWarning`` for warning about substitutions, assumptions, or particular alterations that were made for the successful completion of a process.
* ``TreeNode.tip_tip_distances`` now supports nodes without an associated length. In this case, a length of 0.0 is assumed and an ``skbio.util.RepresentationWarning`` is raised. Previous behavior was to raise a ``NoLengthError``. ([#791](https://github.com/biocore/scikit-bio/issues/791))
* ``DistanceMatrix`` now has a new constructor method called `from_iterable`.
* ``Sequence`` now accepts ``lowercase`` keyword like ``DNA`` and others. Updated ``fasta``, ``fastq``, and ``qseq`` readers/writers for ``Sequence`` to reflect this.
* The ``lowercase`` method has been moved up to ``Sequence`` meaning all sequence objects now have a ``lowercase`` method.
* Added ``reverse_transcribe`` class method to ``RNA``.
* Added `Sequence.observed_chars` property for obtaining the set of observed characters in a sequence. ([#1075](https://github.com/biocore/scikit-bio/issues/1075))
* Added `Sequence.frequencies` method for computing character frequencies in a sequence (see the example below). ([#1074](https://github.com/biocore/scikit-bio/issues/1074))
* Added experimental class-method ``Sequence.concat`` which will produce a new sequence from an iterable of existing sequences. Parameters control how positional metadata is propagated during a concatenation.
* ``TreeNode.to_array`` now supports replacing ``nan`` branch lengths in the resulting branch length vector with the value provided as ``nan_length_value``.
* ``skbio.io.format.phylip`` now supports sniffing and reading strict, sequential PHYLIP-formatted files into ``skbio.Alignment`` objects. ([#1006](https://github.com/biocore/scikit-bio/issues/1006))
* Added `default_gap_char` class property to ``DNA``, ``RNA``, and ``Protein`` for representing gap characters in a new sequence.
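A quick sketch of the two new character-composition helpers (the counts shown in comments are for this particular sequence):
```python
from skbio import Sequence

seq = Sequence('ACCGAGTTTAACCGAATA')
seq.observed_chars              # the set of characters present in the sequence
seq.frequencies()               # absolute counts, e.g. {'A': 7, 'C': 4, 'G': 3, 'T': 4}
seq.frequencies(relative=True)  # relative frequencies summing to 1
```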
### Backward-incompatible changes [stable]
* `Sequence.kmer_frequencies` now returns a `dict`. Previous behavior was to return a `collections.Counter` if `relative=False` was passed, and a `collections.defaultdict` if `relative=True` was passed. In the case of a missing key, the `Counter` would return 0 and the `defaultdict` would return 0.0. Because the return type is now always a `dict`, attempting to access a missing key will raise a `KeyError`. This change *may* break backwards-compatibility depending on how the `Counter`/`defaultdict` is being used. We hope that in most cases this change will not break backwards-compatibility because both `Counter` and `defaultdict` are `dict` subclasses.
If the previous behavior is desired, convert the `dict` into a `Counter`/`defaultdict`:
```python
import collections
from skbio import Sequence
seq = Sequence('ACCGAGTTTAACCGAATA')
# Counter
freqs_dict = seq.kmer_frequencies(k=8)
freqs_counter = collections.Counter(freqs_dict)
# defaultdict
freqs_dict = seq.kmer_frequencies(k=8, relative=True)
freqs_default_dict = collections.defaultdict(float, freqs_dict)
```
**Rationale:** We believe it is safer to return `dict` instead of `Counter`/`defaultdict` as this may prevent error-prone usage of the return value. Previous behavior allowed accessing missing kmers, returning 0 or 0.0 depending on the `relative` parameter. This is convenient in many cases but also potentially misleading. For example, consider the following code:
```python
from skbio import Sequence
seq = Sequence('ACCGAGTTTAACCGAATA')
freqs = seq.kmer_frequencies(k=8)
freqs['ACCGA']
```
Previous behavior would return 0 because the kmer `'ACCGA'` is not present in the `Counter`. In one respect this is the correct answer because we asked for kmers of length 8; `'ACCGA'` is a different length so it is not included in the results. However, we believe it is safer to avoid this implicit behavior in case the user assumes there are no `'ACCGA'` kmers in the sequence (which there are!). A `KeyError` in this case is more explicit and forces the user to consider their query. Returning a `dict` will also be consistent with `Sequence.frequencies`.
### Backward-incompatible changes [experimental]
* Replaced ``PCoA``, ``CCA``, ``CA`` and ``RDA`` in ``skbio.stats.ordination`` with equivalent functions ``pcoa``, ``cca``, ``ca`` and ``rda``. These functions now take ``pd.DataFrame`` objects.
* Change ``OrdinationResults`` to have its attributes based on ``pd.DataFrame`` and ``pd.Series`` objects, instead of pairs of identifiers and values. The changes are as follows:
- ``species`` and ``species_ids`` have been replaced by a ``pd.DataFrame`` named ``features``.
- ``site`` and ``site_ids`` have been replaced by a ``pd.DataFrame`` named ``samples``.
- ``eigvals`` is now a ``pd.Series`` object.
- ``proportion_explained`` is now a ``pd.Series`` object.
- ``biplot`` is now a ``pd.DataFrame`` object named ``biplot_scores``.
- ``site_constraints`` is now a ``pd.DataFrame`` object named ``sample_constraints``.
* ``short_method_name`` and ``long_method_name`` are now required arguments of the ``OrdinationResults`` object.
* Removed `skbio.diversity.alpha.equitability`. Please use `skbio.diversity.alpha.pielou_e`, which is more accurately named and better documented. Note that `equitability` by default used logarithm base 2 while `pielou_e` uses logarithm base `e` as described in Heip 1974.
* ``skbio.diversity.beta.pw_distances`` is now called ``skbio.diversity.beta_diversity``. This function no longer defines a default metric, and ``metric`` is now the first argument to this function. This function can also now take a pairwise distances function as ``pairwise_func``.
* Deprecated function ``skbio.diversity.beta.pw_distances_from_table`` has been removed from scikit-bio as scheduled. Code that used this should be adapted to use ``skbio.diversity.beta_diversity``.
* ``TreeNode.index_tree`` now returns a 2-D numpy array as its second return value (the child node index) instead of a 1-D numpy array.
* Deprecated functions `skbio.draw.boxplots` and `skbio.draw.grouped_distributions` have been removed from scikit-bio as scheduled. These functions generated plots that were not specific to bioinformatics. These types of plots can be generated with seaborn or another general-purpose plotting package.
* Deprecated function `skbio.stats.power.bootstrap_power_curve` has been removed from scikit-bio as scheduled. Use `skbio.stats.power.subsample_power` or `skbio.stats.power.subsample_paired_power` followed by `skbio.stats.power.confidence_bound`.
* Deprecated function `skbio.stats.spatial.procrustes` has been removed from scikit-bio as scheduled in favor of `scipy.spatial.procrustes`.
* Deprecated class `skbio.tree.CompressedTrie` and function `skbio.tree.fasta_to_pairlist` have been removed from scikit-bio as scheduled in favor of existing general-purpose Python trie packages.
* Deprecated function `skbio.util.flatten` has been removed from scikit-bio as scheduled in favor of solutions available in the Python standard library (see [here](http://stackoverflow.com/a/952952/3639023) and [here](http://stackoverflow.com/a/406199/3639023) for examples).
* Pairwise alignment functions in `skbio.alignment` now return a tuple containing the `TabularMSA` alignment, alignment score, and start/end positions. The returned `TabularMSA`'s `index` is always the default integer index; sequence IDs are no longer propagated to the MSA. Additionally, the pairwise alignment functions now accept the following input types to align:
- `local_pairwise_align_nucleotide`: `DNA` or `RNA`
- `local_pairwise_align_protein`: `Protein`
- `local_pairwise_align`: `IUPACSequence`
- `global_pairwise_align_nucleotide`: `DNA`, `RNA`, or `TabularMSA[DNA|RNA]`
- `global_pairwise_align_protein`: `Protein` or `TabularMSA[Protein]`
- `global_pairwise_align`: `IUPACSequence` or `TabularMSA`
- `local_pairwise_align_ssw`: `DNA`, `RNA`, or `Protein`. Additionally, this function now overrides the `protein` kwarg based on input type. `constructor` parameter was removed because the function now determines the return type based on input type.
* Removed `skbio.alignment.SequenceCollection` in favor of using a list or other standard library containers to store scikit-bio sequence objects (most `SequenceCollection` operations were simple list comprehensions). Use `DistanceMatrix.from_iterable` instead of `SequenceCollection.distances` (pass `key="id"` to exactly match original behavior).
* Removed `skbio.alignment.Alignment` in favor of `skbio.alignment.TabularMSA`.
* Removed `skbio.alignment.SequenceCollectionError` and `skbio.alignment.AlignmentError` exceptions as their corresponding classes no longer exist.
### Bug Fixes
* ``Sequence`` objects now handle slicing of empty positional metadata correctly. Any metadata that is empty will no longer be propagated by the internal ``_to`` constructor. ([#1133](https://github.com/biocore/scikit-bio/issues/1133))
* ``DissimilarityMatrix.plot()`` no longer leaves a white border around the
heatmap it plots (PR #1070).
* ``TreeNode.root_at_midpoint`` no longer fails when a node with two equal length child branches exists in the tree. ([#1077](https://github.com/biocore/scikit-bio/issues/1077))
* ``TreeNode._set_max_distance``, as called through ``TreeNode.get_max_distance`` or ``TreeNode.root_at_midpoint`` would store distance information as ``list``s in the attribute ``MaxDistTips`` on each node in the tree, however, these distances were only valid for the node in which the call to ``_set_max_distance`` was made. The values contained in ``MaxDistTips`` are now correct across the tree following a call to ``get_max_distance``. The scope of impact of this bug is limited to users that were interacting directly with ``MaxDistTips`` on descendant nodes; this bug does not impact any known method within scikit-bio. ([#1223](https://github.com/biocore/scikit-bio/issues/1223))
* Added missing `nose` dependency to setup.py's `install_requires`. ([#1214](https://github.com/biocore/scikit-bio/issues/1214))
* Fixed issue that resulted in legends of ``OrdinationResult`` plots sometimes being truncated. ([#1210](https://github.com/biocore/scikit-bio/issues/1210))
### Deprecated functionality [stable]
* `skbio.Sequence.copy` has been deprecated in favor of `copy.copy(seq)` and `copy.deepcopy(seq)`.
### Miscellaneous
* Doctests are now written in Python 3.
* ``make test`` now validates MANIFEST.in using [check-manifest](https://github.com/mgedmin/check-manifest). ([#461](https://github.com/biocore/scikit-bio/issues/461))
* Many new alpha diversity equations added to ``skbio.diversity.alpha`` documentation. ([#321](https://github.com/biocore/scikit-bio/issues/321))
* Order of ``lowercase`` and ``validate`` keywords swapped in ``DNA``, ``RNA``, and ``Protein``.
## Version 0.4.0 (2015-07-08)
Initial beta release. In addition to the changes detailed below, the following
subpackages have been mostly or entirely rewritten and most of their APIs are
substantially different (and improved!):
* `skbio.sequence`
* `skbio.io`
The APIs of these subpackages are now stable, and all others are experimental. See the [API stability docs](https://github.com/biocore/scikit-bio/tree/0.4.0/doc/source/user/api_stability.rst) for more details, including what we mean by *stable* and *experimental* in this context. We recognize that this is a lot of backward-incompatible changes. To avoid these types of changes being a surprise to our users, our public APIs are now decorated to make it clear to developers when an API can be relied upon (stable) and when it may be subject to change (experimental).
### Features
* Added `skbio.stats.composition` for analyzing data made up of proportions
* Added new ``skbio.stats.evolve`` subpackage for evolutionary statistics. Currently contains a single function, ``hommola_cospeciation``, which implements a permutation-based test of correlation between two distance matrices.
* Added support for ``skbio.io.util.open_file`` and ``skbio.io.util.open_files`` to pull files from HTTP and HTTPS URLs. This behavior propagates to the I/O registry.
* FASTA/QUAL (``skbio.io.format.fasta``) and FASTQ (``skbio.io.format.fastq``) readers now allow blank or whitespace-only lines at the beginning of the file, between records, or at the end of the file. A blank or whitespace-only line in any other location will continue to raise an error [#781](https://github.com/biocore/scikit-bio/issues/781).
* scikit-bio now ignores leading and trailing whitespace characters on each line while reading FASTA/QUAL and FASTQ files.
* Added `ratio` parameter to `skbio.stats.power.subsample_power`. This allows the user to calculate power on groups of uneven size (for example, drawing twice as many samples from Group B as from Group A). If `ratio` is not set, group sizes will remain equal across all groups.
* Power calculations (`skbio.stats.power.subsample_power` and `skbio.stats.power.subsample_paired_power`) can use test functions that return multiple p values, like some multivariate linear regression models. Previously, the power calculations required the test to return a single p value.
* Added ``skbio.util.assert_data_frame_almost_equal`` function for comparing ``pd.DataFrame`` objects in unit tests.
### Performance enhancements
* The speed of quality score decoding has been significantly improved (~2x) when reading `fastq` files.
* The speed of `NucleotideSequence.reverse_complement` has been improved (~6x).
### Bug fixes
* Changed `Sequence.distance` to raise an error any time two sequences are passed of different lengths regardless of the `distance_fn` being passed. [(#514)](https://github.com/biocore/scikit-bio/issues/514)
* Fixed issue with ``TreeNode.extend`` where if given the children of another ``TreeNode`` object (``tree.children``), both trees would be left in an incorrect and unpredictable state. ([#889](https://github.com/biocore/scikit-bio/issues/889))
* Changed the way power was calculated in `subsample_paired_power` to move the subsample selection before the test is performed. This increases the number of Monte Carlo simulations performed during power estimation, and improves the accuracy of the returned estimate. Previous power estimates from `subsample_paired_power` should be disregarded and re-calculated. ([#910](https://github.com/biocore/scikit-bio/issues/910))
* Fixed issue where `randdm` was attempting to create asymmetric distance matrices. This was causing an error to be raised by the `DistanceMatrix` constructor inside of the `randdm` function, so that `randdm` would fail when attempting to create large distance matrices. ([#943](https://github.com/biocore/scikit-bio/issues/943))
### Deprecated functionality
* Deprecated `skbio.util.flatten`. This function will be removed in scikit-bio 0.3.1. Please use standard python library functionality
described here [Making a flat list out of lists of lists](http://stackoverflow.com/a/952952/3639023), [Flattening a shallow list](http://stackoverflow.com/a/406199/3639023) ([#833](https://github.com/biocore/scikit-bio/issues/833))
* Deprecated `skbio.stats.power.bootstrap_power_curve`; it will be removed in scikit-bio 0.4.1. It is deprecated in favor of using ``subsample_power`` or ``subsample_paired_power`` to calculate a power matrix, and then ``confidence_bound`` to calculate the average and confidence intervals.
### Backward-incompatible changes
* Removed the following deprecated functionality:
- `skbio.parse` subpackage, including `SequenceIterator`, `FastaIterator`, `FastqIterator`, `load`, `parse_fasta`, `parse_fastq`, `parse_qual`, `write_clustal`, `parse_clustal`, and `FastqParseError`; please use `skbio.io` instead.
- `skbio.format` subpackage, including `fasta_from_sequence`, `fasta_from_alignment`, and `format_fastq_record`; please use `skbio.io` instead.
- `skbio.alignment.SequenceCollection.int_map`; please use `SequenceCollection.update_ids` instead.
- `skbio.alignment.SequenceCollection` methods `to_fasta` and `toFasta`; please use `SequenceCollection.write` instead.
- `constructor` parameter in `skbio.alignment.Alignment.majority_consensus`; please convert returned biological sequence object manually as desired (e.g., `str(seq)`).
- `skbio.alignment.Alignment.to_phylip`; please use `Alignment.write` instead.
- `skbio.sequence.BiologicalSequence.to_fasta`; please use `BiologicalSequence.write` instead.
- `skbio.tree.TreeNode` methods `from_newick`, `from_file`, and `to_newick`; please use `TreeNode.read` and `TreeNode.write` instead.
- `skbio.stats.distance.DissimilarityMatrix` methods `from_file` and `to_file`; please use `DissimilarityMatrix.read` and `DissimilarityMatrix.write` instead.
- `skbio.stats.ordination.OrdinationResults` methods `from_file` and `to_file`; please use `OrdinationResults.read` and `OrdinationResults.write` instead.
- `skbio.stats.p_value_to_str`; there is no replacement.
- `skbio.stats.subsample`; please use `skbio.stats.subsample_counts` instead.
- `skbio.stats.distance.ANOSIM`; please use `skbio.stats.distance.anosim` instead.
- `skbio.stats.distance.PERMANOVA`; please use `skbio.stats.distance.permanova` instead.
- `skbio.stats.distance.CategoricalStatsResults`; there is no replacement, please use `skbio.stats.distance.anosim` or `skbio.stats.distance.permanova`, which will return a `pandas.Series` object.
* `skbio.alignment.Alignment.majority_consensus` now returns `BiologicalSequence('')` if the alignment is empty. Previously, `''` was returned.
* `min_observations` was removed from `skbio.stats.power.subsample_power` and `skbio.stats.power.subsample_paired_power`. The minimum number of samples for subsampling depends on the data set and statistical tests. Having a default parameter set unnecessary limitations on the technique.
### Miscellaneous
* Changed testing procedures
- Developers should now use `make test`
- Users can use `python -m skbio.test`
- Added `skbio.util._testing.TestRunner` (available through `skbio.util.TestRunner`). Used to provide a `test` method for each module init file. This class represents a unified testing path which wraps all `skbio` testing functionality.
- Autodetect Python version and disable doctests for Python 3.
* `numpy` is no longer required to be installed before installing scikit-bio!
* Upgraded checklist.py to check source files non-conforming to [new header style](http://scikit-bio.org/docs/latest/development/new_module.html). ([#855](https://github.com/biocore/scikit-bio/issues/855))
* Updated to use `natsort` >= 4.0.0.
* The method of subsampling was changed for ``skbio.stats.power.subsample_paired_power``. Rather than drawing a paired sample for the run and then subsampling for each count, the subsample is now drawn for each sample and each run. In test data, this did not significantly alter the power results.
* checklist.py now enforces `__future__` imports in .py files.
## Version 0.2.3 (2015-02-13)
### Features
* Modified ``skbio.stats.distance.pwmantel`` to accept a list of filepaths. This is useful as it allows for a smaller amount of memory consumption as it only loads two matrices at a time as opposed to requiring that all distance matrices are loaded into memory.
* Added ``skbio.util.find_duplicates`` for finding duplicate elements in an iterable.
### Bug fixes
* Fixed floating point precision bugs in ``Alignment.position_frequencies``, ``Alignment.position_entropies``, ``Alignment.omit_gap_positions``, ``Alignment.omit_gap_sequences``, ``BiologicalSequence.k_word_frequencies``, and ``SequenceCollection.k_word_frequencies`` ([#801](https://github.com/biocore/scikit-bio/issues/801)).
### Backward-incompatible changes
* Removed ``feature_types`` attribute from ``BiologicalSequence`` and all subclasses ([#797](https://github.com/biocore/scikit-bio/pull/797)).
* Removed ``find_features`` method from ``BiologicalSequence`` and ``ProteinSequence`` ([#797](https://github.com/biocore/scikit-bio/pull/797)).
* ``BiologicalSequence.k_word_frequencies`` now returns a ``collections.defaultdict`` of type ``float`` instead of type ``int``. This only affects the "default" case, when a key isn't present in the dictionary. Previous behavior would return ``0`` as an ``int``, while the new behavior is to return ``0.0`` as a ``float``. This change also affects the ``defaultdict``s that are returned by ``SequenceCollection.k_word_frequencies``.
### Miscellaneous
* ``DissimilarityMatrix`` and ``DistanceMatrix`` now report duplicate IDs in the ``DissimilarityMatrixError`` message that can be raised during validation.
## Version 0.2.2 (2014-12-04)
### Features
* Added ``plot`` method to ``skbio.stats.distance.DissimilarityMatrix`` for creating basic heatmaps of a dissimilarity/distance matrix (see [#684](https://github.com/biocore/scikit-bio/issues/684)). Also added ``_repr_png_`` and ``_repr_svg_`` methods for automatic display in the IPython Notebook, with ``png`` and ``svg`` properties for direct access.
* Added `__str__` method to `skbio.stats.ordination.OrdinationResults`.
* Added ``skbio.stats.distance.anosim`` and ``skbio.stats.distance.permanova`` functions, which replace the ``skbio.stats.distance.ANOSIM`` and ``skbio.stats.distance.PERMANOVA`` classes. These new functions provide simpler procedural interfaces to running these statistical methods. They also provide more convenient access to results by returning a ``pandas.Series`` instead of a ``CategoricalStatsResults`` object. These functions have more extensive documentation than their previous versions. If significance tests are suppressed, p-values are returned as ``np.nan`` instead of ``None`` for consistency with other statistical methods in scikit-bio. [#754](https://github.com/biocore/scikit-bio/issues/754)
* Added `skbio.stats.power` for performing empirical power analysis. The module uses existing datasets and iteratively draws samples to estimate the number of samples needed to see a significant difference for a given critical value.
* Added `skbio.stats.isubsample` for subsampling from an unknown number of values. This method supports subsampling from multiple partitions and does not require that all items be stored in memory, requiring approximately `O(N*M)` space where `N` is the number of partitions and `M` is the maximum subsample size.
* Added ``skbio.stats.subsample_counts``, which replaces ``skbio.stats.subsample``. See deprecation section below for more details ([#770](https://github.com/biocore/scikit-bio/issues/770)).
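A minimal sketch of the replacement function (the count vector is arbitrary):
```python
import numpy as np
from skbio.stats import subsample_counts

counts = np.array([4, 5, 0, 2, 1])
# Randomly draw 6 of the 12 observations, without replacement
sub = subsample_counts(counts, 6)
```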
### Bug fixes
* Fixed issue where SSW wouldn't compile on i686 architectures ([#409](https://github.com/biocore/scikit-bio/issues/409)).
### Deprecated functionality
* Deprecated ``skbio.stats.p_value_to_str``. This function will be removed in scikit-bio 0.3.0. Permutation-based p-values in scikit-bio are calculated as ``(num_extreme + 1) / (num_permutations + 1)``, so it is impossible to obtain a p-value of zero. This function historically existed for correcting the number of digits displayed when obtaining a p-value of zero. Since this is no longer possible, this functionality will be removed.
* Deprecated ``skbio.stats.distance.ANOSIM`` and ``skbio.stats.distance.PERMANOVA`` in favor of ``skbio.stats.distance.anosim`` and ``skbio.stats.distance.permanova``, respectively.
* Deprecated ``skbio.stats.distance.CategoricalStatsResults`` in favor of using ``pandas.Series`` to store statistical method results. ``anosim`` and ``permanova`` return ``pandas.Series`` instead of ``CategoricalStatsResults``.
* Deprecated ``skbio.stats.subsample`` in favor of ``skbio.stats.subsample_counts``, which provides an identical interface; only the function name has changed. ``skbio.stats.subsample`` will be removed in scikit-bio 0.3.0.
### Backward-incompatible changes
* Deprecation warnings are now raised using ``DeprecationWarning`` instead of ``UserWarning`` ([#774](https://github.com/biocore/scikit-bio/issues/774)).
### Miscellaneous
* The ``pandas.DataFrame`` returned by ``skbio.stats.distance.pwmantel`` now stores p-values as floats and does not convert them to strings with a specific number of digits. p-values that were previously stored as "N/A" are now stored as ``np.nan`` for consistency with other statistical methods in scikit-bio. See note in "Deprecated functionality" above regarding ``p_value_to_str`` for details.
* scikit-bio now supports versions of IPython < 2.0.0 ([#767](https://github.com/biocore/scikit-bio/issues/767)).
## Version 0.2.1 (2014-10-27)
This is an alpha release of scikit-bio. At this stage, major backwards-incompatible API changes can and will happen. Unified I/O with the scikit-bio I/O registry was the focus of this release.
### Features
* Added ``strict`` and ``lookup`` optional parameters to ``skbio.stats.distance.mantel`` for handling reordering and matching of IDs when provided ``DistanceMatrix`` instances as input (these parameters were previously only available in ``skbio.stats.distance.pwmantel``).
* ``skbio.stats.distance.pwmantel`` now accepts an iterable of ``array_like`` objects. Previously, only ``DistanceMatrix`` instances were allowed.
* Added ``plot`` method to ``skbio.stats.ordination.OrdinationResults`` for creating basic 3-D matplotlib scatterplots of ordination results, optionally colored by metadata in a ``pandas.DataFrame`` (see [#518](https://github.com/biocore/scikit-bio/issues/518)). Also added ``_repr_png_`` and ``_repr_svg_`` methods for automatic display in the IPython Notebook, with ``png`` and ``svg`` properties for direct access.
* Added ``skbio.stats.ordination.assert_ordination_results_equal`` for comparing ``OrdinationResults`` objects for equality in unit tests.
* ``BiologicalSequence`` (and its subclasses) now optionally store Phred quality scores. A biological sequence's quality scores are stored as a 1-D ``numpy.ndarray`` of nonnegative integers that is the same length as the biological sequence. Quality scores can be provided upon object instantiation via the keyword argument ``quality``, and can be retrieved via the ``BiologicalSequence.quality`` property. ``BiologicalSequence.has_quality`` is also provided for determining whether a biological sequence has quality scores or not. See [#616](https://github.com/biocore/scikit-bio/issues/616) for more details.
* Added ``BiologicalSequence.sequence`` property for retrieving the underlying string representing the sequence characters. This was previously (and still is) accessible via ``BiologicalSequence.__str__``. It is provided via a property for convenience and explicitness.
* Added ``BiologicalSequence.equals`` for full control over equality testing of biological sequences. By default, biological sequences must have the same type, underlying sequence of characters, identifier, description, and quality scores to compare equal. These properties can be ignored via the keyword argument ``ignore``. The behavior of ``BiologicalSequence.__eq__``/``__ne__`` remains unchanged (only type and underlying sequence of characters are compared).
* Added ``BiologicalSequence.copy`` for creating a copy of a biological sequence, optionally with one or more attributes updated.
* ``BiologicalSequence.__getitem__`` now supports specifying a sequence of indices to take from the biological sequence.
* Methods to read and write taxonomies are now available under ``skbio.tree.TreeNode.from_taxonomy`` and ``skbio.tree.TreeNode.to_taxonomy`` respectively.
* Added ``SequenceCollection.update_ids``, which provides a flexible way of updating sequence IDs on a ``SequenceCollection`` or ``Alignment`` (note that a new object is returned, since instances of these classes are immutable). Deprecated ``SequenceCollection.int_map`` in favor of this new method; it will be removed in scikit-bio 0.3.0.
* Added ``skbio.util.cardinal_to_ordinal`` for converting a cardinal number to ordinal string (e.g., useful for error messages).
* New I/O Registry: supports multiple file formats, automatic file format detection when reading, unified procedural ``skbio.io.read`` and ``skbio.io.write`` in addition to OOP interfaces (``read``/``write`` methods) on the objects listed below (a short read/write sketch follows the format list). See ``skbio.io`` for more details.
- Added "clustal" format support:
* Has sniffer
* Readers: ``Alignment``
* Writers: ``Alignment``
- Added "lsmat" format support:
* Has sniffer
* Readers: ``DissimilarityMatrix``, ``DistanceMatrix``
* Writers: ``DissimilarityMatrix``, ``DistanceMatrix``
- Added "ordination" format support:
* Has sniffer
* Readers: ``OrdinationResults``
* Writers: ``OrdinationResults``
- Added "newick" format support:
* Has sniffer
* Readers: ``TreeNode``
* Writers: ``TreeNode``
- Added "phylip" format support:
* No sniffer
* Readers: None
* Writers: ``Alignment``
- Added "qseq" format support:
* Has sniffer
* Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
* Writers: None
- Added "fasta"/QUAL format support:
* Has sniffer
* Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``Alignment``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
* Writers: same as readers
- Added "fastq" format support:
* Has sniffer
* Readers: generator of ``BiologicalSequence`` or its subclasses, ``SequenceCollection``, ``Alignment``, ``BiologicalSequence``, ``NucleotideSequence``, ``DNASequence``, ``RNASequence``, ``ProteinSequence``
* Writers: same as readers
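A minimal sketch of the unified I/O registry described above (the file names are hypothetical and used only for illustration):

```python
import skbio.io
from skbio import TreeNode, DistanceMatrix

# Procedural interface: the format can be given explicitly or sniffed.
tree = skbio.io.read('tree.nwk', format='newick', into=TreeNode)

# Equivalent object-oriented interface on the objects themselves.
tree = TreeNode.read('tree.nwk')
dm = DistanceMatrix.read('distances.lsmat')

# Writing goes through the same registry.
dm.write('distances_copy.lsmat')
```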
### Bug fixes
* Removed ``constructor`` parameter from ``Alignment.k_word_frequencies``, ``BiologicalSequence.k_words``, ``BiologicalSequence.k_word_counts``, and ``BiologicalSequence.k_word_frequencies`` as it had no effect (it was never hooked up in the underlying code). ``BiologicalSequence.k_words`` now returns a generator of ``BiologicalSequence`` objects instead of strings.
* Modified the ``Alignment`` constructor to verify that all sequences have the same length, if not, raise an ``AlignmentError`` exception. Updated the method ``Alignment.subalignment`` to calculate the indices only once now that identical sequence length is guaranteed.
### Deprecated functionality
* Deprecated ``constructor`` parameter in ``Alignment.majority_consensus`` in favor of having users call ``str`` on the returned ``BiologicalSequence``. This parameter will be removed in scikit-bio 0.3.0.
* Existing I/O functionality deprecated in favor of I/O registry, old functionality will be removed in scikit-bio 0.3.0. All functionality can be found at ``skbio.io.read``, ``skbio.io.write``, and the methods listed below:
* Deprecated the following "clustal" readers/writers:
- ``write_clustal`` -> ``Alignment.write``
- ``parse_clustal`` -> ``Alignment.read``
* Deprecated the following distance matrix format ("lsmat") readers/writers:
- ``DissimilarityMatrix.from_file`` -> ``DissimilarityMatrix.read``
- ``DissimilarityMatrix.to_file`` -> ``DissimilarityMatrix.write``
- ``DistanceMatrix.from_file`` -> ``DistanceMatrix.read``
- ``DistanceMatrix.to_file`` -> ``DistanceMatrix.write``
* Deprecated the following ordination format ("ordination") readers/writers:
- ``OrdinationResults.from_file`` -> ``OrdinationResults.read``
- ``OrdinationResults.to_file`` -> ``OrdinationResults.write``
* Deprecated the following "newick" readers/writers:
- ``TreeNode.from_file`` -> ``TreeNode.read``
- ``TreeNode.from_newick`` -> ``TreeNode.read``
- ``TreeNode.to_newick`` -> ``TreeNode.write``
* Deprecated the following "phylip" writers:
- ``Alignment.to_phylip`` -> ``Alignment.write``
* Deprecated the following "fasta"/QUAL readers/writers:
- ``SequenceCollection.from_fasta_records`` -> ``SequenceCollection.read``
- ``SequenceCollection.to_fasta`` -> ``SequenceCollection.write``
- ``fasta_from_sequences`` -> ``skbio.io.write(obj, into=<file>, format='fasta')``
- ``fasta_from_alignment`` -> ``Alignment.write``
- ``parse_fasta`` -> ``skbio.io.read(<fasta>, format='fasta')``
- ``parse_qual`` -> ``skbio.io.read(<fasta>, format='fasta', qual=<file>)``
- ``BiologicalSequence.to_fasta`` -> ``BiologicalSequence.write``
* Deprecated the following "fastq" readers/writers:
- ``parse_fastq`` -> ``skbio.io.read(<fastq>, format='fastq')``
- ``format_fastq_record`` -> ``skbio.io.write(<fastq>, format='fastq')``
### Backward-incompatible changes
* ``skbio.stats.distance.mantel`` now returns a 3-element tuple containing the correlation coefficient, the p-value, and the number of matching rows/cols in the distance matrices (``n``); see the sketch after this list. The return value was previously a 2-element tuple containing only the correlation coefficient and p-value.
* ``skbio.stats.distance.mantel`` reorders input ``DistanceMatrix`` instances based on matching IDs (see optional parameters ``strict`` and ``lookup`` for controlling this behavior). In the past, ``DistanceMatrix`` instances were treated the same as ``array_like`` input and no reordering took place, regardless of ID (mis)matches. ``array_like`` input behavior remains the same.
* If mismatched types are provided to ``skbio.stats.distance.mantel`` (e.g., a ``DistanceMatrix`` and ``array_like``), a ``TypeError`` will be raised.
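A minimal sketch of the new three-element return value (toy matrices; the permutation-based p-value will vary from run to run):

```python
from skbio import DistanceMatrix
from skbio.stats.distance import mantel

x = DistanceMatrix([[0.0, 0.1, 0.2],
                    [0.1, 0.0, 0.3],
                    [0.2, 0.3, 0.0]], ids=['a', 'b', 'c'])
y = DistanceMatrix([[0.0, 0.2, 0.4],
                    [0.2, 0.0, 0.6],
                    [0.4, 0.6, 0.0]], ids=['a', 'b', 'c'])

# mantel now returns (correlation coefficient, p-value, n), not a 2-tuple.
coeff, p_value, n = mantel(x, y, permutations=99)
print(coeff, n)  # n == 3 because all three IDs match between x and y
```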
### Miscellaneous
* Added git timestamp checking to checklist.py, ensuring that when changes are made to Cython (.pyx) files, their corresponding generated C files are also updated.
* Fixed performance bug when instantiating ``BiologicalSequence`` objects. The previous runtime scaled linearly with sequence length; it is now constant time when the sequence is already a string. See [#623](https://github.com/biocore/scikit-bio/issues/623) for details.
* IPython and six are now required dependencies.
## Version 0.2.0 (2014-08-07)
This is an initial alpha release of scikit-bio. At this stage, major backwards-incompatible API changes can and will happen. Many backwards-incompatible API changes were made since the previous release.
### Features
* Added ability to compute distances between sequences in a ``SequenceCollection`` object ([#509](https://github.com/biocore/scikit-bio/issues/509)), and expanded ``Alignment.distance`` to allow the user to pass a function for computing distances (the default distance metric is still ``scipy.spatial.distance.hamming``) ([#194](https://github.com/biocore/scikit-bio/issues/194)).
* Added functionality to not penalize terminal gaps in global alignment. This functionality results in more biologically relevant global alignments (see [#537](https://github.com/biocore/scikit-bio/issues/537) for discussion of the issue) and is now the default behavior for global alignment.
* The Python global aligners (``global_pairwise_align``, ``global_pairwise_align_nucleotide``, and ``global_pairwise_align_protein``) now support aligning pairs of sequences, pairs of alignments, and a sequence and an alignment (see [#550](https://github.com/biocore/scikit-bio/issues/550), and the sketch after this list). This functionality supports progressive multiple sequence alignment, among other things such as adding a sequence to an existing alignment.
* Added ``StockholmAlignment.to_file`` for writing Stockholm-formatted files.
* Added ``strict=True`` optional parameter to ``DissimilarityMatrix.filter``.
* Added ``TreeNode.find_all`` for finding all tree nodes that match a given name.
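A rough sketch of progressive use of the global aligner described above (toy sequences; the return value has changed across releases — recent versions return a ``(TabularMSA, score, positions)`` tuple, which is what is unpacked here, whereas this release returned ``Alignment`` objects directly):

```python
from skbio.alignment import global_pairwise_align_nucleotide
from skbio.sequence import DNA

seq1 = DNA('GACCTTGACCAGGTA')
seq2 = DNA('GACCATGTCCAGGTA')
seq3 = DNA('GACCATGACCAGGAA')

# Align two sequences, then align the resulting alignment against a third
# sequence -- the building block of progressive multiple sequence alignment.
msa12, _, _ = global_pairwise_align_nucleotide(seq1, seq2)
msa123, _, _ = global_pairwise_align_nucleotide(msa12, seq3)
```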
### Bug fixes
* Fixed bug that resulted in a ``ValueError`` from ``local_align_pairwise_nucleotide`` (see [#504](https://github.com/biocore/scikit-bio/issues/504)) under many circumstances. This would not generate incorrect results, but would cause the code to fail.
### Backward-incompatible changes
* Removed ``skbio.math``, leaving ``stats`` and ``diversity`` to become top level packages. For example, instead of ``from skbio.math.stats.ordination import PCoA`` you would now import ``from skbio.stats.ordination import PCoA``.
* The module ``skbio.math.gradient`` as well as the contents of ``skbio.math.subsample`` and ``skbio.math.stats.misc`` are now found in ``skbio.stats``. As an example, to import subsample: ``from skbio.stats import subsample``; to import everything from gradient: ``from skbio.stats.gradient import *``.
* The contents of ``skbio.math.stats.ordination.utils`` are now in ``skbio.stats.ordination``.
* Removed ``skbio.app`` subpackage (i.e., the *application controller framework*) as this code has been ported to the standalone [burrito](https://github.com/biocore/burrito) Python package. This code was not specific to bioinformatics and is useful for wrapping command-line applications in general.
* Removed ``skbio.core``, leaving ``alignment``, ``genetic_code``, ``sequence``, ``tree``, and ``workflow`` to become top level packages. For example, instead of ``from skbio.core.sequence import DNA`` you would now import ``from skbio.sequence import DNA``.
* Removed ``skbio.util.exception`` and ``skbio.util.warning`` (see [#577](https://github.com/biocore/scikit-bio/issues/577) for the reasoning behind this change). The exceptions/warnings were moved to the following locations:
- ``FileFormatError``, ``RecordError``, ``FieldError``, and ``EfficiencyWarning`` have been moved to ``skbio.util``
- ``BiologicalSequenceError`` has been moved to ``skbio.sequence``
- ``SequenceCollectionError`` and ``StockholmParseError`` have been moved to ``skbio.alignment``
- ``DissimilarityMatrixError``, ``DistanceMatrixError``, ``DissimilarityMatrixFormatError``, and ``MissingIDError`` have been moved to ``skbio.stats.distance``
- ``TreeError``, ``NoLengthError``, ``DuplicateNodeError``, ``MissingNodeError``, and ``NoParentError`` have been moved to ``skbio.tree``
- ``FastqParseError`` has been moved to ``skbio.parse.sequences``
- ``GeneticCodeError``, ``GeneticCodeInitError``, and ``InvalidCodonError`` have been moved to ``skbio.genetic_code``
* The contents of ``skbio.genetic_code`` (formerly ``skbio.core.genetic_code``) are now in ``skbio.sequence``. The ``GeneticCodes`` dictionary is now a function ``genetic_code``. The functionality is the same, except that because this is now a function rather than a dict, retrieving a genetic code is done using a function call rather than a lookup (so, for example, ``GeneticCodes[2]`` becomes ``genetic_code(2)``); see the sketch after this list.
* Many submodules have been made private with the intention of simplifying imports for users. See [#562](https://github.com/biocore/scikit-bio/issues/562) for discussion of this change. The following list contains the previous module name and where imports from that module should now come from.
- ``skbio.alignment.ssw`` to ``skbio.alignment``
- ``skbio.alignment.alignment`` to ``skbio.alignment``
- ``skbio.alignment.pairwise`` to ``skbio.alignment``
- ``skbio.diversity.alpha.base`` to ``skbio.diversity.alpha``
- ``skbio.diversity.alpha.gini`` to ``skbio.diversity.alpha``
- ``skbio.diversity.alpha.lladser`` to ``skbio.diversity.alpha``
- ``skbio.diversity.beta.base`` to ``skbio.diversity.beta``
- ``skbio.draw.distributions`` to ``skbio.draw``
- ``skbio.stats.distance.anosim`` to ``skbio.stats.distance``
- ``skbio.stats.distance.base`` to ``skbio.stats.distance``
- ``skbio.stats.distance.permanova`` to ``skbio.stats.distance``
- ``skbio.distance`` to ``skbio.stats.distance``
- ``skbio.stats.ordination.base`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.canonical_correspondence_analysis`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.correspondence_analysis`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.principal_coordinate_analysis`` to ``skbio.stats.ordination``
- ``skbio.stats.ordination.redundancy_analysis`` to ``skbio.stats.ordination``
- ``skbio.tree.tree`` to ``skbio.tree``
- ``skbio.tree.trie`` to ``skbio.tree``
- ``skbio.util.misc`` to ``skbio.util``
- ``skbio.util.testing`` to ``skbio.util``
- ``skbio.util.exception`` to ``skbio.util``
- ``skbio.util.warning`` to ``skbio.util``
* Moved ``skbio.distance`` contents into ``skbio.stats.distance``.
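A small sketch of the lookup-to-function change noted above (this reflects the API as of this release; the interface has changed again in later versions):

```python
from skbio.sequence import genetic_code

# Previously: GeneticCodes[2]; the same table is now retrieved with a call.
vertebrate_mt = genetic_code(2)
print(vertebrate_mt)
```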
### Miscellaneous
* Relaxed requirement in ``BiologicalSequence.distance`` that sequences being compared are of equal length. This is relevant for Hamming distance, so the check is still performed in that case, but other distance metrics may not have that requirement. See [#504](https://github.com/biocore/scikit-bio/issues/507).
* Renamed ``powertrip.py`` repo-checking script to ``checklist.py`` for clarity.
* ``checklist.py`` now ensures that all unit tests import from a minimally deep API. For example, it will produce an error if ``skbio.core.distance.DistanceMatrix`` is used over ``skbio.DistanceMatrix``.
* Extra dimension is no longer calculated in ``skbio.stats.spatial.procrustes``.
* Expanded documentation in various subpackages.
* Added new scikit-bio logo. Thanks [Alina Prassas](http://cargocollective.com/alinaprassas)!
## Version 0.1.4 (2014-06-25)
This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
### Features
* Added Python implementations of Smith-Waterman and Needleman-Wunsch alignment as ``skbio.core.alignment.pairwise.local_pairwise_align`` and ``skbio.core.alignment.pairwise.global_pairwise_align``. These are much slower than the native C implementations (e.g., ``skbio.core.alignment.local_pairwise_align_ssw``) and as a result raise an ``EfficiencyWarning`` when called, but they are included because they serve as useful educational examples and are simple to experiment with.
* Added ``skbio.core.diversity.beta.pw_distances`` and ``skbio.core.diversity.beta.pw_distances_from_table``. These provide convenient access to the ``scipy.spatial.distance.pdist`` *beta diversity* metrics from within scikit-bio. The ``skbio.core.diversity.beta.pw_distances_from_table`` function will only be available temporarily, until the ``biom.table.Table`` object is merged into scikit-bio (see [#489](https://github.com/biocore/scikit-bio/issues/489)), at which point ``skbio.core.diversity.beta.pw_distances`` will be updated to use that.
* Added ``skbio.core.alignment.StockholmAlignment``, which provides support for parsing [Stockholm-formatted alignment files](http://sonnhammer.sbc.su.se/Stockholm.html) and working with those alignments in the context of RNA secondary structure information.
* Added ``skbio.core.tree.majority_rule`` function for computing consensus trees from a list of trees.
### Backward-incompatible changes
* Function ``skbio.core.alignment.align_striped_smith_waterman`` renamed to ``local_pairwise_align_ssw`` and now returns an ``Alignment`` object instead of an ``AlignmentStructure``.
* The following keyword arguments for ``StripedSmithWaterman`` and ``local_pairwise_align_ssw`` have been renamed (see the sketch after this list):
* ``gap_open`` -> ``gap_open_penalty``
* ``gap_extend`` -> ``gap_extend_penalty``
* ``match`` -> ``match_score``
* ``mismatch`` -> ``mismatch_score``
* Removed ``skbio.util.sort`` module in favor of [natsort](https://pypi.python.org/pypi/natsort) package.
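A minimal sketch using the renamed keyword arguments (toy query/target strings; note that the import path for the aligners has itself moved in later releases):

```python
from skbio.core.alignment import StripedSmithWaterman

query = StripedSmithWaterman('ACTAAGGCTCTCTACCCCTCTCAGAGA',
                             gap_open_penalty=5,
                             gap_extend_penalty=2,
                             match_score=2,
                             mismatch_score=-3)
alignment = query('ACTAAGGCTCGAGATTCACAACCACTTA')
print(alignment.optimal_alignment_score)
```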
### Miscellaneous
* Added powertrip.py script to perform basic sanity-checking of the repo based on recurring issues that weren't being caught until release time; added to Travis build.
* Added RELEASE.md with release instructions.
* Added intersphinx mappings to docs so that "See Also" references to numpy, scipy, matplotlib, and pandas are hyperlinks.
* The following classes are no longer ``namedtuple`` subclasses (see [#359](https://github.com/biocore/scikit-bio/issues/359) for the rationale):
* ``skbio.math.stats.ordination.OrdinationResults``
* ``skbio.math.gradient.GroupResults``
* ``skbio.math.gradient.CategoryResults``
* ``skbio.math.gradient.GradientANOVAResults``
* Added coding guidelines draft.
* Added new alpha diversity formulas to the ``skbio.math.diversity.alpha`` documentation.
## Version 0.1.3 (2014-06-12)
This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
### Features
* Added ``enforce_qual_range`` parameter to ``parse_fastq`` (on by default, maintaining backward compatibility). This allows disabling of the quality score range-checking.
* Added ``skbio.core.tree.nj``, which applies neighbor-joining for phylogenetic reconstruction.
* Added ``bioenv``, ``mantel``, and ``pwmantel`` distance-based statistics to ``skbio.math.stats.distance`` subpackage.
* Added ``skbio.math.stats.misc`` module for miscellaneous stats utility functions.
* IDs are now optional when constructing a ``DissimilarityMatrix`` or ``DistanceMatrix`` (monotonically-increasing integers cast as strings are automatically used).
* Added ``DistanceMatrix.permute`` method for randomly permuting rows and columns of a distance matrix.
* Added the following methods to ``DissimilarityMatrix``: ``filter``, ``index``, and ``__contains__`` for ID-based filtering, index lookup, and membership testing, respectively (see the sketch after this list).
* Added ``ignore_comment`` parameter to ``parse_fasta`` (off by default, maintaining backward compatibility). This handles stripping the comment field from the header line (i.e., all characters beginning with the first space) before returning the label.
* Added imports of ``BiologicalSequence``, ``NucleotideSequence``, ``DNA``, ``DNASequence``, ``RNA``, ``RNASequence``, ``Protein``, ``ProteinSequence``, ``DistanceMatrix``, ``align_striped_smith_waterman``, ``SequenceCollection``, ``Alignment``, ``TreeNode``, ``nj``, ``parse_fasta``, ``parse_fastq``, ``parse_qual``, ``FastaIterator``, ``FastqIterator``, ``SequenceIterator`` in ``skbio/__init__.py`` for convenient importing. For example, it's now possible to ``from skbio import Alignment``, rather than ``from skbio.core.alignment import Alignment``.
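A minimal sketch of the ``DistanceMatrix`` conveniences described above (toy data; the permuted matrix varies from run to run):

```python
from skbio import DistanceMatrix

# IDs are optional; monotonically-increasing integer strings are used.
dm = DistanceMatrix([[0.0, 0.5, 1.0],
                     [0.5, 0.0, 0.75],
                     [1.0, 0.75, 0.0]])
print(dm.ids)         # ('0', '1', '2')
print('1' in dm)      # membership testing via __contains__
print(dm.index('2'))  # index lookup

sub = dm.filter(['0', '2'])  # ID-based filtering
shuffled = dm.permute()      # randomly permute rows and columns
```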
### Bug fixes
* Fixed a couple of unit tests that could fail stochastically.
* Added missing ``__init__.py`` files to a couple of test directories so that these tests won't be skipped.
* ``parse_fastq`` now raises an error on dangling records.
* Fixed several warnings that were raised while running the test suite with Python 3.4.
### Backward-incompatible changes
* Functionality imported from ``skbio.core.ssw`` must now be imported from ``skbio.core.alignment`` instead.
### Miscellaneous
* Code is now flake8-compliant; added flake8 checking to Travis build.
* Various additions and improvements to documentation (API, installation instructions, developer instructions, etc.).
* ``__future__`` imports are now standardized across the codebase.
* New website front page and styling changes throughout. Moved docs site to its own versioned subdirectories.
* Reorganized alignment data structures and algorithms (e.g., SSW code, ``Alignment`` class, etc.) into an ``skbio.core.alignment`` subpackage.
## Version 0.1.1 (2014-05-16)
Fixes to setup.py. This is a pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
## Version 0.1.0 (2014-05-15)
Initial pre-alpha release. At this stage, major backwards-incompatible API changes can and will happen.
scikit-bio documentation
========================
This guide contains instructions for building the scikit-bio documentation, as
well as guidelines for contributing to the documentation.
**Note:** If you're only interested in viewing the scikit-bio documentation,
visit [scikit-bio.org](http://scikit-bio.org).
Building the documentation
--------------------------
To build the documentation, you'll need a scikit-bio development environment
set up. See [CONTRIBUTING.md](../CONTRIBUTING.md) for instructions. In
addition, you will need to install Sphinx and the theme used by the
documentation; you can do that with:
pip install 'Sphinx<=3.0' sphinx-bootstrap-theme
**Important:** The documentation will be built for whatever version of
scikit-bio is *currently installed* on your system (i.e., the version imported
by ```import skbio```). This may not match the code located in this repository.
You will need to either install this version of scikit-bio somewhere (e.g., in
a virtualenv) or point your ```PYTHONPATH``` environment variable to this code,
*before* building the documentation.
To build the documentation, assuming you are at the top-level scikit-bio
directory:
make -C doc clean html
The built HTML documentation will be at ```doc/build/html/index.html```.
Contributing to the documentation
---------------------------------
If you would like to contribute to the documentation, whether by adding
something entirely new or by modifying existing documentation, please first
review our [scikit-bio contribution guide](../CONTRIBUTING.md).
Before submitting your changes, ensure that the documentation builds without
errors or warnings.
### Documentation guidelines
Most of scikit-bio's API documentation is automatically generated from
[docstrings](http://legacy.python.org/dev/peps/pep-0257/#what-is-a-docstring).
The advantage of this approach is that users can access the documentation in an
interactive Python session or from our website as HTML. Other output formats
are also possible, such as PDF.
scikit-bio docstrings follow the [numpydoc conventions](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt).
This ensures that the docstrings are easily readable both from the interpreter
and HTML, PDF, etc. Please read the numpydoc guidelines before continuing.
### Documenting a module in scikit-bio
In addition to following the numpydoc conventions for docstrings, we have a few
more conventions that will ensure your documentation is correctly built and
linked within our website, and that it maintains consistency with the rest of
the scikit-bio docs.
The easiest way to get started with documenting your code is to look at the
docstrings in existing scikit-bio modules. A couple of modules to start with
are ```skbio.sequence``` and ```skbio.stats.distance```. Go ahead and look
through those now. We've structured our docs in a similar way to
[SciPy's documentation](http://docs.scipy.org/doc/scipy/reference/), so that
may be another good place to look for examples.
We'll take a top-down approach by discussing how to document a new module that
you'd like to add to scikit-bio (let's call it ```skbio/example.py```).
#### Module docstring
The first thing you'll need to add is a docstring for the module. The docstring
must start at the first line of the file. It should start with a title for the
module:
"""
Documentation examples (:mod:`skbio.example`)
=============================================
It is important to include the ```:mod:``` Sphinx directive in the title, as
this title will be included in the table of contents. Also make sure that the
title underline is the same length as the title.
We also need to include another Sphinx directive below this:
.. currentmodule:: skbio.example
This directive tells Sphinx that other classes, functions, etc. that we will
reference are located in the ```skbio.example``` module.
Next, include a more detailed description of the module. For example:
This module consists of several example classes and functions to illustrate
the scikit-bio documentation system.
Following that, list any classes, functions, and exceptions that you'd like
documentation generated for. Note that you do *not* need to include every
single class, function, or exception that is defined in the module. Also, you
do not need to list class methods, as those will be automatically included in
the generated class documentation. Only include objects that should be exposed
as part of the public API.
For example:
Classes
-------
.. autosummary::
:toctree: generated/
ExampleClass1
ExampleClass2
Functions
---------
.. autosummary::
:toctree: generated/
example_function1
example_function2
Exceptions
----------
.. autosummary::
:toctree: generated/
ExampleError
The ```autosummary``` directives are important as they generate RST files in
the ```generated/``` directory for each object. A single-line summary and link
to each object is inserted into the page for you.
After listing public module members, we encourage a usage example section
showing how to use some of the module's functionality. Examples should be
written in [doctest](http://docs.python.org/3/library/doctest.html) format so
that they can be automatically tested (e.g., using ```make test``` or
```python -m skbio.test```).
Examples
--------
Run the ``example_function1`` function:
>>> from skbio.example import example_function1
>>> example_function1("hello", "world")
hello world!
You can also embed the plots that an example generates into the built
documentation with the ```.. plot::``` directive. For example:
.. plot::
>>> import pandas as pd
>>> df = pd.DataFrame({'col1': [1, 2, 3, 4], 'col2': [10, 11, 12, 13]})
>>> fig = df.boxplot()
This will include the plot, a link to the source code used to generate the
plot, and links to different image formats (e.g., PNG and PDF) so that users
can easily download the plot.
You're now ready to document the members of your module.
#### Documenting module members
When documenting the members of a module (e.g., classes, methods, attributes,
functions, and exceptions), follow the numpydoc conventions. In addition to
these conventions, there are a few things to keep in mind:
- When documenting a class, only public methods and attributes are included in
the built documentation. If a method or attribute starts with an
underscore, it is assumed to be private.
- When documenting a class, include the ```Parameters``` section in the class
docstring, instead of in the ```__init__``` docstring. While numpydoc
technically supports either form, ```__init__``` is not included in the list
of methods by default and thus should have its documentation included in the
class docstring.
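For example, a class documented following these conventions might look like the sketch below (a hypothetical class, shown only to illustrate where the ```Parameters``` section goes and which members end up in the built docs):

```python
class ExampleClass1:
    """Brief one-line summary of the class.

    Extended summary describing what the class is for.

    Parameters
    ----------
    data : array_like
        Data passed to the constructor.
    name : str, optional
        Optional name associated with this instance.

    Attributes
    ----------
    name : str
        Name associated with this instance.

    """

    def __init__(self, data, name=None):
        self._data = data
        self.name = name

    def public_method(self):
        """Return the stored data (appears in the built documentation)."""
        return self._data

    def _private_helper(self):
        # Leading underscore: excluded from the built documentation.
        pass
```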
#### Including the module in the docs
Until now, we've only been editing docstrings, which are attached to Python
code. The final step is to hook up this new module's docstrings to the
documentation build system:
1. Make sure you're within the ```scikit-bio/doc``` directory.
2. Create a new file with the same name as your module under the ```source```
directory. Do not include ```skbio``` as part of the name, and use
```.rst``` as the suffix. For example, ```source/example.rst```.
3. Add the following line to ```source/example.rst``` to have your module's
docstring pulled into the document:
```
.. automodule:: skbio.example
```
4. Add the following line to ```source/index.rst``` to add the new page to the
top-level table of contents:
```
example
```
That's it! You can now try building the documentation, which should include the
documentation for your new module!
### Documenting a subpackage in scikit-bio
The process of documenting a subpackage is very similar to documenting a module
in scikit-bio. The only difference is that the module docstring goes in the
subpackage's ```__init__.py```.
### Troubleshooting
If things aren't working correctly, try running ```make clean``` and then
rebuild the docs. If things still aren't working, try building the docs
*without* your changes, and see if there are any Sphinx errors or warnings.
Make note of these, and then see what new errors or warnings are generated when
you add your changes again.
scikit-bio
==========
scikit-bio (canonically pronounced *sigh-kit-buy-oh*) is a library for working
with biological data in Python 3. scikit-bio is open source, BSD-licensed
software that is currently under active development.
API Reference
-------------
.. toctree::
:maxdepth: 1
io
sequence
alignment
tree
workflow
diversity
stats
metadata
util
User Documentation
------------------
The user documentation contains high-level information for users of scikit-bio.
.. toctree::
:maxdepth: 1
user/api_stability
Developer Documentation
-----------------------
The developer documentation contains information for how to contribute
to scikit-bio.
.. toctree::
:maxdepth: 1
development/coding_guidelines
development/new_module
{# This template was modified from autosummaries default format #}
{{ fullname | escape | underline}}
{# We need a list of the built-ins that we implemented, not the default ones #}
{% set built_in_methods = [] %}
{% for item in all_methods %}
{% if (item not in ['__class__',
'__delattr__',
'__getattribute__',
'__init__',
'__dir__',
'__format__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__setattr__',
'__sizeof__',
'__subclasshook__',
'__init_subclass__',
'__class_getitem__'] and item.startswith('__')) %}
{{ built_in_methods.append(item) or '' }}
{% endif %}
{% endfor %}
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
{% if attributes %}
.. rubric:: Attributes
.. autosummary::
{% for item in attributes %}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% if built_in_methods %}
.. rubric:: Built-ins
.. autosummary::
:toctree:
{% for item in built_in_methods %}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% if methods %}
.. rubric:: Methods
.. autosummary::
:toctree:
{% for item in methods %}
{% if item != '__init__' %}
~{{ name }}.{{ item }}
{% endif %}
{%- endfor %}
{% endif %}
$(document).ready(function() {
/* Add a [>>>] button on the top-right corner of code samples to hide
* the >>> and ... prompts and the output and thus make the code
* copyable. */
var div = $('.highlight-python .highlight,' +
'.highlight-python3 .highlight,' +
'.highlight-pycon .highlight,' +
'.highlight-default .highlight')
var pre = div.find('pre');
// get the styles from the current theme
pre.parent().parent().css('position', 'relative');
var hide_text = 'Hide the prompts and output';
var show_text = 'Show the prompts and output';
var border_width = pre.css('border-top-width');
var border_style = pre.css('border-top-style');
var border_color = pre.css('border-top-color');
var button_styles = {
'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0',
'border-color': border_color, 'border-style': border_style,
'border-width': border_width, 'color': border_color, 'text-size': '75%',
'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em',
'border-radius': '0 3px 0 0'
}
// create and add the button to all the code blocks that contain >>>
div.each(function(index) {
var jthis = $(this);
if (jthis.find('.gp').length > 0) {
var button = $('<span class="copybutton">>>></span>');
button.css(button_styles)
button.attr('title', hide_text);
button.data('hidden', 'false');
jthis.prepend(button);
}
// tracebacks (.gt) contain bare text elements that need to be
// wrapped in a span to work with .nextUntil() (see later)
jthis.find('pre:has(.gt)').contents().filter(function() {
return ((this.nodeType == 3) && (this.data.trim().length > 0));
}).wrap('<span>');
});
// define the behavior of the button when it's clicked
$('.copybutton').click(function(e){
e.preventDefault();
var button = $(this);
if (button.data('hidden') === 'false') {
// hide the code output
button.parent().find('.go, .gp, .gt').hide();
button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden');
button.css('text-decoration', 'line-through');
button.attr('title', show_text);
button.data('hidden', 'true');
} else {
// show the code output
button.parent().find('.go, .gp, .gt').show();
button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible');
button.css('text-decoration', 'none');
button.attr('title', hide_text);
button.data('hidden', 'false');
}
});
});
API Stability
=============
All public functionality in scikit-bio has a defined stability state.
These states inform users and developers to what extent they can rely on
different APIs in the package.
You can find out the stability state of public functionality by looking at its
docstring, which is formatted based on
`numpydoc <https://github.com/numpy/numpydoc>`_. This information will either
be in the *Extended Summary* section of the docstring, or in the case of
deprecation, this information will appear as a note following the *Short
Summary*.
The following diagram illustrates the API lifecycle in scikit-bio:
.. image:: assets/api-lifecycle.png
:align: center
Definitions of the stability states and the information associated with each
follow.
Stable
------
Functionality defined as stable is part of scikit-bio's backward-
compatible API. Users can be confident that the API will not change without
first passing through the deprecated state, typically for at least two
release cycles. We make every effort to maintain the API of this code.
The docstrings of stable functionality will indicate the first scikit-bio
version where the functionality was considered stable.
Experimental
------------
Functionality defined as experimental is being considered for addition to
scikit-bio's stable API. Users are encouraged to use this code, but to be
aware that its API may change or be removed. Experimental functionality
will typically pass through the deprecated state before it is removed, but
in rare cases it may be removed directly (for example, if a serious
methodological flaw is discovered that makes the functionality
scientifically invalid).
The docstrings of experimental functionality will indicate the first
scikit-bio version where the functionality was considered experimental.
We aim to move functionality through the experimental phase quickly (for
example, two releases before moving to stable), but we don't make specific
promises about when experimental functionality will become stable. This
aligns with our philosophy that we don't make promises about experimental
APIs, only about stable APIs.
Deprecated
----------
Functionality defined as deprecated is targeted for removal from
scikit-bio. Users should transition away from using it.
The docstrings of deprecated functionality will indicate the first version
of scikit-bio where the functionality was deprecated, the version of
scikit-bio when the functionality will be removed, and the reason for
deprecation of the code (for example, because a function was determined to
be scientifically invalid, or because the API was adapted, and users should
be using a different version of the function).
Using deprecated functionality will raise a ``DeprecationWarning``. Since
Python 2.7, these types of warnings are **silenced by default**. When
developing a tool that uses scikit-bio, we recommend enabling the display of
deprecation warnings to be informed of upcoming API changes. For details on how
to display deprecation warnings, see `Python's deprecation warning docs
<https://docs.python.org/3/whatsnew/2.7.html#changes-to-the-handling-of-deprecation-warnings>`_.
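A minimal way to surface these warnings during development (one option among
several; the interpreter's ``-W`` flag works as well)::

    import warnings

    # Show DeprecationWarning (silenced by default) so that upcoming
    # scikit-bio API removals are visible while developing.
    warnings.simplefilter('always', DeprecationWarning)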
import collections.abc
import numpy as np
import pandas as pd
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity._phylogenetic import _nodes_by_counts
def _validate_counts_vector(counts, suppress_cast=False):
"""Validate and convert input to an acceptable counts vector type.
Note: may not always return a copy of `counts`!
"""
counts = np.asarray(counts)
try:
if not np.all(np.isreal(counts)):
raise Exception
except Exception:
raise ValueError("Counts vector must contain real-valued entries.")
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
elif (counts < 0).any():
raise ValueError("Counts vector cannot contain negative values.")
return counts
def _validate_counts_matrix(counts, ids=None, suppress_cast=False):
results = []
# handle case of where counts is a single vector by making it a matrix.
# this has to be done before forcing counts into an ndarray because we
# don't yet know that all of the entries are of equal length
if isinstance(counts, pd.core.frame.DataFrame):
if ids is not None and len(counts.index) != len(ids):
raise ValueError(
"Number of rows in ``counts``"
" must be equal to number of provided ``ids``."
)
return np.asarray(counts)
else:
if len(counts) == 0 or not isinstance(counts[0],
collections.abc.Iterable):
counts = [counts]
counts = np.asarray(counts)
if counts.ndim > 2:
raise ValueError(
"Only 1-D and 2-D array-like objects can be provided "
"as input. Provided object has %d dimensions." %
counts.ndim)
if ids is not None and len(counts) != len(ids):
raise ValueError(
"Number of rows in ``counts`` must be equal "
"to number of provided ``ids``."
)
lens = []
for v in counts:
results.append(_validate_counts_vector(v, suppress_cast))
lens.append(len(v))
if len(set(lens)) > 1:
raise ValueError(
"All rows in ``counts`` must be of equal length."
)
return np.asarray(results)
def _validate_otu_ids_and_tree(counts, otu_ids, tree):
len_otu_ids = len(otu_ids)
set_otu_ids = set(otu_ids)
if len_otu_ids != len(set_otu_ids):
raise ValueError("``otu_ids`` cannot contain duplicated ids.")
if len(counts) != len_otu_ids:
raise ValueError("``otu_ids`` must be the same length as ``counts`` "
"vector(s).")
if len(tree.root().children) == 0:
raise ValueError("``tree`` must contain more than just a root node.")
if len(tree.root().children) > 2:
# this is an imperfect check for whether the tree is rooted or not.
# can this be improved?
raise ValueError("``tree`` must be rooted.")
# all nodes (except the root node) have corresponding branch lengths
# all tip names in tree are unique
# all otu_ids correspond to tip names in tree
branch_lengths = []
tip_names = []
for e in tree.traverse():
if not e.is_root():
branch_lengths.append(e.length)
if e.is_tip():
tip_names.append(e.name)
set_tip_names = set(tip_names)
if len(tip_names) != len(set_tip_names):
raise DuplicateNodeError("All tip names must be unique.")
if np.array([branch is None for branch in branch_lengths]).any():
raise ValueError("All non-root nodes in ``tree`` must have a branch "
"length.")
missing_tip_names = set_otu_ids - set_tip_names
if missing_tip_names != set():
n_missing_tip_names = len(missing_tip_names)
raise MissingNodeError("All ``otu_ids`` must be present as tip names "
"in ``tree``. ``otu_ids`` not corresponding to "
"tip names (n=%d): %s" %
(n_missing_tip_names,
" ".join(missing_tip_names)))
def _vectorize_counts_and_tree(counts, otu_ids, tree):
""" Index tree and convert counts to np.array in corresponding order
"""
tree_index = tree.to_array(nan_length_value=0.0)
otu_ids = np.asarray(otu_ids)
counts = np.atleast_2d(counts)
counts_by_node = _nodes_by_counts(counts, otu_ids, tree_index)
branch_lengths = tree_index['length']
# branch_lengths is just a reference to the array inside of tree_index,
# but it's used so much that it's convenient to just pull it out here.
return counts_by_node.T, tree_index, branch_lengths
def _get_phylogenetic_kwargs(counts, **kwargs):
try:
otu_ids = kwargs.pop('otu_ids')
except KeyError:
raise ValueError("``otu_ids`` is required for phylogenetic diversity "
"metrics.")
try:
tree = kwargs.pop('tree')
except KeyError:
raise ValueError("``tree`` is required for phylogenetic diversity "
"metrics.")
return otu_ids, tree, kwargs
def _quantitative_to_qualitative_counts(counts):
    return counts > 0.0
from skbio.util._decorator import experimental
from skbio.diversity._util import (_validate_counts_vector,
_validate_otu_ids_and_tree,
_vectorize_counts_and_tree)
def _faith_pd(counts_by_node, branch_lengths):
return (branch_lengths * (counts_by_node > 0)).sum()
@experimental(as_of="0.4.1")
def faith_pd(counts, otu_ids, tree, validate=True):
""" Compute Faith's phylogenetic diversity metric (PD)
Parameters
----------
counts : 1-D array_like, int
Vectors of counts/abundances of OTUs for one sample.
otu_ids: list, np.array
Vector of OTU ids corresponding to tip names in ``tree``. Must be the
same length as ``counts``.
tree: skbio.TreeNode
Tree relating the OTUs in otu_ids. The set of tip names in the tree can
be a superset of ``otu_ids``, but not a subset.
validate: bool, optional
If `False`, validation of the input won't be performed. This step can
be slow, so if validation is run elsewhere it can be disabled here.
However, invalid input data can lead to invalid results or error
messages that are hard to interpret, so this step should not be
bypassed if you're not certain that your input data are valid. See
:mod:`skbio.diversity` for the description of what validation entails
so you can determine if you can safely disable validation.
Returns
-------
float
The phylogenetic diversity (PD) of the samples.
Raises
------
ValueError, MissingNodeError, DuplicateNodeError
If validation fails. Exact error will depend on what was invalid.
See Also
--------
skbio.diversity
skbio.diversity.alpha_diversity
Notes
-----
Faith's phylogenetic diversity, often referred to as PD, was originally
described in [1]_.
If computing Faith's PD for multiple samples, using
``skbio.diversity.alpha_diversity`` will be much faster than calling this
function individually on each sample.
This implementation differs from that in PyCogent (and therefore QIIME
versions less than 2.0.0) by imposing a few additional restrictions on the
inputs. First, the input tree must be rooted. In PyCogent, if an unrooted
tree was provided that had a single trifurcating node (a newick convention
for unrooted trees) that node was considered the root of the tree. Next,
all OTU IDs must be tips in the tree. PyCogent would silently ignore OTU
IDs that were not present the tree. To reproduce Faith PD results from
PyCogent with scikit-bio, ensure that your PyCogent Faith PD calculations
are performed on a rooted tree and that all OTU IDs are present in the
tree.
This implementation of Faith's PD is based on the array-based
implementation of UniFrac described in [2]_.
References
----------
.. [1] Faith, D. P. Conservation evaluation and phylogenetic diversity.
Biol. Conserv. (1992).
.. [2] Hamady M, Lozupone C, Knight R. Fast UniFrac: facilitating high-
throughput phylogenetic analyses of microbial communities including
analysis of pyrosequencing and PhyloChip data. ISME J. 4(1):17-27
(2010).
Examples
--------
Assume we have the following abundance data for a sample ``u``,
represented as a counts vector. These counts represent the
number of times specific Operational Taxonomic Units, or OTUs, were
observed in the sample.
>>> u_counts = [1, 0, 0, 4, 1, 2, 3, 0]
Because Faith PD is a phylogenetic diversity metric, we need to know which
OTU each count corresponds to, which we'll provide as ``otu_ids``.
>>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7',
... 'OTU8']
We also need a phylogenetic tree that relates the OTUs to one another.
>>> from io import StringIO
>>> from skbio import TreeNode
>>> tree = TreeNode.read(StringIO(
... '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
... '(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
... ',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
We can then compute the Faith PD of the sample.
>>> from skbio.diversity.alpha import faith_pd
>>> pd = faith_pd(u_counts, otu_ids, tree)
>>> print(round(pd, 2))
6.95
"""
counts_by_node, branch_lengths = _setup_faith_pd(
counts, otu_ids, tree, validate, single_sample=True)
return _faith_pd(counts_by_node, branch_lengths)
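# Added example (illustrative sketch, not part of the original scikit-bio
# source); it reuses the toy ``u_counts``, ``otu_ids`` and ``tree`` from the
# docstring above to show that faith_pd() is a thin wrapper around the two
# private helpers defined in this module.
# >>> cbn, bl = _setup_faith_pd(u_counts, otu_ids, tree, validate=True,
# ...                           single_sample=True)
# >>> print(round(_faith_pd(cbn, bl), 2))
# 6.95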
def _setup_faith_pd(counts, otu_ids, tree, validate, single_sample):
if validate:
if single_sample:
# only validate count if operating in single sample mode, they
# will have already been validated otherwise
counts = _validate_counts_vector(counts)
_validate_otu_ids_and_tree(counts, otu_ids, tree)
else:
_validate_otu_ids_and_tree(counts[0], otu_ids, tree)
counts_by_node, tree_index, branch_lengths = \
_vectorize_counts_and_tree(counts, otu_ids, tree)
    return counts_by_node, branch_lengths

# (end of source file: skbio/diversity/alpha/_faith_pd.py)

import numpy as np
from scipy.special import gammaln
from scipy.optimize import fmin_powell, minimize_scalar
from skbio.stats import subsample_counts
from skbio.util._decorator import experimental
from skbio.diversity._util import _validate_counts_vector
@experimental(as_of="0.4.0")
def berger_parker_d(counts):
r"""Calculate Berger-Parker dominance.
Berger-Parker dominance is defined as the fraction of the sample that
belongs to the most abundant OTU:
.. math::
d = \frac{N_{max}}{N}
where :math:`N_{max}` is defined as the number of individuals in the most
abundant OTU (or any of the most abundant OTUs in the case of ties), and
:math:`N` is defined as the total number of individuals in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Berger-Parker dominance.
Notes
-----
Berger-Parker dominance is defined in [1]_. The implementation here is
based on the description given in the SDR-IV online manual [2]_.
References
----------
.. [1] Berger & Parker (1970). SDR-IV online help.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
return counts.max() / counts.sum()
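# Added example (illustrative, not part of the original scikit-bio source):
# the most abundant OTU holds 4 of 8 individuals, so d = 4/8.
# >>> print(round(berger_parker_d([4, 2, 1, 1]), 2))
# 0.5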
@experimental(as_of="0.4.0")
def brillouin_d(counts):
r"""Calculate Brillouin index of alpha diversity.
This is calculated as follows:
.. math::
HB = \frac{\ln N!-\sum^s_{i=1}{\ln n_i!}}{N}
where :math:`N` is defined as the total number of individuals in the
sample, :math:`s` is the number of OTUs, and :math:`n_i` is defined as the
number of individuals in the :math:`i^{\text{th}}` OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Brillouin index.
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
nz = counts[counts.nonzero()]
n = nz.sum()
return (gammaln(n + 1) - gammaln(nz + 1).sum()) / n
@experimental(as_of="0.4.0")
def dominance(counts):
r"""Calculate dominance.
Dominance is defined as
.. math::
\sum{p_i^2}
where :math:`p_i` is the proportion of the entire community that OTU
:math:`i` represents.
Dominance can also be defined as 1 - Simpson's index. It ranges between
0 and 1.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Dominance.
See Also
--------
simpson
Notes
-----
The implementation here is based on the description given in [1]_.
References
----------
.. [1] http://folk.uio.no/ohammer/past/diversity.html
"""
counts = _validate_counts_vector(counts)
freqs = counts / counts.sum()
return (freqs * freqs).sum()
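# Added example (illustrative, not part of the original scikit-bio source):
# two equally abundant OTUs have proportions 0.5 each, so dominance is
# 0.5**2 + 0.5**2.
# >>> print(round(dominance([5, 5]), 2))
# 0.5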
@experimental(as_of="0.4.0")
def doubles(counts):
"""Calculate number of double occurrences (doubletons).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Doubleton count.
"""
counts = _validate_counts_vector(counts)
return (counts == 2).sum()
@experimental(as_of="0.4.0")
def enspie(counts):
r"""Calculate ENS_pie alpha diversity measure.
ENS_pie is equivalent to ``1 / dominance``:
.. math::
ENS_{pie} = \frac{1}{\sum_{i=1}^s{p_i^2}}
where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
the community represented by OTU :math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
ENS_pie alpha diversity measure.
See Also
--------
dominance
Notes
-----
ENS_pie is defined in [1]_.
References
----------
.. [1] Chase and Knight (2013). "Scale-dependent effect sizes of ecological
drivers on biodiversity: why standardised sampling is not enough".
Ecology Letters, Volume 16, Issue Supplement s1, pgs 17-26.
"""
counts = _validate_counts_vector(counts)
return 1 / dominance(counts)
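# Added example (illustrative, not part of the original scikit-bio source):
# ENS_pie is 1 / dominance, so the same two-OTU community gives 2.
# >>> print(round(enspie([5, 5]), 2))
# 2.0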
@experimental(as_of="0.4.0")
def esty_ci(counts):
r"""Calculate Esty's CI.
Esty's CI is defined as
.. math::
F_1/N \pm z\sqrt{W}
where :math:`F_1` is the number of singleton OTUs, :math:`N` is the total
number of individuals (sum of abundances for all OTUs), and :math:`z` is a
constant that depends on the targeted confidence and based on the normal
distribution.
:math:`W` is defined as
.. math::
\frac{F_1(N-F_1)+2NF_2}{N^3}
where :math:`F_2` is the number of doubleton OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
tuple
Esty's confidence interval as ``(lower_bound, upper_bound)``.
Notes
-----
Esty's CI is defined in [1]_. :math:`z` is hardcoded for a 95% confidence
interval.
References
----------
.. [1] Esty, W. W. (1983). "A normal limit law for a nonparametric
estimator of the coverage of a random sample". Ann Statist 11: 905-912.
"""
counts = _validate_counts_vector(counts)
f1 = singles(counts)
f2 = doubles(counts)
n = counts.sum()
z = 1.959963985
W = (f1 * (n - f1) + 2 * n * f2) / (n ** 3)
return f1 / n - z * np.sqrt(W), f1 / n + z * np.sqrt(W)
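# Added example (illustrative, not part of the original scikit-bio source):
# counts [1, 1, 2, 3] give F1=2, F2=1, N=7, so the interval is centred on
# F1/N = 2/7 with half-width z*sqrt(W), and therefore contains 2/7.
# >>> lo, hi = esty_ci([1, 1, 2, 3])
# >>> print(lo < 2 / 7 < hi)
# True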
@experimental(as_of="0.4.0")
def fisher_alpha(counts):
r"""Calculate Fisher's alpha, a metric of diversity.
Fisher's alpha is estimated by solving the following equation for
:math:`\alpha`:
.. math::
S=\alpha\ln(1+\frac{N}{\alpha})
where :math:`S` is the number of OTUs and :math:`N` is the
total number of individuals in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Fisher's alpha.
Raises
------
RuntimeError
If the optimizer fails to converge (error > 1.0).
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_. Uses ``scipy.optimize.minimize_scalar`` to find
Fisher's alpha.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
n = counts.sum()
s = observed_otus(counts)
def f(alpha):
return (alpha * np.log(1 + (n / alpha)) - s) ** 2
# Temporarily silence RuntimeWarnings (invalid and division by zero) during
# optimization in case invalid input is provided to the objective function
# (e.g. alpha=0).
orig_settings = np.seterr(divide='ignore', invalid='ignore')
try:
alpha = minimize_scalar(f).x
finally:
np.seterr(**orig_settings)
if f(alpha) > 1.0:
raise RuntimeError("Optimizer failed to converge (error > 1.0), so "
"could not compute Fisher's alpha.")
return alpha
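# Added note (illustrative, not part of the original scikit-bio source):
# the returned alpha satisfies S = alpha * ln(1 + N/alpha) up to the
# optimizer's tolerance; e.g. for counts [4, 3, 4, 0, 1, 0, 2] (N=14, S=5),
# alpha * np.log(1 + 14 / alpha) evaluates to approximately 5.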
@experimental(as_of="0.4.0")
def goods_coverage(counts):
r"""Calculate Good's coverage of counts.
Good's coverage estimator is defined as
.. math::
1-\frac{F_1}{N}
where :math:`F_1` is the number of singleton OTUs and :math:`N` is the
total number of individuals (sum of abundances for all OTUs).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Good's coverage estimator.
"""
counts = _validate_counts_vector(counts)
f1 = singles(counts)
N = counts.sum()
return 1 - (f1 / N)
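# Added example (illustrative, not part of the original scikit-bio source):
# counts [1, 1, 2, 3] contain F1=2 singletons among N=7 individuals, so the
# estimated coverage is 1 - 2/7.
# >>> print(round(goods_coverage([1, 1, 2, 3]), 3))
# 0.714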
@experimental(as_of="0.4.0")
def heip_e(counts):
r"""Calculate Heip's evenness measure.
Heip's evenness is defined as:
.. math::
\frac{(e^H-1)}{(S-1)}
where :math:`H` is the Shannon-Wiener entropy of counts (using logarithm
base :math:`e`) and :math:`S` is the number of OTUs in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Heip's evenness measure.
See Also
--------
shannon
pielou_e
Notes
-----
The implementation here is based on the description in [1]_.
References
----------
.. [1] Heip, C. 1974. A new index measuring evenness. J. Mar. Biol. Ass.
UK., 54, 555-557.
"""
counts = _validate_counts_vector(counts)
return ((np.exp(shannon(counts, base=np.e)) - 1) /
(observed_otus(counts) - 1))
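# Added example (illustrative, not part of the original scikit-bio source):
# for a perfectly even community exp(H) equals S, so Heip's evenness is 1.
# >>> print(round(heip_e([5, 5, 5, 5]), 6))
# 1.0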
@experimental(as_of="0.4.0")
def kempton_taylor_q(counts, lower_quantile=0.25, upper_quantile=0.75):
"""Calculate Kempton-Taylor Q index of alpha diversity.
Estimates the slope of the cumulative abundance curve in the interquantile
range. By default, uses lower and upper quartiles, rounding inwards.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
lower_quantile : float, optional
Lower bound of the interquantile range. Defaults to lower quartile.
upper_quantile : float, optional
Upper bound of the interquantile range. Defaults to upper quartile.
Returns
-------
double
Kempton-Taylor Q index of alpha diversity.
Notes
-----
The index is defined in [1]_. The implementation here is based on the
description given in the SDR-IV online manual [2]_.
The implementation provided here differs slightly from the results given in
Magurran 1998. Specifically, we have 14 in the numerator rather than 15.
Magurran recommends counting half of the OTUs with the same # counts as the
point where the UQ falls and the point where the LQ falls, but the
justification for this is unclear (e.g. if there were a very large # OTUs
that just overlapped one of the quantiles, the results would be
considerably off). Leaving the calculation as-is for now, but consider
changing.
References
----------
.. [1] Kempton, R. A. and Taylor, L. R. (1976) Models and statistics for
species diversity. Nature, 262, 818-820.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
n = len(counts)
lower = int(np.ceil(n * lower_quantile))
upper = int(n * upper_quantile)
sorted_counts = np.sort(counts)
return (upper - lower) / np.log(sorted_counts[upper] /
sorted_counts[lower])
@experimental(as_of="0.4.0")
def margalef(counts):
r"""Calculate Margalef's richness index.
Margalef's D is defined as:
.. math::
D = \frac{(S - 1)}{\ln N}
where :math:`S` is the number of OTUs and :math:`N` is the total number of
individuals in the sample.
Assumes log accumulation.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Margalef's richness index.
Notes
-----
Based on the description in [1]_.
References
----------
.. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell. pp.
76-77.
"""
counts = _validate_counts_vector(counts)
return (observed_otus(counts) - 1) / np.log(counts.sum())
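# Added example (illustrative, not part of the original scikit-bio source):
# counts [1, 2, 3, 4] give S=4 and N=10, so D = 3 / ln(10).
# >>> print(round(margalef([1, 2, 3, 4]), 3))
# 1.303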
@experimental(as_of="0.4.0")
def mcintosh_d(counts):
r"""Calculate McIntosh dominance index D.
McIntosh dominance index D is defined as:
.. math::
D = \frac{N - U}{N - \sqrt{N}}
where :math:`N` is the total number of individuals in the sample and
:math:`U` is defined as:
.. math::
U = \sqrt{\sum{{n_i}^2}}
where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
McIntosh dominance index D.
See Also
--------
mcintosh_e
Notes
-----
The index was proposed in [1]_. The implementation here is based on the
description given in the SDR-IV online manual [2]_.
References
----------
.. [1] McIntosh, R. P. 1967 An index of diversity and the relation of
certain concepts to diversity. Ecology 48, 1115-1126.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
u = np.sqrt((counts * counts).sum())
n = counts.sum()
return (n - u) / (n - np.sqrt(n))
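# Added example (illustrative, not part of the original scikit-bio source):
# counts [1, 2, 3] give U = sqrt(14) and N = 6, so
# D = (6 - sqrt(14)) / (6 - sqrt(6)).
# >>> print(round(mcintosh_d([1, 2, 3]), 3))
# 0.636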
@experimental(as_of="0.4.0")
def mcintosh_e(counts):
r"""Calculate McIntosh's evenness measure E.
McIntosh evenness measure E is defined as:
.. math::
       E = \frac{\sqrt{\sum{n_i^2}}}{\sqrt{(N-S+1)^2 + S - 1}}
where :math:`n_i` is the number of individuals in the :math:`i^{\text{th}}`
OTU, :math:`N` is the total number of individuals, and :math:`S` is the
number of OTUs in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
McIntosh evenness measure E.
See Also
--------
mcintosh_d
Notes
-----
The implementation here is based on the description given in [1]_, **NOT**
the one in the SDR-IV online manual, which is wrong.
References
----------
.. [1] Heip & Engels (1974) Comparing Species Diversity and Evenness
Indices. p 560.
"""
counts = _validate_counts_vector(counts)
numerator = np.sqrt((counts * counts).sum())
n = counts.sum()
s = observed_otus(counts)
denominator = np.sqrt((n - s + 1) ** 2 + s - 1)
return numerator / denominator
@experimental(as_of="0.4.0")
def menhinick(counts):
r"""Calculate Menhinick's richness index.
Menhinick's richness index is defined as:
.. math::
D_{Mn} = \frac{S}{\sqrt{N}}
where :math:`S` is the number of OTUs and :math:`N` is the total number of
individuals in the sample.
Assumes square-root accumulation.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Menhinick's richness index.
Notes
-----
Based on the description in [1]_.
References
----------
.. [1] Magurran, A E 2004. Measuring biological diversity. Blackwell. pp.
76-77.
"""
counts = _validate_counts_vector(counts)
return observed_otus(counts) / np.sqrt(counts.sum())
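# Added example (illustrative, not part of the original scikit-bio source):
# counts [1, 2, 3, 4] give S=4 and N=10, so D_Mn = 4 / sqrt(10).
# >>> print(round(menhinick([1, 2, 3, 4]), 3))
# 1.265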
@experimental(as_of="0.4.0")
def michaelis_menten_fit(counts, num_repeats=1, params_guess=None):
r"""Calculate Michaelis-Menten fit to rarefaction curve of observed OTUs.
The Michaelis-Menten equation is defined as:
.. math::
S=\frac{nS_{max}}{n+B}
where :math:`n` is the number of individuals and :math:`S` is the number of
OTUs. This function estimates the :math:`S_{max}` parameter.
The fit is made to datapoints for :math:`n=1,2,...,N`, where :math:`N` is
the total number of individuals (sum of abundances for all OTUs).
:math:`S` is the number of OTUs represented in a random sample of :math:`n`
individuals.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
num_repeats : int, optional
The number of times to perform rarefaction (subsampling without
replacement) at each value of :math:`n`.
params_guess : tuple, optional
Initial guess of :math:`S_{max}` and :math:`B`. If ``None``, default
guess for :math:`S_{max}` is :math:`S` (as :math:`S_{max}` should
be >= :math:`S`) and default guess for :math:`B` is ``round(N / 2)``.
Returns
-------
S_max : double
Estimate of the :math:`S_{max}` parameter in the Michaelis-Menten
equation.
See Also
--------
skbio.stats.subsample_counts
Notes
-----
There is some controversy about how to do the fitting. The ML model given
in [1]_ is based on the assumption that error is roughly proportional to
magnitude of observation, reasonable for enzyme kinetics but not reasonable
for rarefaction data. Here we just do a nonlinear curve fit for the
parameters using least-squares.
References
----------
.. [1] Raaijmakers, J. G. W. 1987 Statistical analysis of the
Michaelis-Menten equation. Biometrics 43, 793-803.
"""
counts = _validate_counts_vector(counts)
n_indiv = counts.sum()
if params_guess is None:
S_max_guess = observed_otus(counts)
B_guess = int(round(n_indiv / 2))
params_guess = (S_max_guess, B_guess)
# observed # of OTUs vs # of individuals sampled, S vs n
xvals = np.arange(1, n_indiv + 1)
ymtx = np.empty((num_repeats, len(xvals)), dtype=int)
for i in range(num_repeats):
ymtx[i] = np.asarray([observed_otus(subsample_counts(counts, n))
for n in xvals], dtype=int)
yvals = ymtx.mean(0)
# Vectors of actual vals y and number of individuals n.
def errfn(p, n, y):
return (((p[0] * n / (p[1] + n)) - y) ** 2).sum()
# Return S_max.
return fmin_powell(errfn, params_guess, ftol=1e-5, args=(xvals, yvals),
disp=False)[0]
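# Added note (illustrative, not part of the original scikit-bio source):
# because the rarefaction step subsamples at random, repeated calls can return
# slightly different estimates; increasing ``num_repeats`` averages more
# rarefaction curves and typically stabilises the fitted S_max at the cost of
# runtime.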
@experimental(as_of="0.4.0")
def observed_otus(counts):
"""Calculate the number of distinct OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Distinct OTU count.
"""
counts = _validate_counts_vector(counts)
return (counts != 0).sum()
@experimental(as_of="0.4.0")
def osd(counts):
"""Calculate observed OTUs, singles, and doubles.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
osd : tuple
Observed OTUs, singles, and doubles.
See Also
--------
observed_otus
singles
doubles
Notes
-----
This is a convenience function used by many of the other measures that rely
on these three measures.
"""
counts = _validate_counts_vector(counts)
return observed_otus(counts), singles(counts), doubles(counts)
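# Added note (illustrative, not part of the original scikit-bio source):
# for counts [1, 0, 2, 3, 0] this returns observed=3, singletons=1,
# doubletons=1.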
@experimental(as_of="0.4.1")
def pielou_e(counts):
r"""Calculate Pielou's Evenness index J'.
Pielou's Evenness is defined as:
.. math::
J' = \frac{(H)}{\ln(S)}
where :math:`H` is the Shannon-Wiener entropy of counts and :math:`S` is
the number of OTUs in the sample.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Pielou's Evenness.
See Also
--------
shannon
heip_e
Notes
-----
The implementation here is based on the description in Wikipedia [1]_.
It was first proposed by E. C. Pielou [2]_ and is similar to Heip's
evenness [3]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/Species_evenness
.. [2] Pielou, E. C., 1966. The measurement of diversity in different types
of biological collections. Journal of Theoretical Biology, 13, 131-44.
.. [3] Heip, C. 1974. A new index measuring evenness. J. Mar. Biol. Ass.
UK., 54, 555-557.
"""
counts = _validate_counts_vector(counts)
return shannon(counts, base=np.e) / np.log(observed_otus(counts))
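# Added example (illustrative, not part of the original scikit-bio source):
# a perfectly even community has H = ln(S), so J' = 1.
# >>> print(round(pielou_e([5, 5, 5, 5]), 6))
# 1.0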
@experimental(as_of="0.4.0")
def robbins(counts):
r"""Calculate Robbins' estimator for probability of unobserved outcomes.
Robbins' estimator is defined as:
.. math::
\frac{F_1}{n+1}
where :math:`F_1` is the number of singleton OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Robbins' estimate.
Notes
-----
Robbins' estimator is defined in [1]_. The estimate computed here is for
:math:`n-1` counts, i.e. the x-axis is off by 1.
References
----------
.. [1] Robbins, H. E (1968). Ann. of Stats. Vol 36, pp. 256-257.
"""
counts = _validate_counts_vector(counts)
return singles(counts) / counts.sum()
@experimental(as_of="0.4.0")
def shannon(counts, base=2):
r"""Calculate Shannon entropy of counts, default in bits.
Shannon-Wiener diversity index is defined as:
.. math::
H = -\sum_{i=1}^s\left(p_i\log_2 p_i\right)
where :math:`s` is the number of OTUs and :math:`p_i` is the proportion of
the community represented by OTU :math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
base : scalar, optional
Logarithm base to use in the calculations.
Returns
-------
double
Shannon diversity index H.
Notes
-----
The implementation here is based on the description given in the SDR-IV
online manual [1]_ except that the default logarithm base used here is 2
instead of :math:`e`.
References
----------
.. [1] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
freqs = counts / counts.sum()
nonzero_freqs = freqs[freqs.nonzero()]
return -(nonzero_freqs * np.log(nonzero_freqs)).sum() / np.log(base)
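# Added example (illustrative, not part of the original scikit-bio source):
# four equally abundant OTUs carry log2(4) = 2 bits of entropy.
# >>> print(round(shannon([5, 5, 5, 5]), 2))
# 2.0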
@experimental(as_of="0.4.0")
def simpson(counts):
r"""Calculate Simpson's index.
Simpson's index is defined as ``1 - dominance``:
.. math::
1 - \sum{p_i^2}
where :math:`p_i` is the proportion of the community represented by OTU
:math:`i`.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Simpson's index.
See Also
--------
dominance
Notes
-----
The implementation here is ``1 - dominance`` as described in [1]_. Other
references (such as [2]_) define Simpson's index as ``1 / dominance``.
References
----------
.. [1] http://folk.uio.no/ohammer/past/diversity.html
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
return 1 - dominance(counts)
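# Added example (illustrative, not part of the original scikit-bio source):
# Simpson's index of a two-OTU, perfectly even community is 1 - 0.5.
# >>> print(round(simpson([5, 5]), 2))
# 0.5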
@experimental(as_of="0.4.0")
def simpson_e(counts):
r"""Calculate Simpson's evenness measure E.
Simpson's E is defined as
.. math::
E=\frac{1 / D}{S_{obs}}
where :math:`D` is dominance and :math:`S_{obs}` is the number of observed
OTUs.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Simpson's evenness measure E.
See Also
--------
dominance
enspie
simpson
Notes
-----
The implementation here is based on the description given in [1]_.
References
----------
.. [1] http://www.tiem.utk.edu/~gross/bioed/bealsmodules/simpsonDI.html
"""
counts = _validate_counts_vector(counts)
return enspie(counts) / observed_otus(counts)
@experimental(as_of="0.4.0")
def singles(counts):
"""Calculate number of single occurrences (singletons).
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
int
Singleton count.
"""
counts = _validate_counts_vector(counts)
return (counts == 1).sum()
@experimental(as_of="0.4.0")
def strong(counts):
r"""Calculate Strong's dominance index.
Strong's dominance index is defined as:
.. math::
D_w = max_i[(\frac{b_i}{N})-\frac{i}{S}]
where :math:`b_i` is the sequential cumulative totaling of the
:math:`i^{\text{th}}` OTU abundance values ranked from largest to smallest,
:math:`N` is the total number of individuals in the sample, and
:math:`S` is the number of OTUs in the sample. The expression in brackets
is computed for all OTUs, and :math:`max_i` denotes the maximum value in
brackets for any OTU.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
Returns
-------
double
Strong's dominance index (Dw).
Notes
-----
Strong's dominance index is defined in [1]_. The implementation here is
based on the description given in the SDR-IV online manual [2]_.
References
----------
.. [1] Strong, W. L., 2002 Assessing species abundance unevenness within
and between plant communities. Community Ecology, 3, 237-246.
.. [2] http://www.pisces-conservation.com/sdrhelp/index.html
"""
counts = _validate_counts_vector(counts)
n = counts.sum()
s = observed_otus(counts)
i = np.arange(1, len(counts) + 1)
sorted_sum = np.sort(counts)[::-1].cumsum()
    return (sorted_sum / n - (i / s)).max()

# (end of source file: skbio/diversity/alpha/_base.py)

import numpy as np
from skbio.diversity._util import _validate_counts_vector
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def gini_index(data, method='rectangles'):
r"""Calculate the Gini index.
The Gini index is defined as
.. math::
G=\frac{A}{A+B}
where :math:`A` is the area between :math:`y=x` and the Lorenz curve and
:math:`B` is the area under the Lorenz curve. Simplifies to :math:`1-2B`
since :math:`A+B=0.5`.
Parameters
----------
data : 1-D array_like
Vector of counts, abundances, proportions, etc. All entries must be
non-negative.
method : {'rectangles', 'trapezoids'}
Method for calculating the area under the Lorenz curve. If
``'rectangles'``, connects the Lorenz curve points by lines parallel to
the x axis. This is the correct method (in our opinion) though
``'trapezoids'`` might be desirable in some circumstances. If
``'trapezoids'``, connects the Lorenz curve points by linear segments
between them. Basically assumes that the given sampling is accurate and
that more features of given data would fall on linear gradients between
the values of this data.
Returns
-------
double
Gini index.
Raises
------
ValueError
If `method` isn't one of the supported methods for calculating the area
under the curve.
Notes
-----
The Gini index was introduced in [1]_. The formula for
``method='rectangles'`` is
.. math::
dx\sum_{i=1}^n h_i
The formula for ``method='trapezoids'`` is
.. math::
dx(\frac{h_0+h_n}{2}+\sum_{i=1}^{n-1} h_i)
References
----------
.. [1] Gini, C. (1912). "Variability and Mutability", C. Cuppini, Bologna,
156 pages. Reprinted in Memorie di metodologica statistica (Ed. Pizetti
E, Salvemini, T). Rome: Libreria Eredi Virgilio Veschi (1955).
"""
# Suppress cast to int because this method supports ints and floats.
data = _validate_counts_vector(data, suppress_cast=True)
lorenz_points = _lorenz_curve(data)
B = _lorenz_curve_integrator(lorenz_points, method)
return max(0.0, 1 - 2 * B)
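# Added example (illustrative, not part of the original scikit-bio source):
# with all of the "goods" in a single entry the two integration methods give
# different but hand-checkable values: B = 0.25 (rectangles) or 0.125
# (trapezoids), hence G = 0.5 or 0.75 for this four-entry vector.
# >>> print(round(gini_index([0, 0, 0, 1], 'rectangles'), 2))
# 0.5
# >>> print(round(gini_index([0, 0, 0, 1], 'trapezoids'), 2))
# 0.75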
def _lorenz_curve(data):
"""Calculate the Lorenz curve for input data.
Notes
-----
Formula available on wikipedia.
"""
sorted_data = np.sort(data)
Sn = sorted_data.sum()
n = sorted_data.shape[0]
return np.arange(1, n + 1) / n, sorted_data.cumsum() / Sn
def _lorenz_curve_integrator(lc_pts, method):
"""Calculates the area under a Lorenz curve.
Notes
-----
Could be utilized for integrating other simple, non-pathological
"functions" where width of the trapezoids is constant.
"""
x, y = lc_pts
# each point differs by 1/n
dx = 1 / x.shape[0]
if method == 'trapezoids':
# 0 percent of the population has zero percent of the goods
h_0 = 0.0
h_n = y[-1]
# the 0th entry is at x=1/n
sum_hs = y[:-1].sum()
return dx * ((h_0 + h_n) / 2 + sum_hs)
elif method == 'rectangles':
return dx * y.sum()
else:
        raise ValueError("Method '%s' not implemented. Available methods: "
                         "'rectangles', 'trapezoids'." % method)

# (end of source file: skbio/diversity/alpha/_gini.py)

import numpy as np
from skbio.diversity._util import _validate_counts_vector
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def ace(counts, rare_threshold=10):
r"""Calculate the ACE metric (Abundance-based Coverage Estimator).
The ACE metric is defined as:
.. math::
S_{ace}=S_{abund}+\frac{S_{rare}}{C_{ace}}+
\frac{F_1}{C_{ace}}\gamma^2_{ace}
where :math:`S_{abund}` is the number of abundant OTUs (with more than
`rare_threshold` individuals) when all samples are pooled,
:math:`S_{rare}` is the number of rare OTUs (with less than or equal to
`rare_threshold` individuals) when all samples are pooled, :math:`C_{ace}`
is the sample abundance coverage estimator, :math:`F_1` is the frequency of
singletons, and :math:`\gamma^2_{ace}` is the estimated coefficient of
variation for rare OTUs.
The estimated coefficient of variation is defined as (assuming
`rare_threshold` is 10, the default):
.. math::
\gamma^2_{ace}=max\left[\frac{S_{rare}}{C_{ace}}
\frac{\sum^{10}_{i=1}{{i\left(i-1\right)}}F_i}
{\left(N_{rare}\right)\left(N_{rare}-1\right)} -1,0\right]
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
rare_threshold : int, optional
Threshold at which an OTU containing as many or fewer individuals will
be considered rare.
Returns
-------
double
Computed ACE metric.
Raises
------
ValueError
If every rare OTU is a singleton.
Notes
-----
ACE was first introduced in [1]_ and [2]_. The implementation here is based
on the description given in the EstimateS manual [3]_.
If no rare OTUs exist, returns the number of abundant OTUs. The default
value of 10 for `rare_threshold` is based on [4]_.
If `counts` contains zeros, indicating OTUs which are known to exist in the
environment but did not appear in the sample, they will be ignored for the
purpose of calculating the number of rare OTUs.
References
----------
.. [1] Chao, A. & S.-M Lee. 1992 Estimating the number of classes via
sample coverage. Journal of the American Statistical Association 87,
210-217.
.. [2] Chao, A., M.-C. Ma, & M. C. K. Yang. 1993. Stopping rules and
estimation for recapture debugging with unequal failure rates.
Biometrika 80, 193-201.
.. [3] http://viceroy.eeb.uconn.edu/estimates/
.. [4] Chao, A., W.-H. Hwang, Y.-C. Chen, and C.-Y. Kuo. 2000. Estimating
the number of shared species in two communities. Statistica Sinica
10:227-246.
"""
counts = _validate_counts_vector(counts)
freq_counts = np.bincount(counts)
s_rare = _otus_rare(freq_counts, rare_threshold)
singles = freq_counts[1]
if singles > 0 and singles == s_rare:
raise ValueError("The only rare OTUs are singletons, so the ACE "
"metric is undefined. EstimateS suggests using "
"bias-corrected Chao1 instead.")
s_abun = _otus_abundant(freq_counts, rare_threshold)
if s_rare == 0:
return s_abun
n_rare = _number_rare(freq_counts, rare_threshold)
c_ace = 1 - singles / n_rare
top = s_rare * _number_rare(freq_counts, rare_threshold, gamma=True)
bottom = c_ace * n_rare * (n_rare - 1)
gamma_ace = (top / bottom) - 1
if gamma_ace < 0:
gamma_ace = 0
return s_abun + (s_rare / c_ace) + ((singles / c_ace) * gamma_ace)
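# A minimal usage sketch (the counts below are made up):
#
#     from skbio.diversity.alpha import ace
#     ace([2, 1, 1, 3, 12, 25])                   # default rare_threshold=10
#     ace([2, 1, 1, 3, 12, 25], rare_threshold=5)
#
# A counts vector whose only rare OTUs are singletons (e.g. [1, 1, 20])
# triggers the ValueError raised above.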
def _otus_rare(freq_counts, rare_threshold):
"""Count number of rare OTUs."""
return freq_counts[1:rare_threshold + 1].sum()
def _otus_abundant(freq_counts, rare_threshold):
"""Count number of abundant OTUs."""
return freq_counts[rare_threshold + 1:].sum()
def _number_rare(freq_counts, rare_threshold, gamma=False):
"""Return number of individuals in rare OTUs.
``gamma=True`` generates the ``n_rare`` used for the variation coefficient.
"""
n_rare = 0
if gamma:
for i, j in enumerate(freq_counts[:rare_threshold + 1]):
n_rare = n_rare + (i * j) * (i - 1)
else:
for i, j in enumerate(freq_counts[:rare_threshold + 1]):
n_rare = n_rare + (i * j)
return n_rare | scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/diversity/alpha/_ace.py | _ace.py |
from skbio.metadata._repr import _MetadataReprBuilder
class _TabularMSAReprBuilder(_MetadataReprBuilder):
def __init__(self, msa, width, indent):
super(_TabularMSAReprBuilder, self).__init__(msa, width, indent)
self._ellipse_insert = ' ... '
def _process_header(self):
cls_name = self._obj.__class__.__name__
if self._obj.dtype is not None:
dtype_class = '[' + self._obj.dtype.__name__ + ']'
else:
dtype_class = ''
self._lines.add_line(cls_name + dtype_class)
self._lines.add_separator()
def _process_data(self):
num_sequences = self._obj.shape.sequence
num_positions = self._obj.shape.position
# catch case of all empty sequences
if num_positions > 0:
# display all sequences if we can, else display the first two and
# last two sequences separated by ellipsis
if num_sequences <= 5:
self._lines.add_lines(
self._format_sequences(range(num_sequences)))
else:
self._lines.add_lines(self._format_sequences(range(2)))
self._lines.add_line('...')
self._lines.add_lines(self._format_sequences(
range(num_sequences - 2, num_sequences)))
def _format_sequences(self, sequence_indices):
lines = []
for line_index in sequence_indices:
seq_str = str(self._obj._get_sequence_iloc_(line_index))
if len(seq_str) <= self._width:
formatted_seq = seq_str
else:
formatted_seq = (
seq_str[0:self._num_characters_before_ellipse()] +
self._ellipse_insert +
seq_str[-self._num_characters_after_ellipse():]
)
lines.append(formatted_seq)
return lines
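    # Worked example of the truncation arithmetic in the helpers below
    # (illustrative): with self._width == 10 and the 5-character insert
    # ' ... ', 5 sequence characters stay visible (2 before the ellipsis and
    # 3 after), so a long sequence keeps its first 2 and last 3 characters,
    # e.g. 'AC ... CGT'.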
def _num_characters_before_ellipse(self):
return int(self._num_characters_to_display() / 2)
def _num_characters_after_ellipse(self):
return (self._num_characters_to_display() -
self._num_characters_before_ellipse())
def _num_characters_to_display(self):
return self._width - len(self._ellipse_insert) | scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/alignment/_repr.py | _repr.py |
from warnings import warn
from itertools import product
import numpy as np
from skbio.alignment import TabularMSA
from skbio.alignment._ssw_wrapper import StripedSmithWaterman
from skbio.sequence import DNA, RNA, Protein
from skbio.sequence import GrammaredSequence
from skbio.util import EfficiencyWarning
from skbio.util._decorator import experimental, deprecated
# This is temporary: blosum50 does not exist in skbio yet as per
# issue 161. When the issue is resolved, this should be removed in favor
# of an import.
blosum50 = \
{
'*': {'*': 1, 'A': -5, 'C': -5, 'B': -5, 'E': -5, 'D': -5, 'G': -5,
'F': -5, 'I': -5, 'H': -5, 'K': -5, 'M': -5, 'L': -5,
'N': -5, 'Q': -5, 'P': -5, 'S': -5, 'R': -5, 'T': -5,
'W': -5, 'V': -5, 'Y': -5, 'X': -5, 'Z': -5},
'A': {'*': -5, 'A': 5, 'C': -1, 'B': -2, 'E': -1, 'D': -2, 'G': 0,
'F': -3, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -2,
'N': -1, 'Q': -1, 'P': -1, 'S': 1, 'R': -2, 'T': 0, 'W': -3,
'V': 0, 'Y': -2, 'X': -1, 'Z': -1},
'C': {'*': -5, 'A': -1, 'C': 13, 'B': -3, 'E': -3, 'D': -4,
'G': -3, 'F': -2, 'I': -2, 'H': -3, 'K': -3, 'M': -2,
'L': -2, 'N': -2, 'Q': -3, 'P': -4, 'S': -1, 'R': -4,
'T': -1, 'W': -5, 'V': -1, 'Y': -3, 'X': -1, 'Z': -3},
'B': {'*': -5, 'A': -2, 'C': -3, 'B': 6, 'E': 1, 'D': 6, 'G': -1,
'F': -4, 'I': -4, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 5,
'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -5, 'V': -3,
'Y': -3, 'X': -1, 'Z': 1},
'E': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 6, 'D': 2, 'G': -3,
'F': -3, 'I': -4, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0,
'Q': 2, 'P': -1, 'S': -1, 'R': 0, 'T': -1, 'W': -3, 'V': -3,
'Y': -2, 'X': -1, 'Z': 5},
'D': {'*': -5, 'A': -2, 'C': -4, 'B': 6, 'E': 2, 'D': 8, 'G': -1,
'F': -5, 'I': -4, 'H': -1, 'K': -1, 'M': -4, 'L': -4, 'N': 2,
'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -5, 'V': -4,
'Y': -3, 'X': -1, 'Z': 1},
'G': {'*': -5, 'A': 0, 'C': -3, 'B': -1, 'E': -3, 'D': -1, 'G': 8,
'F': -4, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0,
'Q': -2, 'P': -2, 'S': 0, 'R': -3, 'T': -2, 'W': -3, 'V': -4,
'Y': -3, 'X': -1, 'Z': -2},
'F': {'*': -5, 'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -5,
'G': -4, 'F': 8, 'I': 0, 'H': -1, 'K': -4, 'M': 0, 'L': 1,
'N': -4, 'Q': -4, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 1,
'V': -1, 'Y': 4, 'X': -1, 'Z': -4},
'I': {'*': -5, 'A': -1, 'C': -2, 'B': -4, 'E': -4, 'D': -4,
'G': -4, 'F': 0, 'I': 5, 'H': -4, 'K': -3, 'M': 2, 'L': 2,
'N': -3, 'Q': -3, 'P': -3, 'S': -3, 'R': -4, 'T': -1,
'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'*': -5, 'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2,
'F': -1, 'I': -4, 'H': 10, 'K': 0, 'M': -1, 'L': -3, 'N': 1,
'Q': 1, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -3, 'V': -4,
'Y': 2, 'X': -1, 'Z': 0},
'K': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2,
'F': -4, 'I': -3, 'H': 0, 'K': 6, 'M': -2, 'L': -3, 'N': 0,
'Q': 2, 'P': -1, 'S': 0, 'R': 3, 'T': -1, 'W': -3, 'V': -3,
'Y': -2, 'X': -1, 'Z': 1},
'M': {'*': -5, 'A': -1, 'C': -2, 'B': -3, 'E': -2, 'D': -4,
'G': -3, 'F': 0, 'I': 2, 'H': -1, 'K': -2, 'M': 7, 'L': 3,
'N': -2, 'Q': 0, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -1,
'V': 1, 'Y': 0, 'X': -1, 'Z': -1},
'L': {'*': -5, 'A': -2, 'C': -2, 'B': -4, 'E': -3, 'D': -4,
'G': -4, 'F': 1, 'I': 2, 'H': -3, 'K': -3, 'M': 3, 'L': 5,
'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -1,
'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'*': -5, 'A': -1, 'C': -2, 'B': 5, 'E': 0, 'D': 2, 'G': 0,
'F': -4, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -4, 'N': 7,
'Q': 0, 'P': -2, 'S': 1, 'R': -1, 'T': 0, 'W': -4, 'V': -3,
'Y': -2, 'X': -1, 'Z': 0},
'Q': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2,
'F': -4, 'I': -3, 'H': 1, 'K': 2, 'M': 0, 'L': -2, 'N': 0,
'Q': 7, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -1, 'V': -3,
'Y': -1, 'X': -1, 'Z': 4},
'P': {'*': -5, 'A': -1, 'C': -4, 'B': -2, 'E': -1, 'D': -1,
'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -3,
'L': -4, 'N': -2, 'Q': -1, 'P': 10, 'S': -1, 'R': -3,
'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': -1},
'S': {'*': -5, 'A': 1, 'C': -1, 'B': 0, 'E': -1, 'D': 0, 'G': 0,
'F': -3, 'I': -3, 'H': -1, 'K': 0, 'M': -2, 'L': -3, 'N': 1,
'Q': 0, 'P': -1, 'S': 5, 'R': -1, 'T': 2, 'W': -4, 'V': -2,
'Y': -2, 'X': -1, 'Z': 0},
'R': {'*': -5, 'A': -2, 'C': -4, 'B': -1, 'E': 0, 'D': -2, 'G': -3,
'F': -3, 'I': -4, 'H': 0, 'K': 3, 'M': -2, 'L': -3, 'N': -1,
'Q': 1, 'P': -3, 'S': -1, 'R': 7, 'T': -1, 'W': -3, 'V': -3,
'Y': -1, 'X': -1, 'Z': 0},
'T': {'*': -5, 'A': 0, 'C': -1, 'B': 0, 'E': -1, 'D': -1, 'G': -2,
'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0,
'Q': -1, 'P': -1, 'S': 2, 'R': -1, 'T': 5, 'W': -3, 'V': 0,
'Y': -2, 'X': -1, 'Z': -1},
'W': {'*': -5, 'A': -3, 'C': -5, 'B': -5, 'E': -3, 'D': -5,
'G': -3, 'F': 1, 'I': -3, 'H': -3, 'K': -3, 'M': -1, 'L': -2,
'N': -4, 'Q': -1, 'P': -4, 'S': -4, 'R': -3, 'T': -3,
'W': 15, 'V': -3, 'Y': 2, 'X': -1, 'Z': -2},
'V': {'*': -5, 'A': 0, 'C': -1, 'B': -3, 'E': -3, 'D': -4, 'G': -4,
'F': -1, 'I': 4, 'H': -4, 'K': -3, 'M': 1, 'L': 1, 'N': -3,
'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 5,
'Y': -1, 'X': -1, 'Z': -3},
'Y': {'*': -5, 'A': -2, 'C': -3, 'B': -3, 'E': -2, 'D': -3,
'G': -3, 'F': 4, 'I': -1, 'H': 2, 'K': -2, 'M': 0, 'L': -1,
'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -1, 'T': -2, 'W': 2,
'V': -1, 'Y': 8, 'X': -1, 'Z': -2},
'X': {'*': -5, 'A': -1, 'C': -1, 'B': -1, 'E': -1, 'D': -1,
'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1,
'L': -1, 'N': -1, 'Q': -1, 'P': -1, 'S': -1, 'R': -1,
'T': -1, 'W': -1, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 5, 'D': 1, 'G': -2,
'F': -4, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0,
'Q': 4, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -2, 'V': -3,
'Y': -2, 'X': -1, 'Z': 5}}
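# Example lookups into the matrix above (values taken directly from it):
# blosum50['W']['W'] == 15, blosum50['A']['V'] == 0, blosum50['E']['D'] == 2.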
@experimental(as_of="0.4.0")
def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
"""Locally align exactly two nucleotide seqs with Smith-Waterman
Parameters
----------
seq1 : DNA or RNA
The first unaligned sequence.
seq2 : DNA or RNA
The second unaligned sequence.
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be DNA or RNA, not type %r"
% type(seq).__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
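# A minimal usage sketch (the sequences are made up; the returned score
# depends on the penalties chosen above):
#
#     from skbio import DNA
#     msa, score, start_end = local_pairwise_align_nucleotide(
#         DNA("GCGTGCCTAAGGTATGCAAG"), DNA("ACGTGCCTAGGTACGCAAG"))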
@experimental(as_of="0.4.0")
def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
"""Locally align exactly two protein seqs with Smith-Waterman
Parameters
----------
seq1 : Protein
The first unaligned sequence.
seq2 : Protein
The second unaligned sequence.
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, Protein):
raise TypeError(
"`seq1` and `seq2` must be Protein, not type %r"
% type(seq).__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
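# As above, but for protein sequences (illustrative input):
#
#     from skbio import Protein
#     msa, score, start_end = local_pairwise_align_protein(
#         Protein("HEAGAWGHEE"), Protein("PAWHEAE"))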
@experimental(as_of="0.4.0")
def local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
"""Locally align exactly two seqs with Smith-Waterman
Parameters
----------
seq1 : GrammaredSequence
The first unaligned sequence.
seq2 : GrammaredSequence
The second unaligned sequence.
gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score).
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
This algorithm was originally described in [1]_. The scikit-bio
implementation was validated against the EMBOSS water web server [2]_.
References
----------
.. [1] Identification of common molecular subsequences.
Smith TF, Waterman MS.
J Mol Biol. 1981 Mar 25;147(1):195-7.
.. [2] http://www.ebi.ac.uk/Tools/psa/emboss_water/
"""
warn("You're using skbio's python implementation of Smith-Waterman "
"alignment. This will be very slow (e.g., thousands of times slower) "
"than skbio.alignment.local_pairwise_align_ssw.",
EfficiencyWarning)
for seq in seq1, seq2:
if not isinstance(seq, GrammaredSequence):
raise TypeError(
"`seq1` and `seq2` must be %r subclasses, not type %r" %
(GrammaredSequence.__name__, type(seq).__name__))
if type(seq1) is not type(seq2):
raise TypeError(
"`seq1` and `seq2` must be the same type: %r != %r"
% (type(seq1).__name__, type(seq2).__name__))
seq1 = _coerce_alignment_input_type(seq1)
seq2 = _coerce_alignment_input_type(seq2)
score_matrix, traceback_matrix = _compute_score_and_traceback_matrices(
seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, new_alignment_score=0.0,
init_matrices_f=_init_matrices_sw)
end_row_position, end_col_position =\
np.unravel_index(np.argmax(score_matrix), score_matrix.shape)
aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
_traceback(traceback_matrix, score_matrix, seq1, seq2,
end_row_position, end_col_position)
start_end_positions = [(seq1_start_position, end_col_position-1),
(seq2_start_position, end_row_position-1)]
msa = TabularMSA(aligned1 + aligned2)
return msa, score, start_end_positions
@experimental(as_of="0.4.0")
def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=1, mismatch_score=-2,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align nucleotide seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : DNA, RNA, or TabularMSA[DNA|RNA]
The first unaligned sequence(s).
seq2 : DNA, RNA, or TabularMSA[DNA|RNA]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be DNA, RNA, or TabularMSA, not type "
"%r" % type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype,
(DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with DNA or RNA dtype, "
"not dtype %r" % seq.dtype.__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
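# A minimal usage sketch (illustrative). Because this wrapper also accepts
# TabularMSA, the alignment returned by one call can be aligned against a
# further sequence:
#
#     from skbio import DNA
#     aln, _, _ = global_pairwise_align_nucleotide(DNA("GACCTTGACCAGGTACC"),
#                                                  DNA("GACCATGTACC"))
#     msa, score, start_end = global_pairwise_align_nucleotide(
#         aln, DNA("GACCATGGACCAGGTACC"))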
@experimental(as_of="0.4.0")
def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align pair of protein seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : Protein or TabularMSA[Protein]
The first unaligned sequence(s).
seq2 : Protein or TabularMSA[Protein]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
    global_pairwise_align_nucleotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, (Protein, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be Protein or TabularMSA, not type %r"
% type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, Protein):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with Protein dtype, "
"not dtype %r" % seq.dtype.__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
@experimental(as_of="0.4.0")
def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, penalize_terminal_gaps=False):
"""Globally align a pair of seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : GrammaredSequence or TabularMSA
The first unaligned sequence(s).
seq2 : GrammaredSequence or TabularMSA
The second unaligned sequence(s).
gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score).
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
This algorithm (in a slightly more basic form) was originally described
in [1]_. The scikit-bio implementation was validated against the
EMBOSS needle web server [2]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] A general method applicable to the search for similarities in
the amino acid sequence of two proteins.
Needleman SB, Wunsch CD.
J Mol Biol. 1970 Mar;48(3):443-53.
.. [2] http://www.ebi.ac.uk/Tools/psa/emboss_needle/
"""
warn("You're using skbio's python implementation of Needleman-Wunsch "
"alignment. This is known to be very slow (e.g., thousands of times "
"slower than a native C implementation). We'll be adding a faster "
"version soon (see https://github.com/biocore/scikit-bio/issues/254 "
"to track progress on this).", EfficiencyWarning)
for seq in seq1, seq2:
# We don't need to check the case where `seq` is a `TabularMSA` with a
# dtype that isn't a subclass of `GrammaredSequence`, this is
# guaranteed by `TabularMSA`.
if not isinstance(seq, (GrammaredSequence, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be GrammaredSequence subclasses or "
"TabularMSA, not type %r" % type(seq).__name__)
seq1 = _coerce_alignment_input_type(seq1)
seq2 = _coerce_alignment_input_type(seq2)
if seq1.dtype is not seq2.dtype:
raise TypeError(
"`seq1` and `seq2` must have the same dtype: %r != %r"
% (seq1.dtype.__name__, seq2.dtype.__name__))
if penalize_terminal_gaps:
init_matrices_f = _init_matrices_nw
else:
init_matrices_f = _init_matrices_nw_no_terminal_gap_penalty
score_matrix, traceback_matrix = \
_compute_score_and_traceback_matrices(
seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, new_alignment_score=-np.inf,
init_matrices_f=init_matrices_f,
penalize_terminal_gaps=penalize_terminal_gaps)
end_row_position = traceback_matrix.shape[0] - 1
end_col_position = traceback_matrix.shape[1] - 1
aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
_traceback(traceback_matrix, score_matrix, seq1, seq2,
end_row_position, end_col_position)
start_end_positions = [(seq1_start_position, end_col_position-1),
(seq2_start_position, end_row_position-1)]
msa = TabularMSA(aligned1 + aligned2)
return msa, score, start_end_positions
@deprecated(as_of="0.5.8", until="0.6.0",
reason="This will be removed or replaced, in favor of more general"
"-purpose performant aligners. Additional details at "
"https://github.com/biocore/scikit-bio/issues/1814")
def local_pairwise_align_ssw(sequence1, sequence2, **kwargs):
"""Align query and target sequences with Striped Smith-Waterman.
Parameters
----------
sequence1 : DNA, RNA, or Protein
The first unaligned sequence
sequence2 : DNA, RNA, or Protein
The second unaligned sequence
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
Notes
-----
This is a wrapper for the SSW package [1]_.
For a complete list of optional keyword-arguments that can be provided,
see ``skbio.alignment.StripedSmithWaterman``.
The following kwargs will not have any effect: `suppress_sequences`,
    `zero_index`, and `protein`.
If an alignment does not meet a provided filter, `None` will be returned.
References
----------
.. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
Applications". PLOS ONE (2013). Web. 11 July 2014.
http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138
See Also
--------
skbio.alignment.StripedSmithWaterman
"""
for seq in sequence1, sequence2:
if not isinstance(seq, (DNA, RNA, Protein)):
raise TypeError(
"`sequence1` and `sequence2` must be DNA, RNA, or Protein, "
"not type %r" % type(seq).__name__)
if type(sequence1) is not type(sequence2):
raise TypeError(
"`sequence1` and `sequence2` must be the same type: %r != %r"
% (type(sequence1).__name__, type(sequence2).__name__))
# We need the sequences for `TabularMSA` to make sense, so don't let the
# user suppress them.
kwargs['suppress_sequences'] = False
kwargs['zero_index'] = True
kwargs['protein'] = False
if isinstance(sequence1, Protein):
kwargs['protein'] = True
query = StripedSmithWaterman(str(sequence1), **kwargs)
alignment = query(str(sequence2))
# If there is no cigar, then it has failed a filter. Return None.
if not alignment.cigar:
return None
start_end = None
if alignment.query_begin != -1:
start_end = [
(alignment.query_begin, alignment.query_end),
(alignment.target_begin, alignment.target_end_optimal)
]
metadata1 = metadata2 = None
if sequence1.has_metadata():
metadata1 = sequence1.metadata
if sequence2.has_metadata():
metadata2 = sequence2.metadata
constructor = type(sequence1)
msa = TabularMSA([
constructor(alignment.aligned_query_sequence, metadata=metadata1,
validate=False),
constructor(alignment.aligned_target_sequence, metadata=metadata2,
validate=False)
])
return msa, alignment.optimal_alignment_score, start_end
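# A minimal usage sketch (illustrative); the SSW wrapper takes the same kinds
# of sequence objects and returns the same tuple shape as the wrappers above,
# or None when a provided filter is not met:
#
#     from skbio import DNA
#     result = local_pairwise_align_ssw(DNA("ACTAAGGCTCTCTACCCCTCTCAGAGA"),
#                                       DNA("ACTAAGGCTCCTAACCCCCTTTTCTCAGA"))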
@deprecated(as_of="0.4.0", until="0.6.0",
reason="Will be replaced by a SubstitutionMatrix class. To track "
"progress, see [#161]"
"(https://github.com/biocore/scikit-bio/issues/161).")
def make_identity_substitution_matrix(match_score, mismatch_score,
alphabet='ACGTU'):
"""Generate substitution matrix where all matches are scored equally
Parameters
----------
match_score : int, float
The score that should be assigned for all matches. This value is
typically positive.
mismatch_score : int, float
The score that should be assigned for all mismatches. This value is
typically negative.
alphabet : iterable of str, optional
The characters that should be included in the substitution matrix.
Returns
-------
dict of dicts
All characters in alphabet are keys in both dictionaries, so that any
pair of characters can be looked up to get their match or mismatch
score.
"""
result = {}
for c1 in alphabet:
row = {}
for c2 in alphabet:
if c1 == c2:
row[c2] = match_score
else:
row[c2] = mismatch_score
result[c1] = row
return result
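# For example, for a two-letter alphabet the loop above yields:
#
#     make_identity_substitution_matrix(2, -3, alphabet='AC')
#     # {'A': {'A': 2, 'C': -3}, 'C': {'A': -3, 'C': 2}}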
# Functions from here allow for generalized (global or local) alignment. I
# will likely want to put these in a single object to make the naming a little
# less clunky.
def _coerce_alignment_input_type(seq):
if isinstance(seq, GrammaredSequence):
return TabularMSA([seq])
else:
return seq
_traceback_encoding = {'match': 1, 'vertical-gap': 2, 'horizontal-gap': 3,
'uninitialized': -1, 'alignment-end': 0}
def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, :] = _traceback_encoding['alignment-end']
traceback_matrix[:, 0] = _traceback_encoding['alignment-end']
return score_matrix, traceback_matrix
def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
# cache some values for quicker access
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
for i in range(1, shape[0]):
score_matrix[i, 0] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
traceback_matrix[i, 0] = vgap
for i in range(1, shape[1]):
score_matrix[0, i] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
traceback_matrix[0, i] = hgap
return score_matrix, traceback_matrix
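# Worked example of the initialization above: with gap_open_penalty=5 and
# gap_extend_penalty=2 the first row and first column of score_matrix become
# 0, -5, -7, -9, ..., i.e. -gap_open_penalty - (i - 1) * gap_extend_penalty.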
def _init_matrices_nw_no_terminal_gap_penalty(
aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
# cache some values for quicker access
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
for i in range(1, shape[0]):
traceback_matrix[i, 0] = vgap
for i in range(1, shape[1]):
traceback_matrix[0, i] = hgap
return score_matrix, traceback_matrix
def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, gap_chars):
substitution_score = 0
for aln1_char, aln2_char in product(aln1_chars, aln2_chars):
if aln1_char in gap_chars or aln2_char in gap_chars:
substitution_score += gap_substitution_score
else:
try:
substitution_score += \
substitution_matrix[aln1_char][aln2_char]
except KeyError:
offending_chars = \
[c for c in (aln1_char, aln2_char)
if c not in substitution_matrix]
raise ValueError(
"One of the sequences contains a character that is "
"not contained in the substitution matrix. Are you "
"using an appropriate substitution matrix for your "
"sequence type (e.g., a nucleotide substitution "
"matrix does not make sense for aligning protein "
"sequences)? Does your sequence contain invalid "
"characters? The offending character(s) is: "
" %s." % ', '.join(offending_chars))
substitution_score /= (len(aln1_chars) * len(aln2_chars))
return substitution_score
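# Worked example of the averaging above: scoring column 'AT' against column
# 'A' with an identity matrix (match=1, mismatch=-1) and
# gap_substitution_score=0 gives (1 + -1) / (2 * 1) = 0.0.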
def _compute_score_and_traceback_matrices(
aln1, aln2, gap_open_penalty, gap_extend_penalty, substitution_matrix,
new_alignment_score=-np.inf, init_matrices_f=_init_matrices_nw,
penalize_terminal_gaps=True, gap_substitution_score=0):
"""Return dynamic programming (score) and traceback matrices.
A note on the ``penalize_terminal_gaps`` parameter. When this value is
``False``, this function is no longer true Smith-Waterman/Needleman-Wunsch
scoring, but when ``True`` it can result in biologically irrelevant
artifacts in Needleman-Wunsch (global) alignments. Specifically, if one
sequence is longer than the other (e.g., if aligning a primer sequence to
an amplification product, or searching for a gene in a genome) the shorter
sequence will have a long gap inserted. The parameter is ``True`` by
default (so that this function computes the score and traceback matrices as
described by the original authors) but the global alignment wrappers pass
``False`` by default, so that the global alignment API returns the result
that users are most likely to be looking for.
"""
aln1_length = aln1.shape.position
aln2_length = aln2.shape.position
# cache some values for quicker/simpler access
aend = _traceback_encoding['alignment-end']
match = _traceback_encoding['match']
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
new_alignment_score = (new_alignment_score, aend)
# Initialize a matrix to use for scoring the alignment and for tracing
# back the best alignment
score_matrix, traceback_matrix = init_matrices_f(
aln1, aln2, gap_open_penalty, gap_extend_penalty)
# Iterate over the characters in aln2 (which corresponds to the vertical
# sequence in the matrix)
for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(
ignore_metadata=True), 1):
aln2_chars = str(aln2_chars)
# Iterate over the characters in aln1 (which corresponds to the
# horizontal sequence in the matrix)
for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(
ignore_metadata=True), 1):
aln1_chars = str(aln1_chars)
# compute the score for a match/mismatch
substitution_score = _compute_substitution_score(
aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, aln1.dtype.gap_chars)
diag_score = \
(score_matrix[aln2_pos-1, aln1_pos-1] + substitution_score,
match)
# compute the score for adding a gap in aln2 (vertical)
if not penalize_terminal_gaps and (aln1_pos == aln1_length):
# we've reached the end of aln1, so adding vertical gaps
# (which become gaps in aln1) should no longer
# be penalized (if penalize_terminal_gaps == False)
up_score = (score_matrix[aln2_pos-1, aln1_pos], vgap)
elif traceback_matrix[aln2_pos-1, aln1_pos] == vgap:
# gap extend, because the cell above was also a gap
up_score = \
(score_matrix[aln2_pos-1, aln1_pos] - gap_extend_penalty,
vgap)
else:
# gap open, because the cell above was not a gap
up_score = \
(score_matrix[aln2_pos-1, aln1_pos] - gap_open_penalty,
vgap)
# compute the score for adding a gap in aln1 (horizontal)
if not penalize_terminal_gaps and (aln2_pos == aln2_length):
# we've reached the end of aln2, so adding horizontal gaps
# (which become gaps in aln2) should no longer
# be penalized (if penalize_terminal_gaps == False)
left_score = (score_matrix[aln2_pos, aln1_pos-1], hgap)
elif traceback_matrix[aln2_pos, aln1_pos-1] == hgap:
# gap extend, because the cell to the left was also a gap
left_score = \
(score_matrix[aln2_pos, aln1_pos-1] - gap_extend_penalty,
hgap)
else:
# gap open, because the cell to the left was not a gap
left_score = \
(score_matrix[aln2_pos, aln1_pos-1] - gap_open_penalty,
hgap)
# identify the largest score, and use that information to populate
# the score and traceback matrices
best_score = _first_largest([new_alignment_score, left_score,
diag_score, up_score])
score_matrix[aln2_pos, aln1_pos] = best_score[0]
traceback_matrix[aln2_pos, aln1_pos] = best_score[1]
return score_matrix, traceback_matrix
def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
start_col):
# cache some values for simpler reference
aend = _traceback_encoding['alignment-end']
match = _traceback_encoding['match']
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
gap_character = aln1.dtype.default_gap_char
# initialize the result alignments
aln1_sequence_count = aln1.shape.sequence
aligned_seqs1 = [[] for e in range(aln1_sequence_count)]
aln2_sequence_count = aln2.shape.sequence
aligned_seqs2 = [[] for e in range(aln2_sequence_count)]
current_row = start_row
current_col = start_col
best_score = score_matrix[current_row, current_col]
current_value = None
while current_value != aend:
current_value = traceback_matrix[current_row, current_col]
if current_value == match:
for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
aligned_seq.append(str(input_seq[current_col-1]))
for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
aligned_seq.append(str(input_seq[current_row-1]))
current_row -= 1
current_col -= 1
elif current_value == vgap:
for aligned_seq in aligned_seqs1:
aligned_seq.append(gap_character)
for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
aligned_seq.append(str(input_seq[current_row-1]))
current_row -= 1
elif current_value == hgap:
for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
aligned_seq.append(str(input_seq[current_col-1]))
for aligned_seq in aligned_seqs2:
aligned_seq.append(gap_character)
current_col -= 1
elif current_value == aend:
continue
else:
raise ValueError(
"Invalid value in traceback matrix: %s" % current_value)
for i, (aligned_seq, original) in enumerate(zip(aligned_seqs1, aln1)):
aligned_seq = ''.join(aligned_seq)[::-1]
constructor = aln1.dtype
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned_seqs1[i] = constructor(aligned_seq, metadata=metadata,
validate=False)
for i, (aligned_seq, original) in enumerate(zip(aligned_seqs2, aln2)):
aligned_seq = ''.join(aligned_seq)[::-1]
constructor = aln2.dtype
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned_seqs2[i] = constructor(aligned_seq, metadata=metadata,
validate=False)
return aligned_seqs1, aligned_seqs2, best_score, current_col, current_row
def _first_largest(scores):
""" Similar to max, but returns the first element achieving the high score
If max receives a tuple, it will break a tie for the highest value
of entry[i] with entry[i+1]. We don't want that here - to better match
with the results of other tools, we want to be able to define which
entry is returned in the case of a tie.
"""
result = scores[0]
for score, direction in scores[1:]:
if score > result[0]:
result = (score, direction)
return result | scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/alignment/_pairwise.py | _pairwise.py |
from warnings import warn
from itertools import product
import numpy as np
from skbio.alignment import TabularMSA
from skbio.alignment._ssw_wrapper import StripedSmithWaterman
from skbio.sequence import DNA, RNA, Protein
from skbio.sequence import GrammaredSequence
from skbio.util import EfficiencyWarning
from skbio.util._decorator import experimental, deprecated
# This is temporary: blosum50 does not exist in skbio yet as per
# issue 161. When the issue is resolved, this should be removed in favor
# of an import.
blosum50 = \
{
'*': {'*': 1, 'A': -5, 'C': -5, 'B': -5, 'E': -5, 'D': -5, 'G': -5,
'F': -5, 'I': -5, 'H': -5, 'K': -5, 'M': -5, 'L': -5,
'N': -5, 'Q': -5, 'P': -5, 'S': -5, 'R': -5, 'T': -5,
'W': -5, 'V': -5, 'Y': -5, 'X': -5, 'Z': -5},
'A': {'*': -5, 'A': 5, 'C': -1, 'B': -2, 'E': -1, 'D': -2, 'G': 0,
'F': -3, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -2,
'N': -1, 'Q': -1, 'P': -1, 'S': 1, 'R': -2, 'T': 0, 'W': -3,
'V': 0, 'Y': -2, 'X': -1, 'Z': -1},
'C': {'*': -5, 'A': -1, 'C': 13, 'B': -3, 'E': -3, 'D': -4,
'G': -3, 'F': -2, 'I': -2, 'H': -3, 'K': -3, 'M': -2,
'L': -2, 'N': -2, 'Q': -3, 'P': -4, 'S': -1, 'R': -4,
'T': -1, 'W': -5, 'V': -1, 'Y': -3, 'X': -1, 'Z': -3},
'B': {'*': -5, 'A': -2, 'C': -3, 'B': 6, 'E': 1, 'D': 6, 'G': -1,
'F': -4, 'I': -4, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 5,
'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -5, 'V': -3,
'Y': -3, 'X': -1, 'Z': 1},
'E': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 6, 'D': 2, 'G': -3,
'F': -3, 'I': -4, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0,
'Q': 2, 'P': -1, 'S': -1, 'R': 0, 'T': -1, 'W': -3, 'V': -3,
'Y': -2, 'X': -1, 'Z': 5},
'D': {'*': -5, 'A': -2, 'C': -4, 'B': 6, 'E': 2, 'D': 8, 'G': -1,
'F': -5, 'I': -4, 'H': -1, 'K': -1, 'M': -4, 'L': -4, 'N': 2,
'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -5, 'V': -4,
'Y': -3, 'X': -1, 'Z': 1},
'G': {'*': -5, 'A': 0, 'C': -3, 'B': -1, 'E': -3, 'D': -1, 'G': 8,
'F': -4, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0,
'Q': -2, 'P': -2, 'S': 0, 'R': -3, 'T': -2, 'W': -3, 'V': -4,
'Y': -3, 'X': -1, 'Z': -2},
'F': {'*': -5, 'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -5,
'G': -4, 'F': 8, 'I': 0, 'H': -1, 'K': -4, 'M': 0, 'L': 1,
'N': -4, 'Q': -4, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 1,
'V': -1, 'Y': 4, 'X': -1, 'Z': -4},
'I': {'*': -5, 'A': -1, 'C': -2, 'B': -4, 'E': -4, 'D': -4,
'G': -4, 'F': 0, 'I': 5, 'H': -4, 'K': -3, 'M': 2, 'L': 2,
'N': -3, 'Q': -3, 'P': -3, 'S': -3, 'R': -4, 'T': -1,
'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'*': -5, 'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2,
'F': -1, 'I': -4, 'H': 10, 'K': 0, 'M': -1, 'L': -3, 'N': 1,
'Q': 1, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -3, 'V': -4,
'Y': 2, 'X': -1, 'Z': 0},
'K': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2,
'F': -4, 'I': -3, 'H': 0, 'K': 6, 'M': -2, 'L': -3, 'N': 0,
'Q': 2, 'P': -1, 'S': 0, 'R': 3, 'T': -1, 'W': -3, 'V': -3,
'Y': -2, 'X': -1, 'Z': 1},
'M': {'*': -5, 'A': -1, 'C': -2, 'B': -3, 'E': -2, 'D': -4,
'G': -3, 'F': 0, 'I': 2, 'H': -1, 'K': -2, 'M': 7, 'L': 3,
'N': -2, 'Q': 0, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -1,
'V': 1, 'Y': 0, 'X': -1, 'Z': -1},
'L': {'*': -5, 'A': -2, 'C': -2, 'B': -4, 'E': -3, 'D': -4,
'G': -4, 'F': 1, 'I': 2, 'H': -3, 'K': -3, 'M': 3, 'L': 5,
'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -1,
'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'*': -5, 'A': -1, 'C': -2, 'B': 5, 'E': 0, 'D': 2, 'G': 0,
'F': -4, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -4, 'N': 7,
'Q': 0, 'P': -2, 'S': 1, 'R': -1, 'T': 0, 'W': -4, 'V': -3,
'Y': -2, 'X': -1, 'Z': 0},
'Q': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2,
'F': -4, 'I': -3, 'H': 1, 'K': 2, 'M': 0, 'L': -2, 'N': 0,
'Q': 7, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -1, 'V': -3,
'Y': -1, 'X': -1, 'Z': 4},
'P': {'*': -5, 'A': -1, 'C': -4, 'B': -2, 'E': -1, 'D': -1,
'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -3,
'L': -4, 'N': -2, 'Q': -1, 'P': 10, 'S': -1, 'R': -3,
'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': -1},
'S': {'*': -5, 'A': 1, 'C': -1, 'B': 0, 'E': -1, 'D': 0, 'G': 0,
'F': -3, 'I': -3, 'H': -1, 'K': 0, 'M': -2, 'L': -3, 'N': 1,
'Q': 0, 'P': -1, 'S': 5, 'R': -1, 'T': 2, 'W': -4, 'V': -2,
'Y': -2, 'X': -1, 'Z': 0},
'R': {'*': -5, 'A': -2, 'C': -4, 'B': -1, 'E': 0, 'D': -2, 'G': -3,
'F': -3, 'I': -4, 'H': 0, 'K': 3, 'M': -2, 'L': -3, 'N': -1,
'Q': 1, 'P': -3, 'S': -1, 'R': 7, 'T': -1, 'W': -3, 'V': -3,
'Y': -1, 'X': -1, 'Z': 0},
'T': {'*': -5, 'A': 0, 'C': -1, 'B': 0, 'E': -1, 'D': -1, 'G': -2,
'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0,
'Q': -1, 'P': -1, 'S': 2, 'R': -1, 'T': 5, 'W': -3, 'V': 0,
'Y': -2, 'X': -1, 'Z': -1},
'W': {'*': -5, 'A': -3, 'C': -5, 'B': -5, 'E': -3, 'D': -5,
'G': -3, 'F': 1, 'I': -3, 'H': -3, 'K': -3, 'M': -1, 'L': -2,
'N': -4, 'Q': -1, 'P': -4, 'S': -4, 'R': -3, 'T': -3,
'W': 15, 'V': -3, 'Y': 2, 'X': -1, 'Z': -2},
'V': {'*': -5, 'A': 0, 'C': -1, 'B': -3, 'E': -3, 'D': -4, 'G': -4,
'F': -1, 'I': 4, 'H': -4, 'K': -3, 'M': 1, 'L': 1, 'N': -3,
'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 5,
'Y': -1, 'X': -1, 'Z': -3},
'Y': {'*': -5, 'A': -2, 'C': -3, 'B': -3, 'E': -2, 'D': -3,
'G': -3, 'F': 4, 'I': -1, 'H': 2, 'K': -2, 'M': 0, 'L': -1,
'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -1, 'T': -2, 'W': 2,
'V': -1, 'Y': 8, 'X': -1, 'Z': -2},
'X': {'*': -5, 'A': -1, 'C': -1, 'B': -1, 'E': -1, 'D': -1,
'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1,
'L': -1, 'N': -1, 'Q': -1, 'P': -1, 'S': -1, 'R': -1,
'T': -1, 'W': -1, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 5, 'D': 1, 'G': -2,
'F': -4, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0,
'Q': 4, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -2, 'V': -3,
'Y': -2, 'X': -1, 'Z': 5}}
@experimental(as_of="0.4.0")
def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
"""Locally align exactly two nucleotide seqs with Smith-Waterman
Parameters
----------
seq1 : DNA or RNA
The first unaligned sequence.
seq2 : DNA or RNA
The second unaligned sequence.
gap_open_penalty : int or float, optional
Penalty for opening a gap (this is substracted from previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
Penalty for extending a gap (this is substracted from previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
global_pairwise_align_nucelotide
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be DNA or RNA, not type %r"
% type(seq).__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
@experimental(as_of="0.4.0")
def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
"""Locally align exactly two protein seqs with Smith-Waterman
Parameters
----------
seq1 : Protein
The first unaligned sequence.
seq2 : Protein
The second unaligned sequence.
gap_open_penalty : int or float, optional
Penalty for opening a gap (this is substracted from previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
Penalty for extending a gap (this is substracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
global_pairwise_align_nucelotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, Protein):
raise TypeError(
"`seq1` and `seq2` must be Protein, not type %r"
% type(seq).__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
@experimental(as_of="0.4.0")
def local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
"""Locally align exactly two seqs with Smith-Waterman
Parameters
----------
seq1 : GrammaredSequence
The first unaligned sequence.
seq2 : GrammaredSequence
The second unaligned sequence.
gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from the previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score).
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
This algorithm was originally described in [1]_. The scikit-bio
implementation was validated against the EMBOSS water web server [2]_.
References
----------
.. [1] Identification of common molecular subsequences.
Smith TF, Waterman MS.
J Mol Biol. 1981 Mar 25;147(1):195-7.
.. [2] http://www.ebi.ac.uk/Tools/psa/emboss_water/
"""
warn("You're using skbio's python implementation of Smith-Waterman "
"alignment. This will be very slow (e.g., thousands of times slower) "
"than skbio.alignment.local_pairwise_align_ssw.",
EfficiencyWarning)
for seq in seq1, seq2:
if not isinstance(seq, GrammaredSequence):
raise TypeError(
"`seq1` and `seq2` must be %r subclasses, not type %r" %
(GrammaredSequence.__name__, type(seq).__name__))
if type(seq1) is not type(seq2):
raise TypeError(
"`seq1` and `seq2` must be the same type: %r != %r"
% (type(seq1).__name__, type(seq2).__name__))
seq1 = _coerce_alignment_input_type(seq1)
seq2 = _coerce_alignment_input_type(seq2)
score_matrix, traceback_matrix = _compute_score_and_traceback_matrices(
seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, new_alignment_score=0.0,
init_matrices_f=_init_matrices_sw)
end_row_position, end_col_position =\
np.unravel_index(np.argmax(score_matrix), score_matrix.shape)
aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
_traceback(traceback_matrix, score_matrix, seq1, seq2,
end_row_position, end_col_position)
start_end_positions = [(seq1_start_position, end_col_position-1),
(seq2_start_position, end_row_position-1)]
msa = TabularMSA(aligned1 + aligned2)
return msa, score, start_end_positions
@experimental(as_of="0.4.0")
def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=1, mismatch_score=-2,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align nucleotide seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : DNA, RNA, or TabularMSA[DNA|RNA]
The first unaligned sequence(s).
seq2 : DNA, RNA, or TabularMSA[DNA|RNA]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from the previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
    substitution_matrix: 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be DNA, RNA, or TabularMSA, not type "
"%r" % type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype,
(DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with DNA or RNA dtype, "
"not dtype %r" % seq.dtype.__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
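# Illustrative usage sketch (not part of the original module; the helper name
# and sequences are invented): the global nucleotide aligner also accepts
# existing alignments, so a new sequence can be aligned against a small
# TabularMSA. Terminal gaps are not penalized by default.
def _example_global_nucleotide_alignment():
    from skbio import DNA, TabularMSA
    aln = TabularMSA([DNA("GACCT-TCTA"), DNA("GACCATTCTA")])
    msa, score, start_end = global_pairwise_align_nucleotide(
        aln, DNA("GACCATTCT"))
    return msa, score, start_end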
@experimental(as_of="0.4.0")
def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align pair of protein seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : Protein or TabularMSA[Protein]
The first unaligned sequence(s).
seq2 : Protein or TabularMSA[Protein]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
        Penalty for opening a gap (this is subtracted from the previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float, optional
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
    global_pairwise_align_nucleotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, (Protein, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be Protein or TabularMSA, not type %r"
% type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, Protein):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with Protein dtype, "
"not dtype %r" % seq.dtype.__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
@experimental(as_of="0.4.0")
def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, penalize_terminal_gaps=False):
"""Globally align a pair of seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : GrammaredSequence or TabularMSA
The first unaligned sequence(s).
seq2 : GrammaredSequence or TabularMSA
The second unaligned sequence(s).
gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from the previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from the previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score).
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align_protein
    global_pairwise_align_nucleotide
Notes
-----
This algorithm (in a slightly more basic form) was originally described
in [1]_. The scikit-bio implementation was validated against the
EMBOSS needle web server [2]_.
    This function can be used to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] A general method applicable to the search for similarities in
the amino acid sequence of two proteins.
Needleman SB, Wunsch CD.
J Mol Biol. 1970 Mar;48(3):443-53.
.. [2] http://www.ebi.ac.uk/Tools/psa/emboss_needle/
"""
warn("You're using skbio's python implementation of Needleman-Wunsch "
"alignment. This is known to be very slow (e.g., thousands of times "
"slower than a native C implementation). We'll be adding a faster "
"version soon (see https://github.com/biocore/scikit-bio/issues/254 "
"to track progress on this).", EfficiencyWarning)
for seq in seq1, seq2:
# We don't need to check the case where `seq` is a `TabularMSA` with a
# dtype that isn't a subclass of `GrammaredSequence`, this is
# guaranteed by `TabularMSA`.
if not isinstance(seq, (GrammaredSequence, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be GrammaredSequence subclasses or "
"TabularMSA, not type %r" % type(seq).__name__)
seq1 = _coerce_alignment_input_type(seq1)
seq2 = _coerce_alignment_input_type(seq2)
if seq1.dtype is not seq2.dtype:
raise TypeError(
"`seq1` and `seq2` must have the same dtype: %r != %r"
% (seq1.dtype.__name__, seq2.dtype.__name__))
if penalize_terminal_gaps:
init_matrices_f = _init_matrices_nw
else:
init_matrices_f = _init_matrices_nw_no_terminal_gap_penalty
score_matrix, traceback_matrix = \
_compute_score_and_traceback_matrices(
seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, new_alignment_score=-np.inf,
init_matrices_f=init_matrices_f,
penalize_terminal_gaps=penalize_terminal_gaps)
end_row_position = traceback_matrix.shape[0] - 1
end_col_position = traceback_matrix.shape[1] - 1
aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
_traceback(traceback_matrix, score_matrix, seq1, seq2,
end_row_position, end_col_position)
start_end_positions = [(seq1_start_position, end_col_position-1),
(seq2_start_position, end_row_position-1)]
msa = TabularMSA(aligned1 + aligned2)
return msa, score, start_end_positions
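# Illustrative sketch (not part of the original module; the helper name and
# sequences are invented): with inputs of different lengths,
# penalize_terminal_gaps=False (the default used by the wrappers above) lets
# the shorter sequence align without paying for the overhang, whereas True
# applies the textbook Needleman-Wunsch scoring.
def _example_terminal_gap_handling():
    from skbio import DNA
    submat = make_identity_substitution_matrix(1, -2)
    long_seq = DNA("ACCGTGGACCGTTAGGATTGGACCAAGGTTA")
    short_seq = DNA("ACCGTGGACCGTTAGGATT")
    free_ends = global_pairwise_align(long_seq, short_seq, 5, 2, submat,
                                      penalize_terminal_gaps=False)
    strict = global_pairwise_align(long_seq, short_seq, 5, 2, submat,
                                   penalize_terminal_gaps=True)
    # The score with free end gaps (free_ends[1]) is at least as high as the
    # strictly penalized score (strict[1]).
    return free_ends[1], strict[1]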
@deprecated(as_of="0.5.8", until="0.6.0",
reason="This will be removed or replaced, in favor of more general"
"-purpose performant aligners. Additional details at "
"https://github.com/biocore/scikit-bio/issues/1814")
def local_pairwise_align_ssw(sequence1, sequence2, **kwargs):
"""Align query and target sequences with Striped Smith-Waterman.
Parameters
----------
sequence1 : DNA, RNA, or Protein
The first unaligned sequence
sequence2 : DNA, RNA, or Protein
The second unaligned sequence
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
Notes
-----
This is a wrapper for the SSW package [1]_.
For a complete list of optional keyword-arguments that can be provided,
see ``skbio.alignment.StripedSmithWaterman``.
The following kwargs will not have any effect: `suppress_sequences`,
`zero_index`, and `protein`
If an alignment does not meet a provided filter, `None` will be returned.
References
----------
.. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
Applications". PLOS ONE (2013). Web. 11 July 2014.
http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138
See Also
--------
skbio.alignment.StripedSmithWaterman
"""
for seq in sequence1, sequence2:
if not isinstance(seq, (DNA, RNA, Protein)):
raise TypeError(
"`sequence1` and `sequence2` must be DNA, RNA, or Protein, "
"not type %r" % type(seq).__name__)
if type(sequence1) is not type(sequence2):
raise TypeError(
"`sequence1` and `sequence2` must be the same type: %r != %r"
% (type(sequence1).__name__, type(sequence2).__name__))
# We need the sequences for `TabularMSA` to make sense, so don't let the
# user suppress them.
kwargs['suppress_sequences'] = False
kwargs['zero_index'] = True
kwargs['protein'] = False
if isinstance(sequence1, Protein):
kwargs['protein'] = True
query = StripedSmithWaterman(str(sequence1), **kwargs)
alignment = query(str(sequence2))
# If there is no cigar, then it has failed a filter. Return None.
if not alignment.cigar:
return None
start_end = None
if alignment.query_begin != -1:
start_end = [
(alignment.query_begin, alignment.query_end),
(alignment.target_begin, alignment.target_end_optimal)
]
metadata1 = metadata2 = None
if sequence1.has_metadata():
metadata1 = sequence1.metadata
if sequence2.has_metadata():
metadata2 = sequence2.metadata
constructor = type(sequence1)
msa = TabularMSA([
constructor(alignment.aligned_query_sequence, metadata=metadata1,
validate=False),
constructor(alignment.aligned_target_sequence, metadata=metadata2,
validate=False)
])
return msa, alignment.optimal_alignment_score, start_end
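# Illustrative usage sketch (not part of the original module; the helper name
# and sequences are invented): the SSW wrapper takes raw DNA, RNA, or Protein
# sequences and may return None when a keyword-argument filter excludes the
# alignment.
def _example_ssw_alignment():
    from skbio import DNA
    result = local_pairwise_align_ssw(
        DNA("ACTAAGGCTCTCTACCCCTCTCAGAGA"),
        DNA("ACTAAGGCTCCTAACCCCCTTTTCTCAGA"))
    if result is None:
        return None
    msa, score, start_end = result
    return score, start_end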
@deprecated(as_of="0.4.0", until="0.6.0",
reason="Will be replaced by a SubstitutionMatrix class. To track "
"progress, see [#161]"
"(https://github.com/biocore/scikit-bio/issues/161).")
def make_identity_substitution_matrix(match_score, mismatch_score,
alphabet='ACGTU'):
"""Generate substitution matrix where all matches are scored equally
Parameters
----------
match_score : int, float
The score that should be assigned for all matches. This value is
typically positive.
mismatch_score : int, float
The score that should be assigned for all mismatches. This value is
typically negative.
alphabet : iterable of str, optional
The characters that should be included in the substitution matrix.
Returns
-------
dict of dicts
All characters in alphabet are keys in both dictionaries, so that any
pair of characters can be looked up to get their match or mismatch
score.
"""
result = {}
for c1 in alphabet:
row = {}
for c2 in alphabet:
if c1 == c2:
row[c2] = match_score
else:
row[c2] = mismatch_score
result[c1] = row
return result
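# Illustrative sketch (not part of the original module; the helper name is
# invented): the returned dict of dicts can be indexed with any ordered pair of
# characters from the alphabet.
def _example_identity_matrix():
    submat = make_identity_substitution_matrix(2, -3)
    assert submat['A']['A'] == 2    # match
    assert submat['A']['C'] == -3   # mismatch
    assert submat['U']['G'] == -3   # default alphabet is 'ACGTU'
    return submat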
# Functions from here allow for generalized (global or local) alignment. I
# will likely want to put these in a single object to make the naming a little
# less clunky.
def _coerce_alignment_input_type(seq):
if isinstance(seq, GrammaredSequence):
return TabularMSA([seq])
else:
return seq
_traceback_encoding = {'match': 1, 'vertical-gap': 2, 'horizontal-gap': 3,
'uninitialized': -1, 'alignment-end': 0}
def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, :] = _traceback_encoding['alignment-end']
traceback_matrix[:, 0] = _traceback_encoding['alignment-end']
return score_matrix, traceback_matrix
def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
# cache some values for quicker access
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
for i in range(1, shape[0]):
score_matrix[i, 0] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
traceback_matrix[i, 0] = vgap
for i in range(1, shape[1]):
score_matrix[0, i] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
traceback_matrix[0, i] = hgap
return score_matrix, traceback_matrix
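# Worked illustration (added comment, not part of the original module): with
# gap_open_penalty=5 and gap_extend_penalty=2, the first column of the
# Needleman-Wunsch score matrix is initialized to 0, -5, -7, -9, ... because a
# terminal gap of length i costs gap_open + (i - 1) * gap_extend; the first row
# follows the same pattern.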
def _init_matrices_nw_no_terminal_gap_penalty(
aln1, aln2, gap_open_penalty, gap_extend_penalty):
shape = (aln2.shape.position+1, aln1.shape.position+1)
score_matrix = np.zeros(shape)
traceback_matrix = np.zeros(shape, dtype=int)
traceback_matrix += _traceback_encoding['uninitialized']
traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
# cache some values for quicker access
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
for i in range(1, shape[0]):
traceback_matrix[i, 0] = vgap
for i in range(1, shape[1]):
traceback_matrix[0, i] = hgap
return score_matrix, traceback_matrix
def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, gap_chars):
substitution_score = 0
for aln1_char, aln2_char in product(aln1_chars, aln2_chars):
if aln1_char in gap_chars or aln2_char in gap_chars:
substitution_score += gap_substitution_score
else:
try:
substitution_score += \
substitution_matrix[aln1_char][aln2_char]
except KeyError:
offending_chars = \
[c for c in (aln1_char, aln2_char)
if c not in substitution_matrix]
raise ValueError(
"One of the sequences contains a character that is "
"not contained in the substitution matrix. Are you "
"using an appropriate substitution matrix for your "
"sequence type (e.g., a nucleotide substitution "
"matrix does not make sense for aligning protein "
"sequences)? Does your sequence contain invalid "
"characters? The offending character(s) is: "
" %s." % ', '.join(offending_chars))
substitution_score /= (len(aln1_chars) * len(aln2_chars))
return substitution_score
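# Worked illustration (added comment, not part of the original module): when
# aln1 contributes the column 'AG' (two sequences) and aln2 the column 'A' (one
# sequence), an identity matrix with match=1 and mismatch=-1 gives
# (1 + -1) / (2 * 1) = 0.0; any gap character in a pairing contributes
# gap_substitution_score instead of a matrix lookup.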
def _compute_score_and_traceback_matrices(
aln1, aln2, gap_open_penalty, gap_extend_penalty, substitution_matrix,
new_alignment_score=-np.inf, init_matrices_f=_init_matrices_nw,
penalize_terminal_gaps=True, gap_substitution_score=0):
"""Return dynamic programming (score) and traceback matrices.
A note on the ``penalize_terminal_gaps`` parameter. When this value is
``False``, this function is no longer true Smith-Waterman/Needleman-Wunsch
scoring, but when ``True`` it can result in biologically irrelevant
artifacts in Needleman-Wunsch (global) alignments. Specifically, if one
sequence is longer than the other (e.g., if aligning a primer sequence to
an amplification product, or searching for a gene in a genome) the shorter
sequence will have a long gap inserted. The parameter is ``True`` by
default (so that this function computes the score and traceback matrices as
described by the original authors) but the global alignment wrappers pass
``False`` by default, so that the global alignment API returns the result
that users are most likely to be looking for.
"""
aln1_length = aln1.shape.position
aln2_length = aln2.shape.position
# cache some values for quicker/simpler access
aend = _traceback_encoding['alignment-end']
match = _traceback_encoding['match']
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
new_alignment_score = (new_alignment_score, aend)
# Initialize a matrix to use for scoring the alignment and for tracing
# back the best alignment
score_matrix, traceback_matrix = init_matrices_f(
aln1, aln2, gap_open_penalty, gap_extend_penalty)
# Iterate over the characters in aln2 (which corresponds to the vertical
# sequence in the matrix)
for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(
ignore_metadata=True), 1):
aln2_chars = str(aln2_chars)
# Iterate over the characters in aln1 (which corresponds to the
# horizontal sequence in the matrix)
for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(
ignore_metadata=True), 1):
aln1_chars = str(aln1_chars)
# compute the score for a match/mismatch
substitution_score = _compute_substitution_score(
aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, aln1.dtype.gap_chars)
diag_score = \
(score_matrix[aln2_pos-1, aln1_pos-1] + substitution_score,
match)
# compute the score for adding a gap in aln2 (vertical)
if not penalize_terminal_gaps and (aln1_pos == aln1_length):
# we've reached the end of aln1, so adding vertical gaps
# (which become gaps in aln1) should no longer
# be penalized (if penalize_terminal_gaps == False)
up_score = (score_matrix[aln2_pos-1, aln1_pos], vgap)
elif traceback_matrix[aln2_pos-1, aln1_pos] == vgap:
# gap extend, because the cell above was also a gap
up_score = \
(score_matrix[aln2_pos-1, aln1_pos] - gap_extend_penalty,
vgap)
else:
# gap open, because the cell above was not a gap
up_score = \
(score_matrix[aln2_pos-1, aln1_pos] - gap_open_penalty,
vgap)
# compute the score for adding a gap in aln1 (horizontal)
if not penalize_terminal_gaps and (aln2_pos == aln2_length):
# we've reached the end of aln2, so adding horizontal gaps
# (which become gaps in aln2) should no longer
# be penalized (if penalize_terminal_gaps == False)
left_score = (score_matrix[aln2_pos, aln1_pos-1], hgap)
elif traceback_matrix[aln2_pos, aln1_pos-1] == hgap:
# gap extend, because the cell to the left was also a gap
left_score = \
(score_matrix[aln2_pos, aln1_pos-1] - gap_extend_penalty,
hgap)
else:
# gap open, because the cell to the left was not a gap
left_score = \
(score_matrix[aln2_pos, aln1_pos-1] - gap_open_penalty,
hgap)
# identify the largest score, and use that information to populate
# the score and traceback matrices
best_score = _first_largest([new_alignment_score, left_score,
diag_score, up_score])
score_matrix[aln2_pos, aln1_pos] = best_score[0]
traceback_matrix[aln2_pos, aln1_pos] = best_score[1]
return score_matrix, traceback_matrix
def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
start_col):
# cache some values for simpler reference
aend = _traceback_encoding['alignment-end']
match = _traceback_encoding['match']
vgap = _traceback_encoding['vertical-gap']
hgap = _traceback_encoding['horizontal-gap']
gap_character = aln1.dtype.default_gap_char
# initialize the result alignments
aln1_sequence_count = aln1.shape.sequence
aligned_seqs1 = [[] for e in range(aln1_sequence_count)]
aln2_sequence_count = aln2.shape.sequence
aligned_seqs2 = [[] for e in range(aln2_sequence_count)]
current_row = start_row
current_col = start_col
best_score = score_matrix[current_row, current_col]
current_value = None
while current_value != aend:
current_value = traceback_matrix[current_row, current_col]
if current_value == match:
for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
aligned_seq.append(str(input_seq[current_col-1]))
for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
aligned_seq.append(str(input_seq[current_row-1]))
current_row -= 1
current_col -= 1
elif current_value == vgap:
for aligned_seq in aligned_seqs1:
aligned_seq.append(gap_character)
for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
aligned_seq.append(str(input_seq[current_row-1]))
current_row -= 1
elif current_value == hgap:
for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
aligned_seq.append(str(input_seq[current_col-1]))
for aligned_seq in aligned_seqs2:
aligned_seq.append(gap_character)
current_col -= 1
elif current_value == aend:
continue
else:
raise ValueError(
"Invalid value in traceback matrix: %s" % current_value)
for i, (aligned_seq, original) in enumerate(zip(aligned_seqs1, aln1)):
aligned_seq = ''.join(aligned_seq)[::-1]
constructor = aln1.dtype
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned_seqs1[i] = constructor(aligned_seq, metadata=metadata,
validate=False)
for i, (aligned_seq, original) in enumerate(zip(aligned_seqs2, aln2)):
aligned_seq = ''.join(aligned_seq)[::-1]
constructor = aln2.dtype
metadata = None
if original.has_metadata():
metadata = original.metadata
aligned_seqs2[i] = constructor(aligned_seq, metadata=metadata,
validate=False)
return aligned_seqs1, aligned_seqs2, best_score, current_col, current_row
def _first_largest(scores):
""" Similar to max, but returns the first element achieving the high score
If max receives a tuple, it will break a tie for the highest value
of entry[i] with entry[i+1]. We don't want that here - to better match
with the results of other tools, we want to be able to define which
entry is returned in the case of a tie.
"""
result = scores[0]
for score, direction in scores[1:]:
if score > result[0]:
result = (score, direction)
    return result
import math
from skbio.util._misc import chunk_str
from skbio.metadata._repr import _MetadataReprBuilder
class _SequenceReprBuilder(_MetadataReprBuilder):
"""Build a ``Sequence`` repr.
Parameters
----------
seq : Sequence
Sequence to repr.
width : int
Maximum width of the repr.
indent : int
Number of spaces to use for indented lines.
chunk_size: int
Number of characters in each chunk of a sequence.
"""
def __init__(self, seq, width, indent, chunk_size):
super(_SequenceReprBuilder, self).__init__(seq, width, indent)
self._chunk_size = chunk_size
def _process_header(self):
cls_name = self._obj.__class__.__name__
self._lines.add_line(cls_name)
self._lines.add_separator()
def _process_data(self):
num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
# display entire sequence if we can, else display the first two and
# last two lines separated by ellipsis
if num_lines <= 5:
self._lines.add_lines(self._format_chunked_seq(
range(num_lines), num_chars, column_width))
else:
self._lines.add_lines(self._format_chunked_seq(
range(2), num_chars, column_width))
self._lines.add_line('...')
self._lines.add_lines(self._format_chunked_seq(
range(num_lines - 2, num_lines), num_chars, column_width))
def _find_optimal_seq_chunking(self):
"""Find the optimal number of sequence chunks to fit on a single line.
Returns the number of lines the sequence will occupy, the number of
sequence characters displayed on each line, and the column width
necessary to display position info using the optimal number of sequence
chunks.
"""
# strategy: use an iterative approach to find the optimal number of
# sequence chunks per line. start with a single chunk and increase
# until the max line width is exceeded. when this happens, the previous
# number of chunks is optimal
num_lines = 0
num_chars = 0
column_width = 0
num_chunks = 1
not_exceeded = True
while not_exceeded:
line_len, new_chunk_info = self._compute_chunked_seq_line_len(
num_chunks)
not_exceeded = line_len <= self._width
if not_exceeded:
num_lines, num_chars, column_width = new_chunk_info
num_chunks += 1
return num_lines, num_chars, column_width
def _compute_chunked_seq_line_len(self, num_chunks):
"""Compute line length based on a number of chunks."""
num_chars = num_chunks * self._chunk_size
# ceil to account for partial line
num_lines = int(math.ceil(len(self._obj) / num_chars))
# position column width is fixed width, based on the number of
# characters necessary to display the position of the final line (all
# previous positions will be left justified using this width)
column_width = len('%d ' % ((num_lines - 1) * num_chars))
# column width + number of sequence characters + spaces between chunks
line_len = column_width + num_chars + (num_chunks - 1)
return line_len, (num_lines, num_chars, column_width)
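    # Worked illustration (added comment, not part of the original module):
    # with chunk_size=10, num_chunks=2 and a 55-character sequence,
    # num_chars=20, num_lines=ceil(55/20)=3, column_width=len('40 ')=3 (the
    # final line starts at position 40), and line_len = 3 + 20 + 1 = 24.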
def _format_chunked_seq(self, line_idxs, num_chars, column_width):
"""Format specified lines of chunked sequence data."""
lines = []
for line_idx in line_idxs:
seq_idx = line_idx * num_chars
chars = str(self._obj[seq_idx:seq_idx+num_chars])
chunked_chars = chunk_str(chars, self._chunk_size, ' ')
lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
        return lines
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/sequence/_repr.py | _repr.py |
from abc import ABCMeta, abstractproperty
from itertools import product
import re
import numpy as np
from skbio.util._decorator import (classproperty, overrides, stable,
deprecated, experimental)
from skbio.util._misc import MiniRegistry
from ._sequence import Sequence
class GrammaredSequenceMeta(ABCMeta, type):
def __new__(mcs, name, bases, dct):
cls = super(GrammaredSequenceMeta, mcs).__new__(mcs, name, bases, dct)
concrete_gap_chars = \
type(cls.gap_chars) is not abstractproperty
concrete_degenerate_map = \
type(cls.degenerate_map) is not abstractproperty
concrete_definite_chars = \
type(cls.definite_chars) is not abstractproperty
concrete_default_gap_char = \
type(cls.default_gap_char) is not abstractproperty
# degenerate_chars is not abstract but it depends on degenerate_map
# which is abstract.
concrete_degenerate_chars = concrete_degenerate_map
# Only perform metaclass checks if none of the attributes on the class
# are abstract.
# TODO: Rather than hard-coding a list of attributes to check, we can
# probably check all the attributes on the class and make sure none of
# them are abstract.
if (concrete_gap_chars and concrete_degenerate_map and
concrete_definite_chars and concrete_default_gap_char and
concrete_degenerate_chars):
if cls.default_gap_char not in cls.gap_chars:
raise TypeError(
"default_gap_char must be in gap_chars for class %s" %
name)
if len(cls.gap_chars & cls.degenerate_chars) > 0:
raise TypeError(
"gap_chars and degenerate_chars must not share any "
"characters for class %s" % name)
for key in cls.degenerate_map.keys():
for definite_char in cls.degenerate_map[key]:
if definite_char not in cls.definite_chars:
raise TypeError(
"degenerate_map must expand only to "
"characters included in definite_chars "
"for class %s" % name)
if len(cls.degenerate_chars & cls.definite_chars) > 0:
raise TypeError(
"degenerate_chars and definite_chars must not "
"share any characters for class %s" % name)
if len(cls.gap_chars & cls.definite_chars) > 0:
raise TypeError(
"gap_chars and definite_chars must not share any "
"characters for class %s" % name)
return cls
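# Illustrative sketch (added comment, not part of the original module): a
# subclass whose default_gap_char is '-' while gap_chars is set('.') would
# raise "TypeError: default_gap_char must be in gap_chars ..." as soon as the
# class statement executes, because these consistency checks run in the
# metaclass once every abstract grammar property has a concrete definition.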
# Adapted from http://stackoverflow.com/a/16056691/943814
# Note that inheriting from GrammaredSequenceMeta, rather than something
# more general, is intentional. Multiple inheritance with metaclasses can be
# tricky and is not handled automatically in Python. Since this class needs to
# inherit both from ABCMeta and GrammaredSequenceMeta, the only way we could
# find to make this work was to have GrammaredSequenceMeta inherit from ABCMeta
# and then inherit from GrammaredSequenceMeta here.
class DisableSubclassingMeta(GrammaredSequenceMeta):
def __new__(mcs, name, bases, dct):
for b in bases:
if isinstance(b, DisableSubclassingMeta):
raise TypeError("Subclassing disabled for class %s. To create"
" a custom sequence class, inherit directly"
" from skbio.sequence.%s" %
(b.__name__, GrammaredSequence.__name__))
return super(DisableSubclassingMeta, mcs).__new__(mcs, name, bases,
dict(dct))
class GrammaredSequence(Sequence, metaclass=GrammaredSequenceMeta):
"""Store sequence data conforming to a character set.
This is an abstract base class (ABC) that cannot be instantiated.
This class is intended to be inherited from to create grammared sequences
with custom alphabets.
Raises
------
ValueError
If sequence characters are not in the character set [1]_.
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
Note in the example below that properties either need to be static or
use skbio's `classproperty` decorator.
>>> from skbio.sequence import GrammaredSequence
>>> from skbio.util import classproperty
>>> class CustomSequence(GrammaredSequence):
... @classproperty
... def degenerate_map(cls):
... return {"X": set("AB")}
...
... @classproperty
... def definite_chars(cls):
... return set("ABC")
...
...
... @classproperty
... def default_gap_char(cls):
... return '-'
...
... @classproperty
... def gap_chars(cls):
... return set('-.')
>>> seq = CustomSequence('ABABACAC')
>>> seq
CustomSequence
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
--------------------------
0 ABABACAC
>>> seq = CustomSequence('XXXXXX')
>>> seq
CustomSequence
-------------------------
Stats:
length: 6
has gaps: False
has degenerates: True
has definites: False
-------------------------
0 XXXXXX
"""
__validation_mask = None
__degenerate_codes = None
__definite_char_codes = None
__gap_codes = None
@classproperty
def _validation_mask(cls):
# TODO These masks could be defined (as literals) on each concrete
# object. For now, memoize!
if cls.__validation_mask is None:
as_bytes = ''.join(cls.alphabet).encode('ascii')
cls.__validation_mask = np.invert(np.bincount(
np.frombuffer(as_bytes, dtype=np.uint8),
minlength=cls._number_of_extended_ascii_codes).astype(bool))
return cls.__validation_mask
@classproperty
def _degenerate_codes(cls):
if cls.__degenerate_codes is None:
degens = cls.degenerate_chars
cls.__degenerate_codes = np.asarray([ord(d) for d in degens])
return cls.__degenerate_codes
@classproperty
def _definite_char_codes(cls):
if cls.__definite_char_codes is None:
definite_chars = cls.definite_chars
cls.__definite_char_codes = np.asarray(
[ord(d) for d in definite_chars])
return cls.__definite_char_codes
@classproperty
def _gap_codes(cls):
if cls.__gap_codes is None:
gaps = cls.gap_chars
cls.__gap_codes = np.asarray([ord(g) for g in gaps])
return cls.__gap_codes
@classproperty
@stable(as_of='0.4.0')
def alphabet(cls):
"""Return valid characters.
This includes gap, definite, and degenerate characters.
Returns
-------
set
Valid characters.
"""
return cls.degenerate_chars | cls.definite_chars | cls.gap_chars
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def gap_chars(cls):
"""Return characters defined as gaps.
Returns
-------
set
Characters defined as gaps.
"""
raise NotImplementedError
@abstractproperty
@classproperty
@experimental(as_of='0.4.1')
def default_gap_char(cls):
"""Gap character to use when constructing a new gapped sequence.
This character is used when it is necessary to represent gap characters
in a new sequence. For example, a majority consensus sequence will use
this character to represent gaps.
Returns
-------
str
Default gap character.
"""
raise NotImplementedError
@classproperty
@stable(as_of='0.4.0')
def degenerate_chars(cls):
"""Return degenerate characters.
Returns
-------
set
Degenerate characters.
"""
return set(cls.degenerate_map)
@classproperty
@deprecated(as_of='0.5.0', until='0.6.0',
reason='Renamed to definite_chars')
def nondegenerate_chars(cls):
"""Return non-degenerate characters.
Returns
-------
set
Non-degenerate characters.
"""
return cls.definite_chars
@abstractproperty
@classproperty
@stable(as_of='0.5.0')
def definite_chars(cls):
"""Return definite characters.
Returns
-------
set
Definite characters.
"""
raise NotImplementedError
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def degenerate_map(cls):
"""Return mapping of degenerate to definite characters.
Returns
-------
dict (set)
Mapping of each degenerate character to the set of
definite characters it represents.
"""
raise NotImplementedError
@property
def _motifs(self):
return _motifs
@overrides(Sequence)
def __init__(self, sequence, metadata=None, positional_metadata=None,
interval_metadata=None, lowercase=False, validate=True):
super(GrammaredSequence, self).__init__(
sequence, metadata, positional_metadata,
interval_metadata, lowercase)
if validate:
self._validate()
def _validate(self):
# This is the fastest way that we have found to identify the
# presence or absence of certain characters (numbers).
# It works by multiplying a mask where the numbers which are
# permitted have a zero at their index, and all others have a one.
        # The result is a vector which will propagate counts of invalid
# numbers and remove counts of valid numbers, so that we need only
# see if the array is empty to determine validity.
invalid_characters = np.bincount(
self._bytes, minlength=self._number_of_extended_ascii_codes
) * self._validation_mask
if np.any(invalid_characters):
bad = list(np.where(
invalid_characters > 0)[0].astype(np.uint8).view('|S1'))
raise ValueError(
"Invalid character%s in sequence: %r. \n"
"Valid characters: %r\n"
"Note: Use `lowercase` if your sequence contains lowercase "
"characters not in the sequence's alphabet."
% ('s' if len(bad) > 1 else '',
[str(b.tobytes().decode("ascii")) for b in bad] if
len(bad) > 1 else bad[0],
list(self.alphabet)))
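    # Worked illustration (added comment, not part of the original module): for
    # a DNA sequence, np.bincount(self._bytes, minlength=256) counts how often
    # each ASCII code occurs; multiplying by the mask (1 for codes outside the
    # alphabet, 0 for valid codes) zeroes the valid counts, so the bytes of
    # 'ACQG' would leave a nonzero count only at ord('Q') == 81 and validation
    # would fail.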
@stable(as_of='0.4.0')
def gaps(self):
"""Find positions containing gaps in the biological sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a gap character is present
at that position in the biological sequence.
See Also
--------
has_gaps
Examples
--------
>>> from skbio import DNA
>>> s = DNA('AC-G-')
>>> s.gaps()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._gap_codes)
@stable(as_of='0.4.0')
def has_gaps(self):
"""Determine if the sequence contains one or more gap characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of gap
characters in the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_gaps()
False
>>> t = DNA('A.CAC--GACGTT')
>>> t.has_gaps()
True
"""
# TODO use count, there aren't that many gap chars
# TODO: cache results
return bool(self.gaps().any())
@stable(as_of='0.4.0')
def degenerates(self):
"""Find positions containing degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a degenerate character is
present at that position in the biological sequence.
See Also
--------
has_degenerates
definites
has_definites
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.degenerates()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._degenerate_codes)
@stable(as_of='0.4.0')
def has_degenerates(self):
"""Determine if sequence contains one or more degenerate characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of degenerate
characters in the biological sequence.
See Also
--------
degenerates
definites
has_definites
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACAC-GACGTT')
>>> s.has_degenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_degenerates()
True
"""
# TODO use bincount!
# TODO: cache results
return bool(self.degenerates().any())
@stable(as_of='0.5.0')
def definites(self):
"""Find positions containing definite characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a definite character
is present at that position in the biological sequence.
See Also
--------
has_definites
degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.definites()
array([ True, True, False, True, False], dtype=bool)
"""
return np.in1d(self._bytes, self._definite_char_codes)
@deprecated(as_of='0.5.0', until='0.6.0',
reason='Renamed to definites')
def nondegenerates(self):
"""Find positions containing non-degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a non-degenerate character
is present at that position in the biological sequence.
See Also
--------
has_definites
degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.nondegenerates()
array([ True, True, False, True, False], dtype=bool)
"""
return self.definites()
@stable(as_of='0.5.0')
def has_definites(self):
"""Determine if sequence contains one or more definite characters
Returns
-------
bool
Indicates whether there are one or more occurrences of
definite characters in the biological sequence.
See Also
--------
definites
degenerates
has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('NWNNNNNN')
>>> s.has_definites()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_definites()
True
"""
# TODO: cache results
return bool(self.definites().any())
@deprecated(as_of='0.5.0', until='0.6.0',
reason='Renamed to has_definites')
def has_nondegenerates(self):
"""Determine if sequence contains one or more non-degenerate characters
Returns
-------
bool
Indicates whether there are one or more occurrences of
non-degenerate characters in the biological sequence.
See Also
--------
definites
degenerates
has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('NWNNNNNN')
>>> s.has_nondegenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_nondegenerates()
True
"""
# TODO: cache results
return self.has_definites()
@stable(as_of='0.4.0')
def degap(self):
"""Return a new sequence with gap characters removed.
Returns
-------
GrammaredSequence
A new sequence with all gap characters removed.
See Also
--------
gap_chars
Notes
-----
The type and metadata of the result will be the same as the
biological sequence. If positional metadata is present, it will be
filtered in the same manner as the sequence characters and included in
the resulting degapped sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('GGTC-C--ATT-C.',
... positional_metadata={'quality':range(14)})
>>> s.degap()
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 9
has gaps: False
has degenerates: False
has definites: True
GC-content: 55.56%
-----------------------------
0 GGTCCATTC
"""
return self[np.invert(self.gaps())]
@stable(as_of='0.4.0')
def expand_degenerates(self):
"""Yield all possible definite versions of the sequence.
Yields
------
GrammaredSequence
Definite version of the sequence.
See Also
--------
degenerate_map
Notes
-----
There is no guaranteed ordering to the definite sequences that are
yielded.
Each definite sequence will have the same type, metadata, and
positional metadata as the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TRG')
>>> seq_generator = seq.expand_degenerates()
>>> for s in sorted(seq_generator, key=str):
... s
... print('')
DNA
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
GC-content: 33.33%
--------------------------
0 TAG
<BLANKLINE>
DNA
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
GC-content: 66.67%
--------------------------
0 TGG
<BLANKLINE>
"""
degen_chars = self.degenerate_map
nonexpansion_chars = self.definite_chars.union(self.gap_chars)
expansions = []
for char in self:
char = str(char)
if char in nonexpansion_chars:
expansions.append(char)
else:
expansions.append(degen_chars[char])
metadata = None
if self.has_metadata():
metadata = self.metadata
positional_metadata = None
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
for definite_seq in product(*expansions):
yield self._constructor(
sequence=''.join(definite_seq),
metadata=metadata,
positional_metadata=positional_metadata,
interval_metadata=self.interval_metadata)
@stable(as_of='0.4.1')
def to_regex(self, within_capture=False):
"""Return regular expression object that accounts for degenerate chars.
Parameters
----------
within_capture : bool
If ``True``, format the regex pattern for the sequence into a
single capture group. If ``False``, compile the regex pattern as-is
with no capture groups.
Returns
-------
regex
Pre-compiled regular expression object (as from ``re.compile``)
that matches all definite versions of this sequence, and nothing
else.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TRG')
>>> regex = seq.to_regex()
>>> regex.match('TAG').string
'TAG'
>>> regex.match('TGG').string
'TGG'
>>> regex.match('TCG') is None
True
>>> regex = seq.to_regex(within_capture=True)
>>> regex.match('TAG').groups(0)
('TAG',)
"""
regex_parts = []
for base in str(self):
if base in self.degenerate_chars:
regex_parts.append('[{0}]'.format(
''.join(self.degenerate_map[base])))
else:
regex_parts.append(base)
regex_string = ''.join(regex_parts)
if within_capture:
regex_string = '({})'.format(regex_string)
return re.compile(regex_string)
@stable(as_of='0.4.0')
def find_motifs(self, motif_type, min_length=1, ignore=None):
"""Search the biological sequence for motifs.
Options for `motif_type`:
Parameters
----------
motif_type : str
Type of motif to find.
min_length : int, optional
Only motifs at least as long as `min_length` will be returned.
ignore : 1D array_like (bool), optional
Boolean vector indicating positions to ignore when matching.
Yields
------
slice
Location of the motif in the biological sequence.
Raises
------
ValueError
If an unknown `motif_type` is specified.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACGGGGAGGCGGAG')
>>> for motif_slice in s.find_motifs('purine-run', min_length=2):
... motif_slice
... str(s[motif_slice])
slice(2, 9, None)
'GGGGAGG'
slice(10, 14, None)
'GGAG'
Gap characters can disrupt motifs:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run'):
... motif_slice
slice(0, 2, None)
slice(3, 5, None)
Gaps can be ignored by passing the gap boolean vector to `ignore`:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run', ignore=s.gaps()):
... motif_slice
slice(0, 5, None)
"""
if motif_type not in self._motifs:
raise ValueError("Not a known motif (%r) for this sequence (%s)." %
(motif_type, self.__class__.__name__))
return self._motifs[motif_type](self, min_length, ignore)
@overrides(Sequence)
def _constructor(self, **kwargs):
return self.__class__(validate=False, lowercase=False, **kwargs)
@overrides(Sequence)
def _repr_stats(self):
"""Define custom statistics to display in the sequence's repr."""
stats = super(GrammaredSequence, self)._repr_stats()
stats.append(('has gaps', '%r' % self.has_gaps()))
stats.append(('has degenerates', '%r' % self.has_degenerates()))
stats.append(('has definites', '%r' % self.has_definites()))
return stats
_motifs = MiniRegistry()
# Leave this at the bottom
_motifs.interpolate(GrammaredSequence, "find_motifs")
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/sequence/_grammared_sequence.py | _grammared_sequence.py |
from abc import ABCMeta, abstractproperty
from itertools import product
import re
import numpy as np
from skbio.util._decorator import (classproperty, overrides, stable,
deprecated, experimental)
from skbio.util._misc import MiniRegistry
from ._sequence import Sequence
class GrammaredSequenceMeta(ABCMeta, type):
def __new__(mcs, name, bases, dct):
cls = super(GrammaredSequenceMeta, mcs).__new__(mcs, name, bases, dct)
concrete_gap_chars = \
type(cls.gap_chars) is not abstractproperty
concrete_degenerate_map = \
type(cls.degenerate_map) is not abstractproperty
concrete_definite_chars = \
type(cls.definite_chars) is not abstractproperty
concrete_default_gap_char = \
type(cls.default_gap_char) is not abstractproperty
# degenerate_chars is not abstract but it depends on degenerate_map
# which is abstract.
concrete_degenerate_chars = concrete_degenerate_map
# Only perform metaclass checks if none of the attributes on the class
# are abstract.
# TODO: Rather than hard-coding a list of attributes to check, we can
# probably check all the attributes on the class and make sure none of
# them are abstract.
if (concrete_gap_chars and concrete_degenerate_map and
concrete_definite_chars and concrete_default_gap_char and
concrete_degenerate_chars):
if cls.default_gap_char not in cls.gap_chars:
raise TypeError(
"default_gap_char must be in gap_chars for class %s" %
name)
if len(cls.gap_chars & cls.degenerate_chars) > 0:
raise TypeError(
"gap_chars and degenerate_chars must not share any "
"characters for class %s" % name)
for key in cls.degenerate_map.keys():
for definite_char in cls.degenerate_map[key]:
if definite_char not in cls.definite_chars:
raise TypeError(
"degenerate_map must expand only to "
"characters included in definite_chars "
"for class %s" % name)
if len(cls.degenerate_chars & cls.definite_chars) > 0:
raise TypeError(
"degenerate_chars and definite_chars must not "
"share any characters for class %s" % name)
if len(cls.gap_chars & cls.definite_chars) > 0:
raise TypeError(
"gap_chars and definite_chars must not share any "
"characters for class %s" % name)
return cls
# Adapted from http://stackoverflow.com/a/16056691/943814
# Note that inheriting from GrammaredSequenceMeta, rather than something
# more general, is intentional. Multiple inheritance with metaclasses can be
# tricky and is not handled automatically in Python. Since this class needs to
# inherit both from ABCMeta and GrammaredSequenceMeta, the only way we could
# find to make this work was to have GrammaredSequenceMeta inherit from ABCMeta
# and then inherit from GrammaredSequenceMeta here.
class DisableSubclassingMeta(GrammaredSequenceMeta):
def __new__(mcs, name, bases, dct):
for b in bases:
if isinstance(b, DisableSubclassingMeta):
raise TypeError("Subclassing disabled for class %s. To create"
" a custom sequence class, inherit directly"
" from skbio.sequence.%s" %
(b.__name__, GrammaredSequence.__name__))
return super(DisableSubclassingMeta, mcs).__new__(mcs, name, bases,
dict(dct))
class GrammaredSequence(Sequence, metaclass=GrammaredSequenceMeta):
"""Store sequence data conforming to a character set.
This is an abstract base class (ABC) that cannot be instantiated.
This class is intended to be inherited from to create grammared sequences
with custom alphabets.
Raises
------
ValueError
If sequence characters are not in the character set [1]_.
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
Note in the example below that properties either need to be static or
use skbio's `classproperty` decorator.
>>> from skbio.sequence import GrammaredSequence
>>> from skbio.util import classproperty
>>> class CustomSequence(GrammaredSequence):
... @classproperty
... def degenerate_map(cls):
... return {"X": set("AB")}
...
... @classproperty
... def definite_chars(cls):
... return set("ABC")
...
...
... @classproperty
... def default_gap_char(cls):
... return '-'
...
... @classproperty
... def gap_chars(cls):
... return set('-.')
>>> seq = CustomSequence('ABABACAC')
>>> seq
CustomSequence
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
--------------------------
0 ABABACAC
>>> seq = CustomSequence('XXXXXX')
>>> seq
CustomSequence
-------------------------
Stats:
length: 6
has gaps: False
has degenerates: True
has definites: False
-------------------------
0 XXXXXX
"""
__validation_mask = None
__degenerate_codes = None
__definite_char_codes = None
__gap_codes = None
@classproperty
def _validation_mask(cls):
# TODO These masks could be defined (as literals) on each concrete
# object. For now, memoize!
if cls.__validation_mask is None:
as_bytes = ''.join(cls.alphabet).encode('ascii')
cls.__validation_mask = np.invert(np.bincount(
np.frombuffer(as_bytes, dtype=np.uint8),
minlength=cls._number_of_extended_ascii_codes).astype(bool))
return cls.__validation_mask
@classproperty
def _degenerate_codes(cls):
if cls.__degenerate_codes is None:
degens = cls.degenerate_chars
cls.__degenerate_codes = np.asarray([ord(d) for d in degens])
return cls.__degenerate_codes
@classproperty
def _definite_char_codes(cls):
if cls.__definite_char_codes is None:
definite_chars = cls.definite_chars
cls.__definite_char_codes = np.asarray(
[ord(d) for d in definite_chars])
return cls.__definite_char_codes
@classproperty
def _gap_codes(cls):
if cls.__gap_codes is None:
gaps = cls.gap_chars
cls.__gap_codes = np.asarray([ord(g) for g in gaps])
return cls.__gap_codes
@classproperty
@stable(as_of='0.4.0')
def alphabet(cls):
"""Return valid characters.
This includes gap, definite, and degenerate characters.
Returns
-------
set
Valid characters.
"""
return cls.degenerate_chars | cls.definite_chars | cls.gap_chars
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def gap_chars(cls):
"""Return characters defined as gaps.
Returns
-------
set
Characters defined as gaps.
"""
raise NotImplementedError
@abstractproperty
@classproperty
@experimental(as_of='0.4.1')
def default_gap_char(cls):
"""Gap character to use when constructing a new gapped sequence.
This character is used when it is necessary to represent gap characters
in a new sequence. For example, a majority consensus sequence will use
this character to represent gaps.
Returns
-------
str
Default gap character.
"""
raise NotImplementedError
@classproperty
@stable(as_of='0.4.0')
def degenerate_chars(cls):
"""Return degenerate characters.
Returns
-------
set
Degenerate characters.
"""
return set(cls.degenerate_map)
@classproperty
@deprecated(as_of='0.5.0', until='0.6.0',
reason='Renamed to definite_chars')
def nondegenerate_chars(cls):
"""Return non-degenerate characters.
Returns
-------
set
Non-degenerate characters.
"""
return cls.definite_chars
@abstractproperty
@classproperty
@stable(as_of='0.5.0')
def definite_chars(cls):
"""Return definite characters.
Returns
-------
set
Definite characters.
"""
raise NotImplementedError
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def degenerate_map(cls):
"""Return mapping of degenerate to definite characters.
Returns
-------
dict (set)
Mapping of each degenerate character to the set of
definite characters it represents.
"""
raise NotImplementedError
@property
def _motifs(self):
return _motifs
@overrides(Sequence)
def __init__(self, sequence, metadata=None, positional_metadata=None,
interval_metadata=None, lowercase=False, validate=True):
super(GrammaredSequence, self).__init__(
sequence, metadata, positional_metadata,
interval_metadata, lowercase)
if validate:
self._validate()
def _validate(self):
# This is the fastest way that we have found to identify the
# presence or absence of certain characters (numbers).
# It works by multiplying a mask where the numbers which are
# permitted have a zero at their index, and all others have a one.
        # The result is a vector which will propagate counts of invalid
# numbers and remove counts of valid numbers, so that we need only
# see if the array is empty to determine validity.
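        # Illustrative sketch (not executed): if the alphabet were {'A', 'B'},
        # the mask would be 1 at every index except ord('A') == 65 and
        # ord('B') == 66. A sequence containing 'C' (code 67) would then leave
        # a nonzero count at index 67 after the multiplication below, and the
        # ValueError is raised.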
invalid_characters = np.bincount(
self._bytes, minlength=self._number_of_extended_ascii_codes
) * self._validation_mask
if np.any(invalid_characters):
bad = list(np.where(
invalid_characters > 0)[0].astype(np.uint8).view('|S1'))
raise ValueError(
"Invalid character%s in sequence: %r. \n"
"Valid characters: %r\n"
"Note: Use `lowercase` if your sequence contains lowercase "
"characters not in the sequence's alphabet."
% ('s' if len(bad) > 1 else '',
[str(b.tobytes().decode("ascii")) for b in bad] if
len(bad) > 1 else bad[0],
list(self.alphabet)))
@stable(as_of='0.4.0')
def gaps(self):
"""Find positions containing gaps in the biological sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a gap character is present
at that position in the biological sequence.
See Also
--------
has_gaps
Examples
--------
>>> from skbio import DNA
>>> s = DNA('AC-G-')
>>> s.gaps()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._gap_codes)
@stable(as_of='0.4.0')
def has_gaps(self):
"""Determine if the sequence contains one or more gap characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of gap
characters in the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_gaps()
False
>>> t = DNA('A.CAC--GACGTT')
>>> t.has_gaps()
True
"""
# TODO use count, there aren't that many gap chars
# TODO: cache results
return bool(self.gaps().any())
@stable(as_of='0.4.0')
def degenerates(self):
"""Find positions containing degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a degenerate character is
present at that position in the biological sequence.
See Also
--------
has_degenerates
definites
has_definites
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.degenerates()
array([False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._degenerate_codes)
@stable(as_of='0.4.0')
def has_degenerates(self):
"""Determine if sequence contains one or more degenerate characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of degenerate
characters in the biological sequence.
See Also
--------
degenerates
definites
has_definites
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACAC-GACGTT')
>>> s.has_degenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_degenerates()
True
"""
# TODO use bincount!
# TODO: cache results
return bool(self.degenerates().any())
@stable(as_of='0.5.0')
def definites(self):
"""Find positions containing definite characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a definite character
is present at that position in the biological sequence.
See Also
--------
has_definites
degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.definites()
array([ True, True, False, True, False], dtype=bool)
"""
return np.in1d(self._bytes, self._definite_char_codes)
@deprecated(as_of='0.5.0', until='0.6.0',
reason='Renamed to definites')
def nondegenerates(self):
"""Find positions containing non-degenerate characters in the sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a non-degenerate character
is present at that position in the biological sequence.
See Also
--------
has_definites
degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACWGN')
>>> s.nondegenerates()
array([ True, True, False, True, False], dtype=bool)
"""
return self.definites()
@stable(as_of='0.5.0')
def has_definites(self):
"""Determine if sequence contains one or more definite characters
Returns
-------
bool
Indicates whether there are one or more occurrences of
definite characters in the biological sequence.
See Also
--------
definites
degenerates
has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('NWNNNNNN')
>>> s.has_definites()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_definites()
True
"""
# TODO: cache results
return bool(self.definites().any())
@deprecated(as_of='0.5.0', until='0.6.0',
reason='Renamed to has_definites')
def has_nondegenerates(self):
"""Determine if sequence contains one or more non-degenerate characters
Returns
-------
bool
Indicates whether there are one or more occurrences of
non-degenerate characters in the biological sequence.
See Also
--------
definites
degenerates
has_degenerates
Examples
--------
>>> from skbio import DNA
>>> s = DNA('NWNNNNNN')
>>> s.has_nondegenerates()
False
>>> t = DNA('ANCACWWGACGTT')
>>> t.has_nondegenerates()
True
"""
# TODO: cache results
return self.has_definites()
@stable(as_of='0.4.0')
def degap(self):
"""Return a new sequence with gap characters removed.
Returns
-------
GrammaredSequence
A new sequence with all gap characters removed.
See Also
--------
gap_chars
Notes
-----
The type and metadata of the result will be the same as the
biological sequence. If positional metadata is present, it will be
filtered in the same manner as the sequence characters and included in
the resulting degapped sequence.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('GGTC-C--ATT-C.',
... positional_metadata={'quality':range(14)})
>>> s.degap()
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 9
has gaps: False
has degenerates: False
has definites: True
GC-content: 55.56%
-----------------------------
0 GGTCCATTC
"""
return self[np.invert(self.gaps())]
@stable(as_of='0.4.0')
def expand_degenerates(self):
"""Yield all possible definite versions of the sequence.
Yields
------
GrammaredSequence
Definite version of the sequence.
See Also
--------
degenerate_map
Notes
-----
There is no guaranteed ordering to the definite sequences that are
yielded.
Each definite sequence will have the same type, metadata, and
positional metadata as the biological sequence.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TRG')
>>> seq_generator = seq.expand_degenerates()
>>> for s in sorted(seq_generator, key=str):
... s
... print('')
DNA
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
GC-content: 33.33%
--------------------------
0 TAG
<BLANKLINE>
DNA
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
GC-content: 66.67%
--------------------------
0 TGG
<BLANKLINE>
"""
degen_chars = self.degenerate_map
nonexpansion_chars = self.definite_chars.union(self.gap_chars)
expansions = []
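        # For example, DNA('TRG') builds expansions == ['T', {'A', 'G'}, 'G'];
        # ``product`` (from itertools) below then yields ('T', 'A', 'G') and
        # ('T', 'G', 'G'), matching the doctest above.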
for char in self:
char = str(char)
if char in nonexpansion_chars:
expansions.append(char)
else:
expansions.append(degen_chars[char])
metadata = None
if self.has_metadata():
metadata = self.metadata
positional_metadata = None
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
for definite_seq in product(*expansions):
yield self._constructor(
sequence=''.join(definite_seq),
metadata=metadata,
positional_metadata=positional_metadata,
interval_metadata=self.interval_metadata)
@stable(as_of='0.4.1')
def to_regex(self, within_capture=False):
"""Return regular expression object that accounts for degenerate chars.
Parameters
----------
within_capture : bool
If ``True``, format the regex pattern for the sequence into a
single capture group. If ``False``, compile the regex pattern as-is
with no capture groups.
Returns
-------
regex
Pre-compiled regular expression object (as from ``re.compile``)
that matches all definite versions of this sequence, and nothing
else.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TRG')
>>> regex = seq.to_regex()
>>> regex.match('TAG').string
'TAG'
>>> regex.match('TGG').string
'TGG'
>>> regex.match('TCG') is None
True
>>> regex = seq.to_regex(within_capture=True)
>>> regex.match('TAG').groups(0)
('TAG',)
"""
regex_parts = []
for base in str(self):
if base in self.degenerate_chars:
regex_parts.append('[{0}]'.format(
''.join(self.degenerate_map[base])))
else:
regex_parts.append(base)
regex_string = ''.join(regex_parts)
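        # e.g. DNA('TRG') produces the pattern 'T[AG]G' (character order
        # inside the brackets follows set iteration order).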
if within_capture:
regex_string = '({})'.format(regex_string)
return re.compile(regex_string)
@stable(as_of='0.4.0')
def find_motifs(self, motif_type, min_length=1, ignore=None):
"""Search the biological sequence for motifs.
        Options for `motif_type` are specific to the sequence type; see the
        Examples section below for the motifs available to DNA sequences.
Parameters
----------
motif_type : str
Type of motif to find.
min_length : int, optional
Only motifs at least as long as `min_length` will be returned.
ignore : 1D array_like (bool), optional
Boolean vector indicating positions to ignore when matching.
Yields
------
slice
Location of the motif in the biological sequence.
Raises
------
ValueError
If an unknown `motif_type` is specified.
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACGGGGAGGCGGAG')
>>> for motif_slice in s.find_motifs('purine-run', min_length=2):
... motif_slice
... str(s[motif_slice])
slice(2, 9, None)
'GGGGAGG'
slice(10, 14, None)
'GGAG'
Gap characters can disrupt motifs:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run'):
... motif_slice
slice(0, 2, None)
slice(3, 5, None)
Gaps can be ignored by passing the gap boolean vector to `ignore`:
>>> s = DNA('GG-GG')
>>> for motif_slice in s.find_motifs('purine-run', ignore=s.gaps()):
... motif_slice
slice(0, 5, None)
"""
if motif_type not in self._motifs:
raise ValueError("Not a known motif (%r) for this sequence (%s)." %
(motif_type, self.__class__.__name__))
return self._motifs[motif_type](self, min_length, ignore)
@overrides(Sequence)
def _constructor(self, **kwargs):
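        # Validation is skipped here on the assumption that _constructor is
        # only called with characters derived from an already-validated
        # sequence (e.g. the results of degap or expand_degenerates).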
return self.__class__(validate=False, lowercase=False, **kwargs)
@overrides(Sequence)
def _repr_stats(self):
"""Define custom statistics to display in the sequence's repr."""
stats = super(GrammaredSequence, self)._repr_stats()
stats.append(('has gaps', '%r' % self.has_gaps()))
stats.append(('has degenerates', '%r' % self.has_degenerates()))
stats.append(('has definites', '%r' % self.has_definites()))
return stats
_motifs = MiniRegistry()
# Leave this at the bottom
_motifs.interpolate(GrammaredSequence, "find_motifs")
# (end of skbio/sequence/_grammared_sequence.py)
import skbio
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
from ._grammared_sequence import GrammaredSequence, DisableSubclassingMeta
class DNA(GrammaredSequence, NucleotideMixin,
metaclass=DisableSubclassingMeta):
r"""Store DNA sequence data and optional associated metadata.
Only characters in the IUPAC DNA character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the DNA sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
interval_metadata : IntervalMetadata
Arbitrary interval metadata which applies to intervals within
a sequence to store interval features (such as genes on the
DNA sequence).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC DNA characters. If
``False``, no characters will be converted. If a str, it will be
treated as a key into the positional metadata of the object. All
lowercase characters will be converted to uppercase, and a ``True``
value will be stored in a boolean array in the positional metadata
under the key.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC DNA character set. If ``False``, validation
will not be performed. Turning off validation will improve runtime
performance. If invalid characters are present, however, there is
**no guarantee that operations performed on the resulting object will
work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
See Also
--------
RNA
GrammaredSequence
Notes
-----
Subclassing is disabled for DNA, because subclassing makes
it possible to change the alphabet, and certain methods rely on the
IUPAC alphabet. If a custom sequence alphabet is needed, inherit directly
from ``GrammaredSequence``.
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import DNA
>>> DNA('ACCGAAT')
DNA
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
GC-content: 42.86%
--------------------------
0 ACCGAAT
Convert lowercase characters to uppercase:
>>> DNA('AcCGaaT', lowercase=True)
DNA
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
GC-content: 42.86%
--------------------------
0 ACCGAAT
"""
@classproperty
@overrides(NucleotideMixin)
def complement_map(cls):
comp_map = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
'H': 'D', 'V': 'B', 'N': 'N'
}
comp_map.update({c: c for c in cls.gap_chars})
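        # Gap characters complement to themselves, so complementing (or
        # reverse-complementing) a gapped sequence preserves its gaps.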
return comp_map
@classproperty
@overrides(GrammaredSequence)
def definite_chars(cls):
return set("ACGT")
@classproperty
@overrides(GrammaredSequence)
def degenerate_map(cls):
return {
"R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
"W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
"H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
}
@classproperty
@overrides(GrammaredSequence)
def default_gap_char(cls):
return '-'
@classproperty
@overrides(GrammaredSequence)
def gap_chars(cls):
return set('-.')
@property
def _motifs(self):
return _motifs
@stable(as_of="0.4.0")
def transcribe(self):
"""Transcribe DNA into RNA.
DNA sequence is assumed to be the coding strand. Thymine (T) is
replaced with uracil (U) in the transcribed sequence.
Returns
-------
RNA
Transcribed sequence.
See Also
--------
translate
translate_six_frames
Notes
-----
DNA sequence's metadata, positional, and interval
metadata are included in the transcribed RNA sequence.
Examples
--------
Transcribe DNA into RNA:
>>> from skbio import DNA
>>> dna = DNA('TAACGTTA')
>>> dna
DNA
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
GC-content: 25.00%
--------------------------
0 TAACGTTA
>>> dna.transcribe()
RNA
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
GC-content: 25.00%
--------------------------
0 UAACGUUA
"""
seq = self._string.replace(b'T', b'U')
metadata = None
if self.has_metadata():
metadata = self.metadata
positional_metadata = None
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
interval_metadata = None
if self.has_interval_metadata():
interval_metadata = self.interval_metadata
# turn off validation because `seq` is guaranteed to be valid
return skbio.RNA(seq, metadata=metadata,
positional_metadata=positional_metadata,
interval_metadata=interval_metadata,
validate=False)
@stable(as_of="0.4.0")
def translate(self, *args, **kwargs):
"""Translate DNA sequence into protein sequence.
DNA sequence is assumed to be the coding strand. DNA sequence is first
transcribed into RNA and then translated into protein.
Parameters
----------
args : tuple
Positional arguments accepted by ``RNA.translate``.
kwargs : dict
Keyword arguments accepted by ``RNA.translate``.
Returns
-------
Protein
Translated sequence.
See Also
--------
RNA.reverse_transcribe
RNA.translate
translate_six_frames
transcribe
Notes
-----
DNA sequence's metadata are included in the translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate DNA into protein using NCBI's standard genetic code (table ID
1, the default genetic code in scikit-bio):
>>> from skbio import DNA
>>> dna = DNA('ATGCCACTTTAA')
>>> dna.translate()
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
Translate the same DNA sequence using a different NCBI genetic code
(table ID 3, the yeast mitochondrial code) and specify that translation
must terminate at the first stop codon:
>>> dna.translate(3, stop='require')
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 MPT
"""
return self.transcribe().translate(*args, **kwargs)
@stable(as_of="0.4.0")
def translate_six_frames(self, *args, **kwargs):
"""Translate DNA into protein using six possible reading frames.
DNA sequence is assumed to be the coding strand. DNA sequence is first
transcribed into RNA and then translated into protein. The six possible
reading frames are:
* 1 (forward)
* 2 (forward)
* 3 (forward)
* -1 (reverse)
* -2 (reverse)
* -3 (reverse)
Translated sequences are yielded in this order.
Parameters
----------
args : tuple
Positional arguments accepted by ``RNA.translate_six_frames``.
kwargs : dict
Keyword arguments accepted by ``RNA.translate_six_frames``.
Yields
------
Protein
Translated sequence in the current reading frame.
See Also
--------
RNA.translate_six_frames
translate
transcribe
Notes
-----
This method is faster than (and equivalent to) performing six
independent translations using, for example:
``(seq.translate(reading_frame=rf)
for rf in GeneticCode.reading_frames)``
DNA sequence's metadata are included in each translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate DNA into protein using the six possible reading frames and
NCBI's standard genetic code (table ID 1, the default genetic code in
scikit-bio):
>>> from skbio import DNA
>>> dna = DNA('ATGCCACTTTAA')
>>> for protein in dna.translate_six_frames():
... protein
... print('')
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 CHF
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 ATL
<BLANKLINE>
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 LKWH
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 *SG
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 KVA
<BLANKLINE>
"""
return self.transcribe().translate_six_frames(*args, **kwargs)
@overrides(GrammaredSequence)
def _repr_stats(self):
"""Define custom statistics to display in the sequence's repr."""
stats = super(DNA, self)._repr_stats()
stats.append(('GC-content', '{:.2%}'.format(self.gc_content())))
return stats
_motifs = _parent_motifs.copy()
# Leave this at the bottom
_motifs.interpolate(DNA, "find_motifs")
# (end of skbio/sequence/_dna.py)
import skbio
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
from ._grammared_sequence import GrammaredSequence, DisableSubclassingMeta
class RNA(GrammaredSequence, NucleotideMixin,
metaclass=DisableSubclassingMeta):
r"""Store RNA sequence data and optional associated metadata.
Only characters in the IUPAC RNA character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the RNA sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
interval_metadata : IntervalMetadata
Arbitrary metadata which applies to intervals within a sequence to
store interval features (such as exons or introns on the sequence).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC RNA characters. If
``False``, no characters will be converted. If a str, it will be
treated as a key into the positional metadata of the object. All
lowercase characters will be converted to uppercase, and a ``True``
value will be stored in a boolean array in the positional metadata
under the key.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC RNA character set. If ``False``, validation
will not be performed. Turning off validation will improve runtime
performance. If invalid characters are present, however, there is
**no guarantee that operations performed on the resulting object will
work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
See Also
--------
DNA
GrammaredSequence
Notes
-----
Subclassing is disabled for RNA, because subclassing makes
it possible to change the alphabet, and certain methods rely on the
IUPAC alphabet. If a custom sequence alphabet is needed, inherit directly
from ``GrammaredSequence``.
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import RNA
>>> RNA('ACCGAAU')
RNA
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
GC-content: 42.86%
--------------------------
0 ACCGAAU
Convert lowercase characters to uppercase:
>>> RNA('AcCGaaU', lowercase=True)
RNA
--------------------------
Stats:
length: 7
has gaps: False
has degenerates: False
has definites: True
GC-content: 42.86%
--------------------------
0 ACCGAAU
"""
@classproperty
@overrides(NucleotideMixin)
def complement_map(cls):
comp_map = {
'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
'H': 'D', 'V': 'B', 'N': 'N'
}
comp_map.update({c: c for c in cls.gap_chars})
return comp_map
@classproperty
@overrides(GrammaredSequence)
def definite_chars(cls):
return set("ACGU")
@classproperty
@overrides(GrammaredSequence)
def degenerate_map(cls):
return {
"R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
"W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
"H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
}
@classproperty
@overrides(GrammaredSequence)
def default_gap_char(cls):
return '-'
@classproperty
@overrides(GrammaredSequence)
def gap_chars(cls):
return set('-.')
@property
def _motifs(self):
return _motifs
@stable(as_of="0.4.1")
def reverse_transcribe(self):
"""Reverse transcribe RNA into DNA.
It returns the coding DNA strand of the RNA sequence, i.e. uracil (U)
is replaced with thymine (T) in the reverse transcribed sequence.
Returns
-------
DNA
Reverse transcribed sequence.
See Also
--------
DNA.transcribe
translate
translate_six_frames
Notes
-----
RNA sequence's metadata and positional metadata are included in the
transcribed DNA sequence.
Examples
--------
Reverse transcribe RNA into DNA:
>>> from skbio import RNA
>>> rna = RNA('UAACGUUA')
>>> rna
RNA
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
GC-content: 25.00%
--------------------------
0 UAACGUUA
>>> rna.reverse_transcribe()
DNA
--------------------------
Stats:
length: 8
has gaps: False
has degenerates: False
has definites: True
GC-content: 25.00%
--------------------------
0 TAACGTTA
"""
seq = self._string.replace(b'U', b'T')
metadata = None
if self.has_metadata():
metadata = self.metadata
positional_metadata = None
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
interval_metadata = None
if self.has_interval_metadata():
interval_metadata = self.interval_metadata
# turn off validation because `seq` is guaranteed to be valid
return skbio.DNA(seq, metadata=metadata,
positional_metadata=positional_metadata,
interval_metadata=interval_metadata,
validate=False)
@stable(as_of="0.4.0")
def translate(self, genetic_code=1, *args, **kwargs):
"""Translate RNA sequence into protein sequence.
Parameters
----------
genetic_code : int, GeneticCode, optional
Genetic code to use in translation. If ``int``, used as a table ID
to look up the corresponding NCBI genetic code.
args : tuple
Positional arguments accepted by ``GeneticCode.translate``.
kwargs : dict
Keyword arguments accepted by ``GeneticCode.translate``.
Returns
-------
Protein
Translated sequence.
See Also
--------
GeneticCode.translate
GeneticCode.from_ncbi
translate_six_frames
Notes
-----
RNA sequence's metadata are included in the translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate RNA into protein using NCBI's standard genetic code (table ID
1, the default genetic code in scikit-bio):
>>> from skbio import RNA
>>> rna = RNA('AUGCCACUUUAA')
>>> rna.translate()
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
Translate the same RNA sequence using a different NCBI genetic code
(table ID 3, the yeast mitochondrial code) and specify that translation
must terminate at the first stop codon:
>>> rna.translate(3, stop='require')
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 MPT
"""
if not isinstance(genetic_code, skbio.GeneticCode):
genetic_code = skbio.GeneticCode.from_ncbi(genetic_code)
return genetic_code.translate(self, *args, **kwargs)
@stable(as_of="0.4.0")
def translate_six_frames(self, genetic_code=1, *args, **kwargs):
"""Translate RNA into protein using six possible reading frames.
The six possible reading frames are:
* 1 (forward)
* 2 (forward)
* 3 (forward)
* -1 (reverse)
* -2 (reverse)
* -3 (reverse)
Translated sequences are yielded in this order.
Parameters
----------
genetic_code : int, GeneticCode, optional
Genetic code to use in translation. If ``int``, used as a table ID
to look up the corresponding NCBI genetic code.
args : tuple
Positional arguments accepted by
``GeneticCode.translate_six_frames``.
kwargs : dict
Keyword arguments accepted by ``GeneticCode.translate_six_frames``.
Yields
------
Protein
Translated sequence in the current reading frame.
See Also
--------
GeneticCode.translate_six_frames
GeneticCode.from_ncbi
translate
Notes
-----
This method is faster than (and equivalent to) performing six
independent translations using, for example:
``(seq.translate(reading_frame=rf)
for rf in GeneticCode.reading_frames)``
RNA sequence's metadata are included in each translated protein
sequence. Positional metadata are not included.
Examples
--------
Translate RNA into protein using the six possible reading frames and
NCBI's standard genetic code (table ID 1, the default genetic code in
scikit-bio):
>>> from skbio import RNA
>>> rna = RNA('AUGCCACUUUAA')
>>> for protein in rna.translate_six_frames():
... protein
... print('')
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 MPL*
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 CHF
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 ATL
<BLANKLINE>
Protein
--------------------------
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 LKWH
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: True
--------------------------
0 *SG
<BLANKLINE>
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 KVA
<BLANKLINE>
"""
if not isinstance(genetic_code, skbio.GeneticCode):
genetic_code = skbio.GeneticCode.from_ncbi(genetic_code)
return genetic_code.translate_six_frames(self, *args, **kwargs)
@overrides(GrammaredSequence)
def _repr_stats(self):
"""Define custom statistics to display in the sequence's repr."""
stats = super(RNA, self)._repr_stats()
stats.append(('GC-content', '{:.2%}'.format(self.gc_content())))
return stats
_motifs = _parent_motifs.copy()
# Leave this at the bottom
_motifs.interpolate(RNA, "find_motifs")
# (end of skbio/sequence/_rna.py)
import numpy as np
import scipy.spatial.distance
import skbio
from skbio.util._decorator import experimental
@experimental(as_of='0.4.2')
def hamming(seq1, seq2):
"""Compute Hamming distance between two sequences.
The Hamming distance between two equal-length sequences is the proportion
of differing characters.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute Hamming distance between.
Returns
-------
float
Hamming distance between `seq1` and `seq2`.
Raises
------
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
ValueError
If `seq1` and `seq2` are not the same length.
See Also
--------
scipy.spatial.distance.hamming
Notes
-----
``np.nan`` will be returned if the sequences do not contain any characters.
This function does not make assumptions about the sequence alphabet in use.
Each sequence object's underlying sequence of characters are used to
compute Hamming distance. Characters that may be considered equivalent in
certain contexts (e.g., `-` and `.` as gap characters) are treated as
distinct characters when computing Hamming distance.
Examples
--------
>>> from skbio import Sequence
>>> from skbio.sequence.distance import hamming
>>> seq1 = Sequence('AGGGTA')
>>> seq2 = Sequence('CGTTTA')
>>> hamming(seq1, seq2)
0.5
"""
_check_seqs(seq1, seq2)
# Hamming requires equal length sequences. We are checking this here
# because the error you would get otherwise is cryptic.
if len(seq1) != len(seq2):
raise ValueError(
"Hamming distance can only be computed between sequences of equal "
"length (%d != %d)" % (len(seq1), len(seq2)))
# scipy throws a RuntimeWarning when computing Hamming distance on length 0
# input.
if not seq1:
distance = np.nan
else:
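        # scipy returns the *proportion* of mismatching positions, not the
        # count; e.g. 3 differing positions out of 6 gives 0.5 (see doctest).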
distance = scipy.spatial.distance.hamming(seq1.values, seq2.values)
return float(distance)
@experimental(as_of='0.5.0')
def kmer_distance(seq1, seq2, k, overlap=True):
"""Compute the kmer distance between a pair of sequences
The kmer distance between two sequences is the fraction of kmers that are
unique to either sequence.
Parameters
----------
seq1, seq2 : Sequence
Sequences to compute kmer distance between.
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Returns
-------
float
kmer distance between `seq1` and `seq2`.
Raises
------
ValueError
If `k` is less than 1.
TypeError
If `seq1` and `seq2` are not ``Sequence`` instances.
TypeError
If `seq1` and `seq2` are not the same type.
Notes
-----
kmer counts are not incorporated in this distance metric.
``np.nan`` will be returned if there are no kmers defined for the
sequences.
Examples
--------
>>> from skbio import Sequence
>>> seq1 = Sequence('ATCGGCGAT')
>>> seq2 = Sequence('GCAGATGTG')
>>> kmer_distance(seq1, seq2, 3) # doctest: +ELLIPSIS
0.9230769230...
"""
_check_seqs(seq1, seq2)
seq1_kmers = set(map(str, seq1.iter_kmers(k, overlap=overlap)))
seq2_kmers = set(map(str, seq2.iter_kmers(k, overlap=overlap)))
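    # Worked sketch: with k=2 and overlapping kmers, Sequence('ABAB') gives
    # {'AB', 'BA'} and Sequence('ABBA') gives {'AB', 'BB', 'BA'}; 1 of the 3
    # kmers in the union is unique, so the distance is 1/3.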
all_kmers = seq1_kmers | seq2_kmers
if not all_kmers:
return np.nan
shared_kmers = seq1_kmers & seq2_kmers
number_unique = len(all_kmers) - len(shared_kmers)
fraction_unique = number_unique / len(all_kmers)
return fraction_unique
def _check_seqs(seq1, seq2):
# Asserts both sequences are skbio.sequence objects
for seq in seq1, seq2:
if not isinstance(seq, skbio.Sequence):
raise TypeError(
"`seq1` and `seq2` must be Sequence instances, not %r"
% type(seq).__name__)
# Asserts sequences have the same type
if type(seq1) is not type(seq2):
raise TypeError(
"Sequences must have matching type. Type %r does not match type %r"
% (type(seq1).__name__, type(seq2).__name__)) | scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/sequence/distance.py | distance.py |
| 0.936807 | 0.747201 |
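A minimal usage sketch for the two distance functions defined in distance.py above (hamming and kmer_distance); the DNA sequences are made-up examples and nothing shown is captured program output:

from skbio import DNA
from skbio.sequence.distance import hamming, kmer_distance

s1 = DNA('ATCGGCGAT')
s2 = DNA('GCAGATGTG')
print(hamming(s1, s2))                          # proportion of differing positions
print(kmer_distance(s1, s2, 3))                 # fraction of unique overlapping 3-mers
print(kmer_distance(s1, s2, 3, overlap=False))  # same metric on non-overlapping 3-mers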
from abc import ABCMeta, abstractproperty
import numpy as np
from skbio.util._decorator import classproperty, stable
from ._grammared_sequence import _motifs as parent_motifs
class NucleotideMixin(metaclass=ABCMeta):
"""Mixin for adding funtionality for working with sequences of nucleotides.
This is an abstract base class (ABC) that cannot be instantiated.
See Also
--------
DNA
RNA
"""
__complement_lookup = None
__gc_codes = None
@classproperty
def _complement_lookup(cls):
if cls.__complement_lookup is not None:
return cls.__complement_lookup
lookup = np.zeros(cls._number_of_extended_ascii_codes, dtype=np.uint8)
for key, value in cls.complement_map.items():
lookup[ord(key)] = ord(value)
cls.__complement_lookup = lookup
return lookup
@classproperty
def _gc_codes(cls):
if cls.__gc_codes is None:
gc_iupac_chars = 'GCS'
cls.__gc_codes = np.asarray([ord(g) for g in gc_iupac_chars])
return cls.__gc_codes
@property
def _motifs(self):
return _motifs
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def complement_map(cls):
"""Return mapping of nucleotide characters to their complements.
Returns
-------
dict
Mapping of each character to its complement.
Notes
-----
Complements cannot be defined for a generic nucleotide sequence because
the complement of ``A`` is ambiguous. Thanks, nature...
"""
raise NotImplementedError
@stable(as_of='0.4.0')
def complement(self, reverse=False):
"""Return the complement of the nucleotide sequence.
Parameters
----------
reverse : bool, optional
If ``True``, return the reverse complement. If positional and/or
interval metadata are present, they will be reversed.
Returns
-------
NucleotideMixin
The (reverse) complement of the nucleotide sequence. The type and
metadata of the result will be the same as the nucleotide
sequence. If `reverse` is ``True``, positional or interval metadata
will be reversed if it is present.
See Also
--------
reverse_complement
complement_map
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TTCATT', positional_metadata={'quality':range(6)})
>>> seq
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 TTCATT
>>> seq.complement()
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 AAGTAA
>>> rc = seq.complement(reverse=True)
>>> rc
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 AATGAA
>>> rc.positional_metadata['quality'].values
array([5, 4, 3, 2, 1, 0])
"""
result = self._complement_lookup[self._bytes]
metadata = None
if self.has_metadata():
metadata = self.metadata
positional_metadata = None
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
complement = self._constructor(
sequence=result,
metadata=metadata,
positional_metadata=positional_metadata)
if reverse:
# this has to be before the interval metadata code,
            # because __getitem__ drops interval_metadata.
complement = complement[::-1]
if self.has_interval_metadata():
complement.interval_metadata = self.interval_metadata
if reverse:
# TODO: this can be revised to match
# positional_metadata when __getitem__
# supports interval_metadata
complement.interval_metadata._reverse()
return complement
@stable(as_of='0.4.0')
def reverse_complement(self):
"""Return the reverse complement of the nucleotide sequence.
Returns
-------
NucleotideMixin
The reverse complement of the nucleotide sequence. The type and
metadata of the result will be the same as the nucleotide
sequence. If positional metadata is present, it will be reversed.
See Also
--------
complement
is_reverse_complement
Notes
-----
This method is equivalent to ``self.complement(reverse=True)``.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TTCATT',
... positional_metadata={'quality':range(6)})
>>> seq = seq.reverse_complement()
>>> seq
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 AATGAA
>>> seq.positional_metadata['quality'].values
array([5, 4, 3, 2, 1, 0])
"""
return self.complement(reverse=True)
@stable(as_of='0.4.0')
def is_reverse_complement(self, other):
r"""Determine if a sequence is the reverse complement of this sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
bool
``True`` if `other` is the reverse complement of the nucleotide
sequence.
Raises
------
TypeError
If `other` is a ``Sequence`` object with a different type than the
nucleotide sequence.
See Also
--------
reverse_complement
Examples
--------
>>> from skbio import DNA
>>> DNA('TTCATT').is_reverse_complement('AATGAA')
True
>>> DNA('TTCATT').is_reverse_complement('AATGTT')
False
>>> DNA('ACGT').is_reverse_complement('ACGT')
True
"""
other = self._munge_to_sequence(other, 'is_reverse_complement')
# avoid computing the reverse complement if possible
if len(self) != len(other):
return False
else:
# we reverse complement ourselves because `other` is a `Sequence`
# object at this point and we only care about comparing the
# underlying sequence data
return self.reverse_complement()._string == other._string
@stable(as_of='0.4.0')
def gc_content(self):
"""Calculate the relative frequency of G's and C's in the sequence.
This includes G, C, and S characters. This is equivalent to calling
``gc_frequency(relative=True)``. Note that the sequence will be
degapped before the operation, so gap characters will not be included
when calculating the length of the sequence.
Returns
-------
float
Relative frequency of G's and C's in the sequence.
See Also
--------
gc_frequency
Examples
--------
>>> from skbio import DNA
>>> DNA('ACGT').gc_content()
0.5
>>> DNA('ACGTACGT').gc_content()
0.5
>>> DNA('ACTTAGTT').gc_content()
0.25
>>> DNA('ACGT--..').gc_content()
0.5
>>> DNA('--..').gc_content()
0
`S` means `G` or `C`, so it counts:
>>> DNA('ASST').gc_content()
0.5
Other degenerates don't count:
>>> DNA('RYKMBDHVN').gc_content()
0.0
"""
return self.gc_frequency(relative=True)
@stable(as_of='0.4.0')
def gc_frequency(self, relative=False):
"""Calculate frequency of G's and C's in the sequence.
This calculates the minimum GC frequency, which corresponds to IUPAC
characters G, C, and S (which stands for G or C).
Parameters
----------
relative : bool, optional
        If ``False``, return the frequency of G, C, and S characters (i.e.,
        the count). If ``True``, return the relative frequency, i.e., the
        proportion of G, C, and S characters in the sequence. In this case the
sequence will also be degapped before the operation, so gap
characters will not be included when calculating the length of the
sequence.
Returns
-------
int or float
Either frequency (count) or relative frequency (proportion),
depending on `relative`.
See Also
--------
gc_content
Examples
--------
>>> from skbio import DNA
>>> DNA('ACGT').gc_frequency()
2
>>> DNA('ACGT').gc_frequency(relative=True)
0.5
>>> DNA('ACGT--..').gc_frequency(relative=True)
0.5
>>> DNA('--..').gc_frequency(relative=True)
0
`S` means `G` or `C`, so it counts:
>>> DNA('ASST').gc_frequency()
2
Other degenerates don't count:
>>> DNA('RYKMBDHVN').gc_frequency()
0
"""
counts = np.bincount(self._bytes,
minlength=self._number_of_extended_ascii_codes)
gc = counts[self._gc_codes].sum()
if relative:
seq = self.degap()
if len(seq) != 0:
gc /= len(seq)
return gc
_motifs = parent_motifs.copy()
@_motifs("purine-run")
def _motif_purine_run(sequence, min_length, ignore):
"""Identifies purine runs"""
return sequence.find_with_regex("([AGR]{%d,})" % min_length,
ignore=ignore)
@_motifs("pyrimidine-run")
def _motif_pyrimidine_run(sequence, min_length, ignore):
"""Identifies pyrimidine runs"""
return sequence.find_with_regex("([CTUY]{%d,})" % min_length,
                                    ignore=ignore)
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/sequence/_nucleotide_mixin.py | _nucleotide_mixin.py |
| 0.851845 | 0.379091 |
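An illustrative sketch of how the purine-run and pyrimidine-run motifs registered at the bottom of _nucleotide_mixin.py surface in the public API, via the concrete DNA class (which interpolates these motifs into find_motifs); the sequence is a made-up example:

from skbio import DNA

seq = DNA('ACGGGAAGTCCTT')
for loc in seq.find_motifs('purine-run', min_length=3):
    print(loc, str(seq[loc]))   # slices covering runs of purine characters (A/G/R)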
import numpy as np
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._grammared_sequence import (GrammaredSequence, DisableSubclassingMeta,
_motifs as parent_motifs)
class Protein(GrammaredSequence, metaclass=DisableSubclassingMeta):
r"""Store protein sequence data and optional associated metadata.
Only characters in the IUPAC protein character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the protein sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
interval_metadata : IntervalMetadata
Arbitrary interval metadata which applies to intervals within
a sequence to store interval features (such as protein domains).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC Protein characters. If
``False``, no characters will be converted. If a str, it will be
treated as a key into the positional metadata of the object. All
lowercase characters will be converted to uppercase, and a ``True``
value will be stored in a boolean array in the positional metadata
under the key.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC protein character set. If ``False``,
validation will not be performed. Turning off validation will improve
runtime performance. If invalid characters are present, however, there
is **no guarantee that operations performed on the resulting object
will work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
See Also
--------
GrammaredSequence
Notes
-----
Subclassing is disabled for Protein, because subclassing makes
it possible to change the alphabet, and certain methods rely on the
IUPAC alphabet. If a custom sequence alphabet is needed, inherit directly
from ``GrammaredSequence``.
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import Protein
>>> Protein('PAW')
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 PAW
Convert lowercase characters to uppercase:
>>> Protein('paW', lowercase=True)
Protein
--------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has definites: True
has stops: False
--------------------------
0 PAW
"""
__stop_codes = None
@classproperty
def _stop_codes(cls):
if cls.__stop_codes is None:
stops = cls.stop_chars
cls.__stop_codes = np.asarray([ord(s) for s in stops])
return cls.__stop_codes
@classproperty
@overrides(GrammaredSequence)
def alphabet(cls):
return super(Protein, cls).alphabet | cls.stop_chars
@classproperty
@overrides(GrammaredSequence)
def definite_chars(cls):
return set("ACDEFGHIKLMNOPQRSTUVWY")
@classproperty
@overrides(GrammaredSequence)
def degenerate_map(cls):
return {
"B": set("DN"),
"Z": set("EQ"),
"J": set("IL"),
"X": set("ACDEFGHIKLMNOPQRSTUVWY")
}
@classproperty
@stable(as_of="0.4.0")
def stop_chars(cls):
"""Return characters representing translation stop codons.
Returns
-------
set
Characters representing translation stop codons.
"""
return set('*')
@classproperty
@overrides(GrammaredSequence)
def gap_chars(cls):
return set('-.')
@classproperty
@overrides(GrammaredSequence)
def default_gap_char(cls):
return '-'
@property
def _motifs(self):
return _motifs
@stable(as_of="0.4.0")
def stops(self):
"""Find positions containing stop characters in the protein sequence.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` indicates a stop character is present
at that position in the protein sequence.
See Also
--------
has_stops
Examples
--------
>>> from skbio import Protein
>>> s = Protein('PAW')
>>> s.stops()
array([False, False, False], dtype=bool)
>>> s = Protein('PAW*E*')
>>> s.stops()
array([False, False, False, True, False, True], dtype=bool)
"""
return np.in1d(self._bytes, self._stop_codes)
@stable(as_of="0.4.0")
def has_stops(self):
"""Determine if the sequence contains one or more stop characters.
Returns
-------
bool
Indicates whether there are one or more occurrences of stop
characters in the protein sequence.
Examples
--------
>>> from skbio import Protein
>>> s = Protein('PAW')
>>> s.has_stops()
False
>>> s = Protein('PAW*E*')
>>> s.has_stops()
True
"""
return bool(self.stops().any())
@overrides(GrammaredSequence)
def _repr_stats(self):
"""Define custom statistics to display in the sequence's repr."""
stats = super(Protein, self)._repr_stats()
stats.append(('has stops', '%r' % self.has_stops()))
return stats
_motifs = parent_motifs.copy()
@_motifs("N-glycosylation")
def _motif_nitro_glycosylation(sequence, min_length, ignore):
"""Identifies N-glycosylation runs"""
return sequence.find_with_regex("(N[^PX][ST][^PX])", ignore=ignore)
# Leave this at the bottom
_motifs.interpolate(Protein, "find_motifs")
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/sequence/_protein.py | _protein.py |
| 0.924649 | 0.737796 |
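A short sketch of the stop-character helpers and the N-glycosylation motif defined in _protein.py above; the peptide string is a made-up example:

from skbio import Protein

p = Protein('MNASTKPAW*')
print(p.has_stops())             # True: the trailing '*' is a stop character
for loc in p.find_motifs('N-glycosylation'):
    print(loc, str(p[loc]))      # matches N[^PX][ST][^PX], here 'NAST'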
import io
def is_binary_file(file):
return isinstance(file, (io.BufferedReader, io.BufferedWriter,
io.BufferedRandom))
# Everything beyond this point will be some kind of hack needed to make
# everything work. It's not pretty and it doesn't make great sense much
# of the time. I am very sorry to the poor soul who has to read beyond.
class FlushDestructorMixin:
def __del__(self):
# By default, the destructor calls close(), which flushes and closes
# the underlying buffer. Override to only flush.
if not self.closed:
self.flush()
class SaneTextIOWrapper(FlushDestructorMixin, io.TextIOWrapper):
pass
class WrappedBufferedRandom(FlushDestructorMixin, io.BufferedRandom):
pass
class CompressedMixin(FlushDestructorMixin):
"""Act as a bridge between worlds"""
def __init__(self, before_file, *args, **kwargs):
self.streamable = kwargs.pop('streamable', True)
self._before_file = before_file
super(CompressedMixin, self).__init__(*args, **kwargs)
@property
def closed(self):
return self.raw.closed or self._before_file.closed
def close(self):
super(CompressedMixin, self).close()
# The above will not usually close before_file. We want the
# decompression to be transparent, so we don't want users to deal with
# this edge case. Instead we can just close the original now that we
# are being closed.
self._before_file.close()
class CompressedBufferedReader(CompressedMixin, io.BufferedReader):
pass
class CompressedBufferedWriter(CompressedMixin, io.BufferedWriter):
pass
class IterableStringReaderIO(io.StringIO):
def __init__(self, iterable, newline):
self._iterable = iterable
super(IterableStringReaderIO, self).__init__(''.join(iterable),
newline=newline)
class IterableStringWriterIO(IterableStringReaderIO):
def close(self):
if not self.closed:
backup = self.tell()
self.seek(0)
for line in self:
self._iterable.append(line)
self.seek(backup)
        super(IterableStringWriterIO, self).close()
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/_fileobject.py | _fileobject.py |
| 0.515376 | 0.182007 |
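A rough sketch of the write-back behaviour of IterableStringWriterIO above: on close(), everything written to the buffer is appended, line by line, to the backing list. Note that skbio.io._fileobject is an internal module, so this is illustration only, not supported API:

from skbio.io._fileobject import IterableStringWriterIO

lines = []
fh = IterableStringWriterIO(lines, newline='')
fh.write('>seq1\nACGT\n')
fh.close()
print(lines)   # the buffered text ends up back in `lines`, split into lines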
class IOSourceError(Exception):
"""Raised when a file source cannot be resolved."""
pass
class FileFormatError(Exception):
"""Raised when a file cannot be parsed."""
pass
class UnrecognizedFormatError(FileFormatError):
"""Raised when a file's format is unknown, ambiguous, or unidentifiable."""
pass
class GenBankFormatError(FileFormatError):
"""Raised when a ``genbank`` formatted file cannot be parsed."""
pass
class EMBLFormatError(FileFormatError):
"""Raised when a ``EMBL`` formatted file cannot be parsed."""
pass
class GFF3FormatError(FileFormatError):
"""Raised when a ``GFF3`` formatted file cannot be parsed."""
pass
class BLAST7FormatError(FileFormatError):
"""Raised when a ``blast7`` formatted file cannot be parsed."""
pass
class ClustalFormatError(FileFormatError):
"""Raised when a ``clustal`` formatted file cannot be parsed."""
pass
class FASTAFormatError(FileFormatError):
"""Raised when a ``fasta`` formatted file cannot be parsed."""
pass
class QUALFormatError(FASTAFormatError):
"""Raised when a ``qual`` formatted file cannot be parsed."""
pass
class LSMatFormatError(FileFormatError):
"""Raised when a ``lsmat`` formatted file cannot be parsed."""
pass
class OrdinationFormatError(FileFormatError):
"""Raised when an ``ordination`` formatted file cannot be parsed."""
pass
class NewickFormatError(FileFormatError):
"""Raised when a ``newick`` formatted file cannot be parsed."""
pass
class FASTQFormatError(FileFormatError):
"""Raised when a ``fastq`` formatted file cannot be parsed."""
pass
class PhylipFormatError(FileFormatError):
"""Raised when a ``phylip`` formatted file cannot be parsed.
May also be raised when an object (e.g., ``TabularMSA``) cannot be written
in ``phylip`` format.
"""
pass
class QSeqFormatError(FileFormatError):
"""Raised when a ``qseq`` formatted file cannot be parsed."""
pass
class StockholmFormatError(FileFormatError):
"""Raised when a ``stockholm`` formatted file cannot be parsed."""
pass
class InvalidRegistrationError(Exception):
"""Raised if function doesn't meet the expected API of its registration."""
pass
class DuplicateRegistrationError(Exception):
"""Raised when a function is already registered in skbio.io"""
    pass
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/_exception.py | _exception.py |
| 0.850903 | 0.321407 |
import re
from functools import partial
from skbio.io import create_format, GenBankFormatError
from skbio.io.format._base import (
_get_nth_sequence, _line_generator, _too_many_blanks)
from skbio.util._misc import chunk_str
from skbio.sequence import Sequence, DNA, RNA, Protein
from skbio.io.format._sequence_feature_vocabulary import (
_yield_section, _parse_section_default, _serialize_section_default,
_parse_feature_table, _serialize_feature_table)
genbank = create_format('genbank')
# This list is ordered
# used to read and write genbank file.
_HEADERS = ['LOCUS',
'DEFINITION',
'ACCESSION',
'VERSION',
'DBSOURCE',
'DBLINK',
'KEYWORDS',
'SOURCE',
'REFERENCE',
'COMMENT',
'FEATURES',
'ORIGIN']
@genbank.sniffer()
def _genbank_sniffer(fh):
# check the 1st real line is a valid LOCUS line
if _too_many_blanks(fh, 5):
return False, {}
try:
line = next(_line_generator(fh, skip_blanks=True, strip=False))
except StopIteration:
return False, {}
try:
_parse_locus([line])
except GenBankFormatError:
return False, {}
return True, {}
@genbank.reader(None)
def _genbank_to_generator(fh, constructor=None, **kwargs):
for record in _parse_genbanks(fh):
yield _construct(record, constructor, **kwargs)
@genbank.reader(Sequence)
def _genbank_to_sequence(fh, seq_num=1, **kwargs):
record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
return _construct(record, Sequence, **kwargs)
@genbank.reader(DNA)
def _genbank_to_dna(fh, seq_num=1, **kwargs):
record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
return _construct(record, DNA, **kwargs)
@genbank.reader(RNA)
def _genbank_to_rna(fh, seq_num=1, **kwargs):
record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
return _construct(record, RNA, **kwargs)
@genbank.reader(Protein)
def _genbank_to_protein(fh, seq_num=1, **kwargs):
record = _get_nth_sequence(_parse_genbanks(fh), seq_num)
return _construct(record, Protein, **kwargs)
@genbank.writer(None)
def _generator_to_genbank(obj, fh):
for obj_i in obj:
_serialize_single_genbank(obj_i, fh)
@genbank.writer(Sequence)
def _sequence_to_genbank(obj, fh):
_serialize_single_genbank(obj, fh)
@genbank.writer(DNA)
def _dna_to_genbank(obj, fh):
_serialize_single_genbank(obj, fh)
@genbank.writer(RNA)
def _rna_to_genbank(obj, fh):
_serialize_single_genbank(obj, fh)
@genbank.writer(Protein)
def _protein_to_genbank(obj, fh):
_serialize_single_genbank(obj, fh)
def _construct(record, constructor=None, **kwargs):
'''Construct the object of Sequence, DNA, RNA, or Protein.
'''
seq, md, imd = record
if 'lowercase' not in kwargs:
kwargs['lowercase'] = True
if constructor is None:
unit = md['LOCUS']['unit']
if unit == 'bp':
            # RNA mol type has T instead of U in GenBank records from NCBI
constructor = DNA
elif unit == 'aa':
constructor = Protein
if constructor == RNA:
return DNA(
seq, metadata=md, interval_metadata=imd, **kwargs).transcribe()
else:
return constructor(
seq, metadata=md, interval_metadata=imd, **kwargs)
def _parse_genbanks(fh):
data_chunks = []
for line in _line_generator(fh, skip_blanks=True, strip=False):
if line.startswith('//'):
yield _parse_single_genbank(data_chunks)
data_chunks = []
else:
data_chunks.append(line)
def _parse_single_genbank(chunks):
metadata = {}
interval_metadata = None
sequence = ''
# each section starts with a HEADER without indent.
section_splitter = _yield_section(
lambda x: not x[0].isspace(), strip=False)
for section in section_splitter(chunks):
header = section[0].split(None, 1)[0]
parser = _PARSER_TABLE.get(
header, _parse_section_default)
if header == 'FEATURES':
# This requires 'LOCUS' line parsed before 'FEATURES', which should
# be true and is implicitly checked by the sniffer.
parser = partial(
parser, length=metadata['LOCUS']['size'])
parsed = parser(section)
# reference can appear multiple times
if header == 'REFERENCE':
if header in metadata:
metadata[header].append(parsed)
else:
metadata[header] = [parsed]
elif header == 'ORIGIN':
sequence = parsed
elif header == 'FEATURES':
interval_metadata = parsed
else:
metadata[header] = parsed
return sequence, metadata, interval_metadata
def _serialize_single_genbank(obj, fh):
'''Write a GenBank record.
    Always write it in the NCBI canonical way:
1. sequence in lowercase
2. 'u' as 't' even in RNA molecules.
Parameters
----------
obj : Sequence or its child class
'''
# write out the headers
md = obj.metadata
for header in _HEADERS:
serializer = _SERIALIZER_TABLE.get(
header, _serialize_section_default)
if header in md:
out = serializer(header, md[header])
            # test if 'out' is an iterator.
# cf. Effective Python Item 17
if iter(out) is iter(out):
for s in out:
fh.write(s)
else:
fh.write(out)
if header == 'FEATURES':
if obj.has_interval_metadata():
# magic number 21: the amount of indentation before
# feature table starts as defined by INSDC
indent = 21
fh.write('{header:<{indent}}Location/Qualifiers\n'.format(
header=header, indent=indent))
for s in serializer(obj.interval_metadata._intervals, indent):
fh.write(s)
# write out the sequence
# always write RNA seq as DNA
if isinstance(obj, RNA):
obj = obj.reverse_transcribe()
# always write in lowercase
seq_str = str(obj).lower()
for s in _serialize_origin(seq_str):
fh.write(s)
fh.write('//\n')
def _parse_locus(lines):
'''Parse the line LOCUS.
Format:
# Positions Contents
# --------- --------
# 00:06 LOCUS
# 06:12 spaces
# 12:?? Locus name
# ??:?? space
# ??:29 Length of sequence, right-justified
# 29:33 space, bp/aa/rc, space
# 33:41 molecule type (can be blank): DNA, ssDNA, dsRNA, tRNA, etc.
# 41:42 space
# 42:51 Blank (implies linear), linear or circular
# 51:52 space
# 52:55 The division code (e.g. BCT, VRL, INV)
# 55:62 space
# 62:73 Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991)
'''
line = lines[0]
pattern = (r'LOCUS'
r' +([^\s]+)'
r' +([0-9]+)'
r' +(bp|aa|rc)'
r' +(.*DNA|.*RNA)?'
r' +(linear|circular)?'
r' +(?!.*DNA|.*RNA)([A-Z]{3})'
r' +([0-9]{2}-[A-Z]{3}-[0-9]{4})')
matches = re.match(pattern, line)
try:
res = dict(zip(
['locus_name', 'size', 'unit', 'mol_type',
'shape', 'division', 'date'],
matches.groups()))
except Exception:
raise GenBankFormatError(
"Could not parse the LOCUS line:\n%s" % line)
res['size'] = int(res['size'])
return res
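# Illustration (added note, not part of the original module): for a made-up line
# such as
#   LOCUS       AB000001  12 bp    DNA     linear   BCT 01-JAN-2000
# the regex and the int() conversion above yield a dict along the lines of
#   {'locus_name': 'AB000001', 'size': 12, 'unit': 'bp', 'mol_type': 'DNA',
#    'shape': 'linear', 'division': 'BCT', 'date': '01-JAN-2000'}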
def _serialize_locus(header, obj, indent=12):
'''Serialize LOCUS line.
Parameters
----------
obj : dict
'''
# use 'or' to convert None to ''
kwargs = {k: v or '' for k, v in obj.items()}
return ('{header:<{indent}}{locus_name} {size} {unit}'
' {mol_type} {shape} {division} {date}\n').format(
header=header, indent=indent, **kwargs)
def _parse_reference(lines):
'''Parse single REFERENCE field.
'''
res = {}
    # magic number 11: the non-keyworded lines in REFERENCE
# are at least indented with 11 spaces.
feature_indent = ' ' * 11
section_splitter = _yield_section(
lambda x: not x.startswith(feature_indent),
skip_blanks=True, strip=False)
for section in section_splitter(lines):
label, data = _parse_section_default(
section, join_delimiter=' ', return_label=True)
res[label] = data
return res
def _serialize_reference(header, obj, indent=12):
'''Serialize REFERENCE.
Parameters
----------
obj : list
'''
padding = ' '
sort_order = {'REFERENCE': 0, 'AUTHORS': 1,
'TITLE': 2, 'JOURNAL': 3, 'PUBMED': 4}
for obj_i in obj:
ref_i = []
for h in sorted(obj_i, key=lambda k: sort_order.get(k, 100)):
if h == header:
s = '{h:<{indent}}{ref}'.format(
h=h, indent=indent, ref=obj_i[h])
else:
s = '{h:<{indent}}{value}'.format(
h=padding + h, indent=indent, value=obj_i[h])
ref_i.append(s)
yield '%s\n' % '\n'.join(ref_i)
def _parse_source(lines):
'''Parse SOURCE field.
'''
res = {}
    # magic number 11: the non-keyworded lines in SOURCE
# are at least indented with 11 spaces.
feature_indent = ' ' * 11
section_splitter = _yield_section(
lambda x: not x.startswith(feature_indent),
skip_blanks=True, strip=False)
# SOURCE line is not informative; skip it
_, organism = list(section_splitter(lines))
res['ORGANISM'] = organism[0].split(None, 1)[1].strip()
res['taxonomy'] = ' '.join([i.strip() for i in organism[1:]])
return res
def _serialize_source(header, obj, indent=12):
'''Serialize SOURCE.
Parameters
----------
obj : dict
'''
s = ('{header:<{indent}}{organism}\n'
'{h:<{indent}}{organism}\n'
'{space}{taxonomy}\n').format(
header=header, indent=indent,
h=' ORGANISM', organism=obj['ORGANISM'],
space=' ' * 12, taxonomy=obj['taxonomy'])
return s
def _parse_origin(lines):
'''Parse the ORIGIN section for sequence.
'''
sequence = []
for line in lines:
if line.startswith('ORIGIN'):
continue
        # remove the number at the beginning of each line
items = line.split()
sequence.append(''.join(items[1:]))
return ''.join(sequence)
def _serialize_origin(seq, indent=9):
'''Serialize seq to ORIGIN.
Parameters
----------
seq : str
'''
n = 1
line_size = 60
frag_size = 10
for i in range(0, len(seq), line_size):
line = seq[i:i+line_size]
s = '{n:>{indent}} {s}\n'.format(
n=n, indent=indent, s=chunk_str(line, frag_size, ' '))
if n == 1:
s = 'ORIGIN\n' + s
n = n + line_size
yield s
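# Illustration (added note): for a 70-character sequence the generator above
# yields two lines numbered 1 and 61 (n grows by line_size, i.e. 60), each
# carrying up to six 10-character chunks, with the first yielded string also
# prefixed by the ORIGIN header line:
#         1 aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa aaaaaaaaaa
#        61 aaaaaaaaaa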
_PARSER_TABLE = {
'LOCUS': _parse_locus,
'SOURCE': _parse_source,
'REFERENCE': _parse_reference,
'FEATURES': _parse_feature_table,
'ORIGIN': _parse_origin}
_SERIALIZER_TABLE = {
'LOCUS': _serialize_locus,
'SOURCE': _serialize_source,
'REFERENCE': _serialize_reference,
    'FEATURES': _serialize_feature_table}
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/genbank.py | genbank.py |
| 0.57093 | 0.143938 |
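A sketch of reading a record through the GenBank readers registered above; the record text is a made-up, minimal example that is just long enough to satisfy the LOCUS and ORIGIN parsers:

import io
from skbio import DNA

record = io.StringIO(
    'LOCUS       AB000001                  12 bp    DNA     linear   BCT 01-JAN-2000\n'
    'DEFINITION  made-up example record.\n'
    'ORIGIN\n'
    '        1 acgtacgtac gt\n'
    '//\n'
)
seq = DNA.read(record, format='genbank')
print(seq.metadata['LOCUS']['locus_name'], len(seq))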
import pandas as pd
from skbio.io import create_format
taxdump = create_format('taxdump')
_taxdump_column_schemes = {
'nodes_slim': {
'tax_id': int,
'parent_tax_id': int,
'rank': str
},
'nodes': {
'tax_id': int,
'parent_tax_id': int,
'rank': str,
'embl_code': str,
'division_id': int,
'inherited_div_flag': bool,
'genetic_code_id': int,
'inherited_GC_flag': bool,
'mitochondrial_genetic_code_id': int,
'inherited_MGC_flag': bool,
'GenBank_hidden_flag': bool,
'hidden_subtree_root_flag': bool,
'comments': str
},
'names': {
'tax_id': int,
'name_txt': str,
'unique_name': str,
'name_class': str
},
'division': {
'division_id': int,
'division_cde': str,
'division_name': str,
'comments': str
},
'gencode': {
'genetic_code_id': int,
'abbreviation': str,
'name': str,
'cde': str,
'starts': str
}
}
_taxdump_column_schemes['nodes_new'] = dict(
_taxdump_column_schemes['nodes'], **{
'plastid_genetic_code_id': bool,
'inherited_PGC_flag': bool,
'specified_species': bool,
'hydrogenosome_genetic_code_id': int,
'inherited_HGC_flag': bool
})
@taxdump.reader(pd.DataFrame, monkey_patch=False)
def _taxdump_to_data_frame(fh, scheme):
'''Read a taxdump file into a data frame.
Parameters
----------
fh : file handle
Input taxdump file
scheme : str
Name of column scheme
Returns
-------
pd.DataFrame
Parsed table
'''
if isinstance(scheme, str):
if scheme not in _taxdump_column_schemes:
raise ValueError(f'Invalid taxdump column scheme: "{scheme}".')
scheme = _taxdump_column_schemes[scheme]
names = list(scheme.keys())
try:
return pd.read_csv(
fh, sep='\t\\|(?:\t|$)', engine='python', index_col=0,
names=names, dtype=scheme, usecols=range(len(names)))
except ValueError:
        raise ValueError('Invalid taxdump file format.')
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/taxdump.py | taxdump.py |
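# Usage sketch for the taxdump reader above: parse an in-memory string shaped
# like NCBI's nodes.dmp with the 'nodes_slim' column scheme. The two data rows
# are invented for illustration.
def _example_read_nodes_slim():
    import io
    nodes_dmp = ("1\t|\t1\t|\tno rank\t|\n"
                 "2\t|\t131567\t|\tsuperkingdom\t|\n")
    df = _taxdump_to_data_frame(io.StringIO(nodes_dmp), scheme='nodes_slim')
    return df    # indexed by tax_id, columns: parent_tax_id, rank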
import re
import warnings
import numpy as np
from skbio.util import cardinal_to_ordinal
_whitespace_regex = re.compile(r'\s')
_newline_regex = re.compile(r'\n')
def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to decode "
"quality scores.",
"Decoding Solexa quality scores is not currently supported, "
"as quality scores are always stored as Phred scores in "
"scikit-bio. Please see the following scikit-bio issue to "
"track progress on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual = np.frombuffer(qual_str.encode('ascii'),
dtype=np.uint8) - phred_offset
if np.any((qual > phred_range[1]) | (qual < phred_range[0])):
raise ValueError("Decoded Phred score is out of range [%d, %d]."
% (phred_range[0], phred_range[1]))
return qual
def _encode_phred_to_qual(phred, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to encode "
"Phred scores.",
"Encoding Solexa quality scores is not currently supported. "
"Please see the following scikit-bio issue to track progress "
"on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual_chars = []
for score in phred:
if score < phred_range[0]:
raise ValueError("Phred score %d is out of range [%d, %d]."
% (score, phred_range[0], phred_range[1]))
if score > phred_range[1]:
warnings.warn(
"Phred score %d is out of targeted range [%d, %d]. Converting "
"to %d." % (score, phred_range[0], phred_range[1],
phred_range[1]), UserWarning)
score = phred_range[1]
qual_chars.append(chr(score + phred_offset))
return ''.join(qual_chars)
def _get_phred_offset_and_range(variant, phred_offset, errors):
if variant is None and phred_offset is None:
raise ValueError(errors[0])
if variant is not None and phred_offset is not None:
raise ValueError(
"Cannot provide both `variant` and `phred_offset`.")
if variant is not None:
if variant == 'sanger':
phred_offset = 33
phred_range = (0, 93)
elif variant == 'illumina1.3':
phred_offset = 64
phred_range = (0, 62)
elif variant == 'illumina1.8':
phred_offset = 33
phred_range = (0, 62)
elif variant == 'solexa':
phred_offset = 64
phred_range = (-5, 62)
            # The Solexa variant is recognized but not currently supported,
            # so raise before the offset and range assigned above are used.
            raise ValueError(errors[1])
else:
raise ValueError("Unrecognized variant %r." % variant)
else:
if not (33 <= phred_offset <= 126):
raise ValueError(
"`phred_offset` %d is out of printable ASCII character range."
% phred_offset)
phred_range = (0, 126 - phred_offset)
return phred_offset, phred_range
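# Usage sketch for the two quality-score helpers above: round-trip a few
# Phred scores through the Sanger encoding (offset 33). The scores themselves
# are arbitrary values chosen for illustration.
def _example_phred_round_trip():
    qual = _encode_phred_to_qual([0, 20, 40], variant='sanger')   # '!5I'
    phred = _decode_qual_to_phred(qual, variant='sanger')
    return qual, phred    # ('!5I', array([0, 20, 40]))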
def _get_nth_sequence(generator, seq_num):
# i is set to None so that an empty generator will not result in an
# undefined variable when compared to seq_num.
i = None
if seq_num is None or seq_num < 1:
raise ValueError('Invalid sequence number (`seq_num`=%s). `seq_num`'
' must be between 1 and the number of sequences in'
' the file.' % str(seq_num))
try:
for i, seq in zip(range(1, seq_num + 1), generator):
pass
finally:
generator.close()
if i == seq_num:
return seq
raise ValueError('Reached end of file before finding the %s sequence.'
% cardinal_to_ordinal(seq_num))
def _parse_fasta_like_header(line):
id_ = ''
desc = ''
header = line[1:].rstrip()
if header:
if header[0].isspace():
# no id
desc = header.lstrip()
else:
header_tokens = header.split(None, 1)
if len(header_tokens) == 1:
# no description
id_ = header_tokens[0]
else:
id_, desc = header_tokens
return id_, desc
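# Usage sketch for _parse_fasta_like_header above: split a '>' header line
# into its ID and description. The header string is invented for
# illustration.
def _example_parse_header():
    id_, desc = _parse_fasta_like_header('>seq1 Homo sapiens mRNA\n')
    return id_, desc    # ('seq1', 'Homo sapiens mRNA')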
def _format_fasta_like_records(generator, id_whitespace_replacement,
description_newline_replacement, require_qual,
lowercase=None):
if ((id_whitespace_replacement is not None and
'\n' in id_whitespace_replacement) or
(description_newline_replacement is not None and
'\n' in description_newline_replacement)):
raise ValueError(
"Newline character (\\n) cannot be used to replace whitespace in "
"sequence IDs, nor to replace newlines in sequence descriptions.")
for idx, seq in enumerate(generator):
if len(seq) < 1:
raise ValueError(
"%s sequence does not contain any characters (i.e., it is an "
"empty/blank sequence). Writing empty sequences is not "
"supported." % cardinal_to_ordinal(idx + 1))
if 'id' in seq.metadata:
id_ = '%s' % seq.metadata['id']
else:
id_ = ''
if id_whitespace_replacement is not None:
id_ = _whitespace_regex.sub(id_whitespace_replacement, id_)
if 'description' in seq.metadata:
desc = '%s' % seq.metadata['description']
else:
desc = ''
if description_newline_replacement is not None:
desc = _newline_regex.sub(description_newline_replacement, desc)
if desc:
header = '%s %s' % (id_, desc)
else:
header = id_
if require_qual and 'quality' not in seq.positional_metadata:
raise ValueError(
"Cannot write %s sequence because it does not have quality "
"scores associated with it." % cardinal_to_ordinal(idx + 1))
qual = None
if 'quality' in seq.positional_metadata:
qual = seq.positional_metadata['quality'].values
if lowercase is not None:
seq_str = seq.lowercase(lowercase)
else:
seq_str = str(seq)
yield header, "%s" % seq_str, qual
def _line_generator(fh, skip_blanks=False, strip=True):
for line in fh:
if strip:
line = line.strip()
skip = False
if skip_blanks:
skip = line.isspace() or not line
if not skip:
yield line
def _too_many_blanks(fh, max_blanks):
count = 0
too_many = False
for line in _line_generator(fh, skip_blanks=False):
if line:
break
else:
count += 1
if count > max_blanks:
too_many = True
break
fh.seek(0)
    return too_many
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/_base.py | _base.py |
import functools
import contextlib
import pandas as pd
_possible_columns = {'qseqid': str, 'qgi': float, 'qacc': str, 'qaccver': str,
'qlen': float, 'sseqid': str, 'sallseqid': str,
'sgi': float, 'sallgi': float, 'sacc': str,
'saccver': str, 'sallacc': str, 'slen': float,
'qstart': float, 'qend': float, 'sstart': float,
'send': float, 'qseq': str, 'sseq': str,
'evalue': float, 'bitscore': float, 'score': float,
'length': float, 'pident': float, 'nident': float,
'mismatch': float, 'positive': float, 'gapopen': float,
'gaps': float, 'ppos': float, 'frames': str,
'qframe': float, 'sframe': float, 'btop': float,
'staxids': str, 'sscinames': str, 'scomnames': str,
'sblastnames': str, 'sskingdoms': str, 'stitle': str,
'salltitles': str, 'sstrand': str, 'qcovs': float,
'qcovhsp': float}
def _parse_blast_data(fh, columns, error, error_message, comment=None,
skiprows=None):
read_csv = functools.partial(pd.read_csv, na_values='N/A', sep='\t',
header=None, keep_default_na=False,
comment=comment, skiprows=skiprows)
# HACK for https://github.com/pandas-dev/pandas/issues/14418
# this avoids closing the `fh`, whose lifetime isn't the responsibility
# of this parser
with _noop_close(fh) as fh:
lineone = read_csv(fh, nrows=1)
if len(lineone.columns) != len(columns):
raise error(error_message % (len(columns), len(lineone.columns)))
fh.seek(0)
return read_csv(fh, names=columns, dtype=_possible_columns)
# HACK for https://github.com/pandas-dev/pandas/issues/14418
@contextlib.contextmanager
def _noop_close(fh):
backup = fh.close
fh.close = lambda: None
try:
yield fh
finally:
        fh.close = backup
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/_blast.py | _blast.py |
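# Usage sketch for _parse_blast_data above, which is shared with the
# tabular BLAST readers (the blast+7 reader later in this document imports
# it). The column names, error type, and the two-row table are invented for
# illustration.
def _example_parse_two_columns():
    import io
    tsv = "q1\ts1\nq2\ts2\n"
    return _parse_blast_data(
        io.StringIO(tsv), columns=['qseqid', 'sseqid'], error=ValueError,
        error_message='Expected %d columns, found %d.')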
import csv
import numpy as np
from skbio.stats.distance import DissimilarityMatrix, DistanceMatrix
from skbio.io import create_format, LSMatFormatError
lsmat = create_format('lsmat')
@lsmat.sniffer()
def _lsmat_sniffer(fh):
header = _find_header(fh)
if header is not None:
try:
dialect = csv.Sniffer().sniff(header)
delimiter = dialect.delimiter
ids = _parse_header(header, delimiter)
first_id, _ = next(_parse_data(fh, delimiter), (None, None))
if first_id is not None and first_id == ids[0]:
return True, {'delimiter': delimiter}
except (csv.Error, LSMatFormatError):
pass
return False, {}
@lsmat.reader(DissimilarityMatrix)
def _lsmat_to_dissimilarity_matrix(fh, delimiter='\t'):
return _lsmat_to_matrix(DissimilarityMatrix, fh, delimiter)
@lsmat.reader(DistanceMatrix)
def _lsmat_to_distance_matrix(fh, delimiter='\t'):
return _lsmat_to_matrix(DistanceMatrix, fh, delimiter)
@lsmat.writer(DissimilarityMatrix)
def _dissimilarity_matrix_to_lsmat(obj, fh, delimiter='\t'):
_matrix_to_lsmat(obj, fh, delimiter)
@lsmat.writer(DistanceMatrix)
def _distance_matrix_to_lsmat(obj, fh, delimiter='\t'):
_matrix_to_lsmat(obj, fh, delimiter)
def _lsmat_to_matrix(cls, fh, delimiter):
# We aren't using np.loadtxt because it uses *way* too much memory
    # (e.g., a 2GB matrix eats up 10GB, which then isn't freed after parsing
# has finished). See:
# http://mail.scipy.org/pipermail/numpy-tickets/2012-August/006749.html
# Strategy:
# - find the header
# - initialize an empty ndarray
# - for each row of data in the input file:
# - populate the corresponding row in the ndarray with floats
header = _find_header(fh)
if header is None:
raise LSMatFormatError(
"Could not find a header line containing IDs in the "
"dissimilarity matrix file. Please verify that the file is "
"not empty.")
ids = _parse_header(header, delimiter)
num_ids = len(ids)
data = np.empty((num_ids, num_ids), dtype=np.float64)
row_idx = -1
for row_idx, (row_id, row_data) in enumerate(_parse_data(fh, delimiter)):
if row_idx >= num_ids:
# We've hit a nonempty line after we already filled the data
# matrix. Raise an error because we shouldn't ignore extra data.
raise LSMatFormatError(
"Encountered extra row(s) without corresponding IDs in "
"the header.")
num_vals = len(row_data)
if num_vals != num_ids:
raise LSMatFormatError(
"There are %d value(s) in row %d, which is not equal to the "
"number of ID(s) in the header (%d)." %
(num_vals, row_idx + 1, num_ids))
expected_id = ids[row_idx]
if row_id == expected_id:
data[row_idx, :] = np.asarray(row_data, dtype=float)
else:
raise LSMatFormatError(
"Encountered mismatched IDs while parsing the "
"dissimilarity matrix file. Found %r but expected "
"%r. Please ensure that the IDs match between the "
"dissimilarity matrix header (first row) and the row "
"labels (first column)." % (str(row_id), str(expected_id)))
if row_idx != num_ids - 1:
raise LSMatFormatError("Expected %d row(s) of data, but found %d." %
(num_ids, row_idx + 1))
return cls(data, ids)
def _find_header(fh):
header = None
for line in fh:
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith('#'):
# Don't strip the header because the first delimiter might be
# whitespace (e.g., tab).
header = line
break
return header
def _parse_header(header, delimiter):
tokens = header.rstrip().split(delimiter)
if tokens[0]:
raise LSMatFormatError(
"Header must start with delimiter %r." % str(delimiter))
return [e.strip() for e in tokens[1:]]
def _parse_data(fh, delimiter):
for line in fh:
stripped_line = line.strip()
if not stripped_line:
continue
tokens = line.rstrip().split(delimiter)
id_ = tokens[0].strip()
yield id_, tokens[1:]
def _matrix_to_lsmat(obj, fh, delimiter):
delimiter = "%s" % delimiter
ids = obj.ids
fh.write(_format_ids(ids, delimiter))
fh.write('\n')
for id_, vals in zip(ids, obj.data):
fh.write("%s" % id_)
fh.write(delimiter)
fh.write(delimiter.join(np.asarray(vals, dtype=str)))
fh.write('\n')
def _format_ids(ids, delimiter):
    return delimiter.join([''] + list(ids))
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/lsmat.py | lsmat.py |
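# Usage sketch for the lsmat helpers above: write a small DistanceMatrix with
# the serializer and read it back with the parser. The IDs and values are
# invented for illustration.
def _example_lsmat_round_trip():
    import io
    dm = DistanceMatrix([[0.0, 0.5], [0.5, 0.0]], ids=['a', 'b'])
    fh = io.StringIO()
    _matrix_to_lsmat(dm, fh, '\t')
    fh.seek(0)
    return _lsmat_to_distance_matrix(fh, delimiter='\t')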
from skbio.alignment import TabularMSA
from skbio.io import create_format, PhylipFormatError
from skbio.util._misc import chunk_str
phylip = create_format('phylip')
@phylip.sniffer()
def _phylip_sniffer(fh):
# Strategy:
# Read the header and a single sequence; verify that the sequence length
# matches the header information. Do not verify that the total number of
# lines matches the header information, since that would require reading
# the whole file.
try:
header = next(_line_generator(fh))
_, seq_len = _validate_header(header)
line = next(_line_generator(fh))
_validate_line(line, seq_len)
except (StopIteration, PhylipFormatError):
return False, {}
return True, {}
@phylip.reader(TabularMSA)
def _phylip_to_tabular_msa(fh, constructor=None):
if constructor is None:
raise ValueError("Must provide `constructor`.")
seqs = []
index = []
for seq, ID in _parse_phylip_raw(fh):
seqs.append(constructor(seq))
index.append(ID)
return TabularMSA(seqs, index=index)
@phylip.writer(TabularMSA)
def _tabular_msa_to_phylip(obj, fh):
sequence_count = obj.shape.sequence
if sequence_count < 1:
raise PhylipFormatError(
"TabularMSA can only be written in PHYLIP format if there is at "
"least one sequence in the alignment.")
sequence_length = obj.shape.position
if sequence_length < 1:
raise PhylipFormatError(
"TabularMSA can only be written in PHYLIP format if there is at "
"least one position in the alignment.")
chunk_size = 10
labels = [str(label) for label in obj.index]
for label in labels:
if len(label) > chunk_size:
raise PhylipFormatError(
"``TabularMSA`` can only be written in PHYLIP format if all "
"sequence index labels have %d or fewer characters. Found "
"sequence with index label '%s' that exceeds this limit. Use "
"``TabularMSA.reassign_index`` to assign shorter index labels."
% (chunk_size, label))
fh.write('{0:d} {1:d}\n'.format(sequence_count, sequence_length))
fmt = '{0:%d}{1}\n' % chunk_size
for label, seq in zip(labels, obj):
chunked_seq = chunk_str(str(seq), chunk_size, ' ')
fh.write(fmt.format(label, chunked_seq))
def _validate_header(header):
header_vals = header.split()
try:
n_seqs, seq_len = [int(x) for x in header_vals]
if n_seqs < 1 or seq_len < 1:
raise PhylipFormatError(
'The number of sequences and the length must be positive.')
except ValueError:
raise PhylipFormatError(
'Found non-header line when attempting to read the 1st record '
'(header line should have two space-separated integers): '
'"%s"' % header)
return n_seqs, seq_len
def _validate_line(line, seq_len):
if not line:
raise PhylipFormatError("Empty lines are not allowed.")
ID = line[:10].strip()
seq = line[10:].replace(' ', '')
if len(seq) != seq_len:
raise PhylipFormatError(
"The length of sequence %s is not %s as specified in the header."
% (ID, seq_len))
return (seq, ID)
def _parse_phylip_raw(fh):
"""Raw parser for PHYLIP files.
Returns a list of raw (seq, id) values. It is the responsibility of the
caller to construct the correct in-memory object to hold the data.
"""
# Note: this returns the full data instead of yielding each sequence,
# because the header specifies the number of sequences, so the file cannot
# be validated until it's read completely.
# File should have a single header on the first line.
try:
header = next(_line_generator(fh))
except StopIteration:
raise PhylipFormatError("This file is empty.")
n_seqs, seq_len = _validate_header(header)
# All following lines should be ID+sequence. No blank lines are allowed.
data = []
for line in _line_generator(fh):
data.append(_validate_line(line, seq_len))
if len(data) != n_seqs:
raise PhylipFormatError(
"The number of sequences is not %s " % n_seqs +
"as specified in the header.")
return data
def _line_generator(fh):
"""Just remove linebreak characters and yield lines.
"""
for line in fh:
        yield line.rstrip('\n')
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/phylip.py | phylip.py |
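# Usage sketch for the PHYLIP writer and reader above: round-trip a tiny
# alignment. DNA is imported here only for the sketch; the sequences and
# labels are invented for illustration.
def _example_phylip_round_trip():
    import io
    from skbio import DNA
    msa = TabularMSA([DNA('ACGT'), DNA('AC-T')], index=['seq1', 'seq2'])
    fh = io.StringIO()
    _tabular_msa_to_phylip(msa, fh)
    fh.seek(0)
    return _phylip_to_tabular_msa(fh, constructor=DNA)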
import pandas as pd
from skbio.io import create_format, BLAST7FormatError
from skbio.io.format._blast import _parse_blast_data
blast7 = create_format('blast+7')
column_converter = {'query id': 'qseqid', 'query gi': 'qgi',
'query acc.': 'qacc', 'query acc.ver': 'qaccver',
'query length': 'qlen', 'subject id': 'sseqid',
'subject ids': 'sallseqid', 'subject gi': 'sgi',
'subject gis': 'sallgi', 'subject acc.': 'sacc',
'subject acc.ver': 'saccver', 'subject accs.': 'sallacc',
'subject length': 'slen', 'q. start': 'qstart',
'q. end': 'qend', 's. start': 'sstart', 's. end': 'send',
'query seq': 'qseq', 'subject seq': 'sseq',
'evalue': 'evalue', 'bit score': 'bitscore',
'score': 'score', 'alignment length': 'length',
'% identity': 'pident', 'identical': 'nident',
'mismatches': 'mismatch', 'positives': 'positive',
'gap opens': 'gapopen', 'gaps': 'gaps',
'% positives': 'ppos', 'query/sbjct frames': 'frames',
'query frame': 'qframe', 'sbjct frame': 'sframe',
'BTOP': 'btop', 'subject tax ids': 'staxids',
'subject sci names': 'sscinames',
'subject com names': 'scomnames',
'subject blast names': 'sblastnames',
'subject super kingdoms': 'sskingdoms',
'subject title': 'stitle', 'subject titles': 'salltitles',
'subject strand': 'sstrand',
'% query coverage per subject': 'qcovs',
'% query coverage per hsp': 'qcovhsp',
'Query id': 'qseqid', 'Subject id': 'sseqid',
'gap openings': 'gapopen', 'e-value': 'evalue'}
@blast7.sniffer()
def _blast7_sniffer(fh):
# Smells a BLAST+7 file if the following conditions are present
# -First line contains "BLAST"
# -Second line contains "Query" or "Database"
# -Third line starts with "Subject" or "Query" or "Database"
lines = [line for _, line in zip(range(3), fh)]
if len(lines) < 3:
return False, {}
if not lines[0].startswith("# BLAST"):
return False, {}
if not (lines[1].startswith("# Query:") or
lines[1].startswith("# Database:")):
return False, {}
if not (lines[2].startswith("# Subject:") or
lines[2].startswith("# Query:") or
lines[2].startswith("# Database:")):
return False, {}
return True, {}
@blast7.reader(pd.DataFrame, monkey_patch=False)
def _blast7_to_data_frame(fh):
line_num = 0
columns = None
skiprows = []
for line in fh:
if line == "# Fields: \n":
# Identifies Legacy BLAST 9 data
line = next(fh)
line_num += 1
if columns is None:
columns = _parse_fields(line, legacy=True)
skiprows.append(line_num)
else:
next_columns = _parse_fields(line, legacy=True)
if columns != next_columns:
raise BLAST7FormatError("Fields %r do not equal fields %r"
% (columns, next_columns))
skiprows.append(line_num)
elif line.startswith("# Fields: "):
# Identifies BLAST+7 data
if columns is None:
columns = _parse_fields(line)
else:
                # Affirms that the fields do not differ throughout the file
next_columns = _parse_fields(line)
if columns != next_columns:
raise BLAST7FormatError("Fields %r do not equal fields %r"
% (columns, next_columns))
line_num += 1
if columns is None:
# Affirms file contains BLAST data
raise BLAST7FormatError("File contains no BLAST data.")
fh.seek(0)
return _parse_blast_data(fh, columns, BLAST7FormatError,
"Number of fields (%r) does not equal number"
" of data columns (%r).", comment='#',
skiprows=skiprows)
def _parse_fields(line, legacy=False):
"""Removes '\n' from fields line and returns fields as a list (columns)."""
line = line.rstrip('\n')
if legacy:
fields = line.split(',')
else:
line = line.split('# Fields: ')[1]
fields = line.split(', ')
columns = []
for field in fields:
if field not in column_converter:
raise BLAST7FormatError("Unrecognized field (%r)."
" Supported fields: %r"
% (field,
set(column_converter.keys())))
columns.append(column_converter[field])
    return columns
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/blast7.py | blast7.py |
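# Usage sketch for the BLAST+7 reader above: parse a minimal report held in
# memory. The query/subject IDs, scores, and header comments are invented
# for illustration.
def _example_blast7():
    import io
    report = ("# BLAST 2.10.0+\n"
              "# Query: q1\n"
              "# Database: db\n"
              "# Fields: query id, subject id, evalue, bit score\n"
              "# 1 hits found\n"
              "q1\ts1\t1e-05\t42.0\n")
    return _blast7_to_data_frame(io.StringIO(report))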
import h5py
from skbio.io import create_format
from skbio.stats.distance import DissimilarityMatrix, DistanceMatrix
binary_dm = create_format('binary_dm', encoding='binary')
_vlen_dtype = h5py.special_dtype(vlen=str)
@binary_dm.sniffer()
def _binary_dm_sniffer(fh):
try:
f = h5py.File(fh, 'r')
except OSError:
return False, {}
header = _get_header(f)
if header is None:
return False, {}
ids = f.get('order')
if ids is None:
return False, {}
mat = f.get('matrix')
if mat is None:
return False, {}
n = len(ids)
if mat.shape != (n, n):
return False, {}
return True, {}
@binary_dm.reader(DissimilarityMatrix)
def _binary_dm_to_dissimilarity(fh):
    return _h5py_mat_to_skbio_mat(DissimilarityMatrix, fh)
@binary_dm.reader(DistanceMatrix)
def _binary_dm_to_distance(fh):
    return _h5py_mat_to_skbio_mat(DistanceMatrix, fh)
@binary_dm.writer(DissimilarityMatrix)
def _dissimilarity_to_binary_dm(obj, fh):
    return _skbio_mat_to_h5py_mat(obj, fh)
@binary_dm.writer(DistanceMatrix)
def _distance_to_binary_dm(obj, fh):
    return _skbio_mat_to_h5py_mat(obj, fh)
def _h5py_mat_to_skbio_mat(cls, fh):
return cls(fh['matrix'], _parse_ids(fh['order']))
def _skbio_mat_to_h5py_mat(obj, fh):
_set_header(fh)
ids = fh.create_dataset('order', shape=(len(obj.ids), ), dtype=_vlen_dtype)
ids[:] = obj.ids
fh.create_dataset('matrix', data=obj.data)
def _get_header(fh):
format_ = fh.get('format')
version = fh.get('version')
    if format_ is None or version is None:
return None
else:
return {'format': format_[0], 'version': version[0]}
def _parse_ids(ids):
if isinstance(ids[0], bytes):
return _bytes_decoder(ids)
else:
return _passthrough_decoder(ids)
def _verify_dimensions(fh):
if 'order' not in fh or 'matrix' not in fh:
return False
n = len(fh['order'])
return fh['matrix'].shape == (n, n)
def _bytes_decoder(x):
return [i.decode('utf8') for i in x]
def _passthrough_decoder(x):
return x
def _set_header(h5grp):
"""Set format spec header information"""
h5grp['format'] = [b'BDSM', ]
    h5grp['version'] = [b'2020.06', ]
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/binary_dm.py | binary_dm.py |
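# Usage sketch for the HDF5 helpers above: round-trip a DistanceMatrix
# through an in-memory h5py file (file-like objects require h5py >= 2.9,
# which is an assumption of this sketch). The IDs and values are invented
# for illustration.
def _example_binary_dm_round_trip():
    import io
    dm = DistanceMatrix([[0.0, 1.0], [1.0, 0.0]], ids=['a', 'b'])
    buf = io.BytesIO()
    with h5py.File(buf, 'w') as f:
        _skbio_mat_to_h5py_mat(dm, f)
    with h5py.File(buf, 'r') as f:
        return _h5py_mat_to_skbio_mat(DistanceMatrix, f)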
from collections import OrderedDict
from skbio.alignment import TabularMSA
from skbio.sequence._grammared_sequence import GrammaredSequence
from skbio.io import create_format, StockholmFormatError
stockholm = create_format('stockholm')
_REFERENCE_TAGS = frozenset({'RM', 'RT', 'RA', 'RL', 'RC'})
@stockholm.sniffer()
def _stockholm_sniffer(fh):
# Smells a Stockholm file if the following conditions are met:
# - File isn't empty
# - File contains correct header
try:
line = next(fh)
except StopIteration:
return False, {}
if _is_header(line):
return True, {}
return False, {}
@stockholm.reader(TabularMSA)
def _stockholm_to_tabular_msa(fh, constructor=None):
# Checks that user has passed required constructor parameter
if constructor is None:
raise ValueError("Must provide `constructor` parameter indicating the "
"type of sequences in the alignment. `constructor` "
"must be a subclass of `GrammaredSequence` "
"(e.g., `DNA`, `RNA`, `Protein`).")
    # Checks that the constructor parameter is supported
elif not issubclass(constructor, GrammaredSequence):
raise TypeError("`constructor` must be a subclass of "
"`GrammaredSequence`.")
# Checks that the file isn't empty
try:
line = next(fh)
except StopIteration:
raise StockholmFormatError("File is empty.")
# Checks that the file follows basic format (includes the required header)
if not _is_header(line):
raise StockholmFormatError("File missing required Stockholm header "
"line.")
msa_data = _MSAData()
for line in fh:
if line.isspace():
continue
line = line.rstrip('\n')
if _is_sequence_line(line):
seq_name, seq_data = _parse_sequence_line(line)
msa_data.add_sequence(seq_name, seq_data)
elif line.startswith("#=GF"):
feature_name, feature_data = _parse_gf_line(line)
msa_data.add_gf_metadata(feature_name, feature_data)
elif line.startswith("#=GS"):
seq_name, feature_name, feature_data = _parse_gs_line(line)
msa_data.add_gs_metadata(seq_name, feature_name, feature_data)
elif line.startswith("#=GR"):
seq_name, feature_name, feature_data = _parse_gr_line(line)
msa_data.add_gr_metadata(seq_name, feature_name, feature_data)
elif line.startswith('#=GC'):
feature_name, feature_data = _parse_gc_line(line)
msa_data.add_gc_metadata(feature_name, feature_data)
elif _is_footer(line):
break
else:
raise StockholmFormatError("Unrecognized line: %r" % line)
if not _is_footer(line):
raise StockholmFormatError('Final line does not conform to Stockholm '
'format. Must contain only "//".')
return msa_data.build_tabular_msa(constructor)
# For storing intermediate data used to construct a Sequence object.
class _MSAData:
def __init__(self):
self._seqs = {}
self._seq_order = []
self._metadata = OrderedDict()
self._positional_metadata = OrderedDict()
def add_sequence(self, seq_name, seq_data):
if seq_name not in self._seqs:
self._seqs[seq_name] = _SeqData(seq_name)
self._seqs[seq_name].seq = seq_data
self._seq_order.append(seq_name)
def add_gf_metadata(self, feature_name, feature_data):
# Handles first instance of labelled tree
if feature_name == 'TN' and 'NH' not in self._metadata:
self._metadata['NH'] = OrderedDict()
self._metadata['NH'][feature_data] = ''
# Handles second instance of labelled tree
elif feature_name == 'TN' and 'NH' in self._metadata:
if feature_data in self._metadata['NH']:
raise StockholmFormatError("Tree name %r used multiple times "
"in file." % feature_data)
self._metadata['NH'][feature_data] = ''
# Handles extra line(s) of an already created tree
elif feature_name == 'NH' and feature_name in self._metadata:
trees = self._metadata[feature_name]
if isinstance(trees, OrderedDict):
tree_id = next(reversed(trees))
self._metadata[feature_name][tree_id] = (trees[tree_id] +
feature_data)
else:
self._metadata[feature_name] = (self._metadata[feature_name] +
feature_data)
elif feature_name == 'RN':
if feature_name not in self._metadata:
self._metadata[feature_name] = [OrderedDict()]
else:
self._metadata[feature_name].append(OrderedDict())
elif feature_name in _REFERENCE_TAGS:
if 'RN' not in self._metadata:
raise StockholmFormatError("Expected 'RN' tag to precede "
"'%s' tag." % feature_name)
reference_dict = self._metadata['RN'][-1]
if feature_name not in reference_dict:
reference_dict[feature_name] = feature_data
else:
padding = _get_padding(reference_dict[feature_name])
reference_dict[feature_name] += padding + feature_data
elif feature_name in self._metadata:
padding = _get_padding(self._metadata[feature_name][-1])
self._metadata[feature_name] = (self._metadata[feature_name] +
padding + feature_data)
else:
self._metadata[feature_name] = feature_data
def add_gc_metadata(self, feature_name, feature_data):
if feature_name in self._positional_metadata:
_raise_duplicate_error("Found duplicate GC label %r."
% feature_name)
self._positional_metadata[feature_name] = feature_data
def add_gs_metadata(self, seq_name, feature_name, feature_data):
if seq_name not in self._seqs:
self._seqs[seq_name] = _SeqData(seq_name)
self._seqs[seq_name].add_metadata_feature(feature_name, feature_data)
def add_gr_metadata(self, seq_name, feature_name, feature_data):
if seq_name not in self._seqs:
self._seqs[seq_name] = _SeqData(seq_name)
self._seqs[seq_name].add_positional_metadata_feature(feature_name,
feature_data)
def build_tabular_msa(self, constructor):
if len(self._seqs) != len(self._seq_order):
invalid_seq_names = set(self._seqs) - set(self._seq_order)
raise StockholmFormatError('Found GS or GR metadata for '
'nonexistent sequence(s): %r'
% invalid_seq_names)
seqs = []
for seq_name in self._seq_order:
seqs.append(self._seqs[seq_name].build_sequence(constructor))
positional_metadata = self._positional_metadata
if not positional_metadata:
positional_metadata = None
metadata = self._metadata
if not metadata:
metadata = None
# Constructs TabularMSA
return TabularMSA(seqs, metadata=metadata,
positional_metadata=positional_metadata,
index=self._seq_order)
class _SeqData:
def __init__(self, name):
self.name = name
self._seq = None
self.metadata = None
self.positional_metadata = None
@property
def seq(self):
return self._seq
@seq.setter
def seq(self, seq):
if self._seq is None:
self._seq = seq
else:
_raise_duplicate_error("Found duplicate sequence name: %r"
% self.name)
def add_metadata_feature(self, feature_name, feature_data):
if self.metadata is None:
self.metadata = OrderedDict()
if feature_name in self.metadata:
padding = _get_padding(self.metadata[feature_name][-1])
self.metadata[feature_name] += padding + feature_data
else:
self.metadata[feature_name] = feature_data
def add_positional_metadata_feature(self, feature_name, feature_data):
if self.positional_metadata is None:
self.positional_metadata = OrderedDict()
if feature_name in self.positional_metadata:
_raise_duplicate_error("Found duplicate GR label %r associated "
"with sequence name %r"
% (feature_name, self.name))
else:
self.positional_metadata[feature_name] = feature_data
def build_sequence(self, constructor):
return constructor(self.seq, metadata=self.metadata,
positional_metadata=(self.positional_metadata))
def _parse_gf_line(line):
line = line.split(None, 2)
_check_for_malformed_line(line, 3)
return line[1:]
def _parse_gs_line(line):
line = line.split(None, 3)
_check_for_malformed_line(line, 4)
return line[1:]
def _parse_gr_line(line):
line = line.split(None, 3)
_check_for_malformed_line(line, 4)
seq_name = line[1]
feature_name = line[2]
feature_data = list(line[3])
return seq_name, feature_name, feature_data
def _parse_gc_line(line):
line = line.split(None, 2)
_check_for_malformed_line(line, 3)
feature_name = line[1]
feature_data = list(line[2])
return feature_name, feature_data
def _parse_sequence_line(line):
line = line.split(None, 1)
_check_for_malformed_line(line, 2)
return line
def _is_header(line):
return line == '# STOCKHOLM 1.0\n'
def _is_footer(line):
return line.rstrip() == '//'
def _is_sequence_line(line):
return not (line.startswith("#") or _is_footer(line))
def _raise_duplicate_error(message):
raise StockholmFormatError(message+' Note: If the file being used is in '
'Stockholm interleaved format, this '
'is not supported by the reader.')
def _check_for_malformed_line(line, expected_len):
if len(line) != expected_len:
raise StockholmFormatError('Line contains %d item(s). It must '
'contain exactly %d item(s).'
% (len(line), expected_len))
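# Usage sketch for the tag-line helpers above, which split the per-file (GF),
# per-sequence (GS/GR) and per-column (GC) lines. The example lines are
# invented for illustration.
def _example_tag_lines():
    assert _parse_gf_line('#=GF DE A description') == ['DE', 'A description']
    assert _parse_gs_line('#=GS seq1 AC P12345') == ['seq1', 'AC', 'P12345']
    assert _parse_gr_line('#=GR seq1 SS ..hh') == ('seq1', 'SS',
                                                   ['.', '.', 'h', 'h'])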
@stockholm.writer(TabularMSA)
def _tabular_msa_to_stockholm(obj, fh):
if not obj.index.is_unique:
raise StockholmFormatError("The TabularMSA's index labels must be"
" unique.")
# Writes header
fh.write("# STOCKHOLM 1.0\n")
# Writes GF data to file
if obj.has_metadata():
for gf_feature, gf_feature_data in obj.metadata.items():
if gf_feature == 'NH' and isinstance(gf_feature_data, dict):
for tree_id, tree in gf_feature_data.items():
fh.write("#=GF TN %s\n" % tree_id)
fh.write("#=GF NH %s\n" % tree)
elif gf_feature == 'RN':
if not isinstance(gf_feature_data, list):
raise StockholmFormatError(
"Expected 'RN' to contain a list of reference "
"dictionaries, got %r." % gf_feature_data)
for ref_num, dictionary in enumerate(gf_feature_data, start=1):
if not isinstance(dictionary, dict):
raise StockholmFormatError(
"Expected reference information to be stored as a "
"dictionary, found reference %d stored as %r." %
(ref_num, type(dictionary).__name__))
fh.write("#=GF RN [%d]\n" % ref_num)
for feature in dictionary:
if feature not in _REFERENCE_TAGS:
formatted_reference_tags = ', '.join(
[tag for tag in _REFERENCE_TAGS])
raise StockholmFormatError(
"Invalid reference tag %r found in reference "
"dictionary %d. Valid reference tags are: %s."
% (feature, ref_num, formatted_reference_tags))
fh.write("#=GF %s %s\n" % (feature,
dictionary[feature]))
else:
fh.write("#=GF %s %s\n" % (gf_feature, gf_feature_data))
unpadded_data = []
# Writes GS data to file, retrieves GR data, and retrieves sequence data
for seq, seq_name in zip(obj, obj.index):
seq_name = str(seq_name)
if seq.has_metadata():
for gs_feature, gs_feature_data in seq.metadata.items():
fh.write("#=GS %s %s %s\n" % (seq_name, gs_feature,
gs_feature_data))
unpadded_data.append((seq_name, str(seq)))
if seq.has_positional_metadata():
df = _format_positional_metadata(seq.positional_metadata,
'Sequence-specific positional '
'metadata (GR)')
for gr_feature in df.columns:
gr_feature_data = ''.join(df[gr_feature])
gr_string = "#=GR %s %s" % (seq_name, gr_feature)
unpadded_data.append((gr_string, gr_feature_data))
# Retrieves GC data
if obj.has_positional_metadata():
df = _format_positional_metadata(obj.positional_metadata,
'Multiple sequence alignment '
'positional metadata (GC)')
for gc_feature in df.columns:
gc_feature_data = ''.join(df[gc_feature])
gc_string = "#=GC %s" % gc_feature
unpadded_data.append((gc_string, gc_feature_data))
# Writes GR, GC, and raw data to file with padding
_write_padded_data(unpadded_data, fh)
# Writes footer
fh.write("//\n")
def _write_padded_data(data, fh):
max_data_len = 0
for label, _ in data:
if len(label) > max_data_len:
max_data_len = len(label)
fmt = '{0:%d} {1}\n' % max_data_len
for label, value in data:
fh.write(fmt.format(label, value))
def _format_positional_metadata(df, data_type):
# Asserts positional metadata feature names are unique
if not df.columns.is_unique:
num_repeated_columns = len(df.columns) - len(set(df.columns))
raise StockholmFormatError('%s feature names must be unique. '
'Found %d duplicate names.'
% (data_type, num_repeated_columns))
str_df = df.astype(str)
# Asserts positional metadata dataframe items are one character long
for column in str_df.columns:
if (str_df[column].str.len() != 1).any():
raise StockholmFormatError("%s must contain a single character for"
" each position's value. Found value(s)"
" in column %s of incorrect length."
% (data_type, column))
return str_df
def _get_padding(item):
    return '' if item[-1].isspace() else ' '
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/io/format/stockholm.py | stockholm.py |
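# Usage sketch for the Stockholm reader above: parse a minimal alignment held
# in memory. DNA is imported only for the sketch; the alignment content is
# invented for illustration.
def _example_read_stockholm():
    import io
    from skbio import DNA
    sto = ("# STOCKHOLM 1.0\n"
           "#=GF DE toy alignment\n"
           "seq1 ACG-T\n"
           "seq2 ACGGT\n"
           "#=GC SS_cons .<.>.\n"
           "//\n")
    return _stockholm_to_tabular_msa(io.StringIO(sto), constructor=DNA)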
import hashlib
import inspect
from types import FunctionType
from ._decorator import experimental
def resolve_key(obj, key):
"""Resolve key given an object and key."""
if callable(key):
return key(obj)
elif hasattr(obj, 'metadata'):
return obj.metadata[key]
raise TypeError("Could not resolve key %r. Key must be callable or %s must"
" have `metadata` attribute." % (key,
obj.__class__.__name__))
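# Illustrative sketch (not part of the original module): a key is either a
# callable applied to the object or a lookup into the object's ``metadata``.
#
#     >>> from skbio import DNA
#     >>> seq = DNA('ACGT', metadata={'id': 'seq1'})
#     >>> resolve_key(seq, 'id')
#     'seq1'
#     >>> resolve_key(seq, len)
#     4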
def make_sentinel(name):
return type(name, (), {
'__repr__': lambda s: name,
'__str__': lambda s: name,
'__class__': None
})()
def find_sentinels(function, sentinel):
params = inspect.signature(function).parameters
return [name for name, param in params.items()
if param.default is sentinel]
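# Illustrative sketch (not part of the original module): ``make_sentinel``
# builds a uniquely identifiable default value, and ``find_sentinels`` reports
# which parameters of a function still default to it.
#
#     >>> NoValue = make_sentinel('NoValue')
#     >>> NoValue
#     NoValue
#     >>> def f(a, b=NoValue, c=3):
#     ...     pass
#     >>> find_sentinels(f, NoValue)
#     ['b']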
class MiniRegistry(dict):
def __call__(self, name):
"""Act as a decorator to register functions with self"""
def decorator(func):
self[name] = func
return func
return decorator
def copy(self):
"""Useful for inheritance"""
return self.__class__(super(MiniRegistry, self).copy())
def formatted_listing(self):
"""Produce an RST list with descriptions."""
if len(self) == 0:
return "\tNone"
else:
return "\n".join(["\t%r\n\t %s" %
(name, self[name].__doc__.split("\n")[0])
for name in sorted(self)])
def interpolate(self, obj, name):
"""Inject the formatted listing in the second blank line of `name`."""
f = getattr(obj, name)
f2 = FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
# Conveniently the original docstring is on f2, not the new ones if
# inheritance is happening. I have no idea why.
t = f2.__doc__.split("\n\n")
t.insert(2, self.formatted_listing())
f2.__doc__ = "\n\n".join(t)
setattr(obj, name, f2)
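# Illustrative sketch (not part of the original module): a ``MiniRegistry`` is
# a dict that also acts as a decorator, so functions can register themselves
# under a name at definition time.
#
#     >>> registry = MiniRegistry()
#     >>> @registry('euclidean')
#     ... def euclidean(a, b):
#     ...     """Compute the Euclidean distance between two points."""
#     ...     return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5
#     >>> registry['euclidean']([0, 0], [3, 4])
#     5.0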
def chunk_str(s, n, char):
"""Insert `char` character every `n` characters in string `s`.
Canonically pronounced "chunkster".
"""
# Modified from http://stackoverflow.com/a/312464/3776794
if n < 1:
raise ValueError(
"Cannot split string into chunks with n=%d. n must be >= 1." % n)
return char.join((s[i:i+n] for i in range(0, len(s), n)))
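# Illustrative sketch (not part of the original module):
#
#     >>> chunk_str('ACGTACGTAC', 4, ' ')
#     'ACGT ACGT AC'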
@experimental(as_of="0.4.0")
def cardinal_to_ordinal(n):
"""Return ordinal string version of cardinal int `n`.
Parameters
----------
n : int
Cardinal to convert to ordinal. Must be >= 0.
Returns
-------
str
Ordinal version of cardinal `n`.
Raises
------
ValueError
If `n` is less than 0.
Notes
-----
This function can be useful when writing human-readable error messages.
Examples
--------
>>> from skbio.util import cardinal_to_ordinal
>>> cardinal_to_ordinal(0)
'0th'
>>> cardinal_to_ordinal(1)
'1st'
>>> cardinal_to_ordinal(2)
'2nd'
>>> cardinal_to_ordinal(3)
'3rd'
"""
# Taken and modified from http://stackoverflow.com/a/20007730/3776794
# Originally from http://codegolf.stackexchange.com/a/4712 by Gareth
if n < 0:
raise ValueError("Cannot convert negative integer %d to ordinal "
"string." % n)
return "%d%s" % (n, "tsnrhtdd"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])
@experimental(as_of="0.4.0")
def safe_md5(open_file, block_size=2 ** 20):
"""Computes an md5 sum without loading the file into memory
Parameters
----------
open_file : file object
open file handle to the archive to compute the checksum. It
must be open as a binary file
block_size : int, optional
size of the block taken per iteration
Returns
-------
md5 : md5 object from the hashlib module
object with the loaded file
Notes
-----
This method is based on the answers given in:
http://stackoverflow.com/a/1131255/379593
Examples
--------
>>> from io import BytesIO
>>> from skbio.util import safe_md5
>>> fd = BytesIO(b"foo bar baz") # open file like object
>>> x = safe_md5(fd)
>>> x.hexdigest()
'ab07acbb1e496801937adfa772424bf7'
>>> fd.close()
"""
md5 = hashlib.md5()
data = True
while data:
data = open_file.read(block_size)
if data:
md5.update(data)
return md5
@experimental(as_of="0.4.0")
def find_duplicates(iterable):
"""Find duplicate elements in an iterable.
Parameters
----------
iterable : iterable
Iterable to be searched for duplicates (i.e., elements that are
repeated).
Returns
-------
set
Repeated elements in `iterable`.
"""
# modified from qiita.qiita_db.util.find_repeated
# https://github.com/biocore/qiita
# see licenses/qiita.txt
seen, repeated = set(), set()
for e in iterable:
if e in seen:
repeated.add(e)
else:
seen.add(e)
    return repeated
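# Illustrative sketch (not part of the original module):
#
#     >>> sorted(find_duplicates(['a', 'b', 'a', 'c', 'c']))
#     ['a', 'c']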
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from ._ordination_results import OrdinationResults
from ._utils import corr, svd_rank, scale
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def cca(y, x, scaling=1):
r"""Compute canonical (also known as constrained) correspondence
analysis.
Canonical (or constrained) correspondence analysis is a
multivariate ordination technique. It appeared in community
ecology [1]_ and relates community composition to the variation in
the environment (or in other factors). It works from data on
    abundances or counts of samples and constraint variables,
and outputs ordination axes that maximize sample separation among species.
It is better suited to extract the niches of taxa than linear
multivariate methods because it assumes unimodal response curves
(habitat preferences are often unimodal functions of habitat
variables [2]_).
As more environmental variables are added, the result gets more
similar to unconstrained ordination, so only the variables that
are deemed explanatory should be included in the analysis.
Parameters
----------
y : DataFrame
Samples by features table (n, m)
x : DataFrame
Samples by constraints table (n, q)
scaling : int, {1, 2}, optional
Scaling type 1 maintains :math:`\chi^2` distances between rows.
Scaling type 2 preserves :math:`\chi^2` distances between columns.
For a more detailed explanation of the interpretation, check Legendre &
Legendre 1998, section 9.4.3.
Returns
-------
OrdinationResults
Object that stores the cca results.
Raises
------
ValueError
If `x` and `y` have different number of rows
If `y` contains negative values
If `y` contains a row of only 0's.
NotImplementedError
If scaling is not 1 or 2.
See Also
--------
ca
rda
OrdinationResults
Notes
-----
The algorithm is based on [3]_, \S 11.2, and is expected to give
the same results as ``cca(y, x)`` in R's package vegan, except
that this implementation won't drop constraining variables due to
perfect collinearity: the user needs to choose which ones to
input.
Canonical *correspondence* analysis shouldn't be confused with
canonical *correlation* analysis (CCorA, but sometimes called
CCA), a different technique to search for multivariate
relationships between two datasets. Canonical correlation analysis
is a statistical tool that, given two vectors of random variables,
finds linear combinations that have maximum correlation with each
other. In some sense, it assumes linear responses of "species" to
"environmental variables" and is not well suited to analyze
ecological data.
References
----------
.. [1] Cajo J. F. Ter Braak, "Canonical Correspondence Analysis: A
New Eigenvector Technique for Multivariate Direct Gradient
Analysis", Ecology 67.5 (1986), pp. 1167-1179.
.. [2] Cajo J.F. Braak and Piet F.M. Verdonschot, "Canonical
correspondence analysis and related multivariate methods in
aquatic ecology", Aquatic Sciences 57.3 (1995), pp. 255-289.
.. [3] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
Y = y.values
X = x.values
# Perform parameter sanity checks
    if X.shape[0] != Y.shape[0]:
        raise ValueError("The samples by features table 'y' and the samples by"
                         " constraints table 'x' must have the same number of"
                         " rows. 'y': {0} 'x': {1}".format(Y.shape[0],
                                                           X.shape[0]))
if Y.min() < 0:
raise ValueError(
"The samples by features table 'y' must be nonnegative")
row_max = Y.max(axis=1)
if np.any(row_max <= 0):
# Or else the lstsq call to compute Y_hat breaks
raise ValueError("The samples by features table 'y' cannot contain a "
"row with only 0's")
if scaling not in {1, 2}:
raise NotImplementedError(
"Scaling {0} not implemented.".format(scaling))
# Step 1 (similar to Pearson chi-square statistic)
grand_total = Y.sum()
Q = Y / grand_total # Relative frequencies of Y (contingency table)
# Features and sample weights (marginal totals)
column_marginals = Q.sum(axis=0)
row_marginals = Q.sum(axis=1)
    # Formula 9.32 in Legendre & Legendre (1998). Notice that it's a
    # scaled version of the contribution of each cell towards the Pearson
    # chi-square statistic.
expected = np.outer(row_marginals, column_marginals)
Q_bar = (Q - expected) / np.sqrt(expected)
# Step 2. Standardize columns of X with respect to sample weights,
# using the maximum likelihood variance estimator (Legendre &
# Legendre 1998, p. 595)
X = scale(X, weights=row_marginals, ddof=0)
# Step 3. Weighted multiple regression.
X_weighted = row_marginals[:, None]**0.5 * X
B, _, rank_lstsq, _ = lstsq(X_weighted, Q_bar)
Y_hat = X_weighted.dot(B)
Y_res = Q_bar - Y_hat
# Step 4. Eigenvalue decomposition
u, s, vt = svd(Y_hat, full_matrices=False)
rank = svd_rank(Y_hat.shape, s)
s = s[:rank]
u = u[:, :rank]
vt = vt[:rank]
U = vt.T
# Step 5. Eq. 9.38
U_hat = Q_bar.dot(U) * s**-1
# Residuals analysis
u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
rank = svd_rank(Y_res.shape, s_res)
s_res = s_res[:rank]
u_res = u_res[:, :rank]
vt_res = vt_res[:rank]
U_res = vt_res.T
U_hat_res = Y_res.dot(U_res) * s_res**-1
eigenvalues = np.r_[s, s_res]**2
# Scalings (p. 596 L&L 1998):
# feature scores, scaling 1
V = (column_marginals**-0.5)[:, None] * U
# sample scores, scaling 2
V_hat = (row_marginals**-0.5)[:, None] * U_hat
# sample scores, scaling 1
F = V_hat * s
# feature scores, scaling 2
F_hat = V * s
# Sample scores which are linear combinations of constraint
# variables
Z_scaling1 = ((row_marginals**-0.5)[:, None] *
Y_hat.dot(U))
Z_scaling2 = Z_scaling1 * s**-1
# Feature residual scores, scaling 1
V_res = (column_marginals**-0.5)[:, None] * U_res
# Sample residual scores, scaling 2
V_hat_res = (row_marginals**-0.5)[:, None] * U_hat_res
# Sample residual scores, scaling 1
F_res = V_hat_res * s_res
# Feature residual scores, scaling 2
F_hat_res = V_res * s_res
eigvals = eigenvalues
if scaling == 1:
features_scores = np.hstack((V, V_res))
sample_scores = np.hstack((F, F_res))
sample_constraints = np.hstack((Z_scaling1, F_res))
elif scaling == 2:
features_scores = np.hstack((F_hat, F_hat_res))
sample_scores = np.hstack((V_hat, V_hat_res))
sample_constraints = np.hstack((Z_scaling2, V_hat_res))
biplot_scores = corr(X_weighted, u)
pc_ids = ['CCA%d' % (i+1) for i in range(len(eigenvalues))]
sample_ids = y.index
feature_ids = y.columns
eigvals = pd.Series(eigenvalues, index=pc_ids)
samples = pd.DataFrame(sample_scores,
columns=pc_ids, index=sample_ids)
features = pd.DataFrame(features_scores,
columns=pc_ids, index=feature_ids)
biplot_scores = pd.DataFrame(biplot_scores,
index=x.columns,
columns=pc_ids[:biplot_scores.shape[1]])
sample_constraints = pd.DataFrame(sample_constraints,
index=sample_ids, columns=pc_ids)
return OrdinationResults(
"CCA", "Canonical Correspondence Analysis", eigvals, samples,
features=features, biplot_scores=biplot_scores,
sample_constraints=sample_constraints,
        proportion_explained=eigvals / eigvals.sum())
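# Illustrative usage sketch for ``cca`` (added for clarity, not part of the
# original module; the data below are made up). ``y`` holds nonnegative
# counts of features per sample, ``x`` holds the constraining variables for
# the same samples, and the result is an ``OrdinationResults`` object.
#
#     >>> import pandas as pd
#     >>> y = pd.DataFrame([[1.0, 0.0, 3.0],
#     ...                   [0.0, 2.0, 1.0],
#     ...                   [4.0, 1.0, 0.0],
#     ...                   [2.0, 2.0, 2.0]],
#     ...                  index=['s1', 's2', 's3', 's4'],
#     ...                  columns=['f1', 'f2', 'f3'])
#     >>> x = pd.DataFrame([[1.0, 0.5],
#     ...                   [2.0, 1.5],
#     ...                   [3.0, 0.5],
#     ...                   [4.0, 1.0]],
#     ...                  index=['s1', 's2', 's3', 's4'],
#     ...                  columns=['depth', 'ph'])
#     >>> res = cca(y, x, scaling=1)
#     >>> res.short_method_name
#     'CCA'
#     >>> res.samples.shape[0]
#     4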
import functools
import numpy as np
from IPython.core.pylabtools import print_figure
from IPython.core.display import Image, SVG
from skbio._base import SkbioObject
from skbio.stats._misc import _pprint_strs
from skbio.util._decorator import experimental
class OrdinationResults(SkbioObject):
"""Store ordination results, providing serialization and plotting support.
Stores various components of ordination results. Provides methods for
serializing/deserializing results, as well as generation of basic
matplotlib 3-D scatterplots. Will automatically display PNG/SVG
representations of itself within the IPython Notebook.
Attributes
----------
short_method_name : str
Abbreviated ordination method name.
long_method_name : str
Ordination method name.
eigvals : pd.Series
The resulting eigenvalues. The index corresponds to the ordination
axis labels
samples : pd.DataFrame
The position of the samples in the ordination space, row-indexed by the
sample id.
features : pd.DataFrame
The position of the features in the ordination space, row-indexed by
the feature id.
biplot_scores : pd.DataFrame
Correlation coefficients of the samples with respect to the features.
sample_constraints : pd.DataFrame
Site constraints (linear combinations of constraining variables):
coordinates of the sites in the space of the explanatory variables X.
These are the fitted site scores
proportion_explained : pd.Series
Proportion explained by each of the dimensions in the ordination space.
The index corresponds to the ordination axis labels
See Also
--------
ca
cca
pcoa
rda
"""
default_write_format = 'ordination'
@experimental(as_of="0.4.0")
def __init__(self, short_method_name, long_method_name, eigvals,
samples, features=None, biplot_scores=None,
sample_constraints=None, proportion_explained=None):
self.short_method_name = short_method_name
self.long_method_name = long_method_name
self.eigvals = eigvals
self.samples = samples
self.features = features
self.biplot_scores = biplot_scores
self.sample_constraints = sample_constraints
self.proportion_explained = proportion_explained
@experimental(as_of="0.4.0")
def __str__(self):
"""Return a string representation of the ordination results.
String representation lists ordination results attributes and indicates
whether or not they are present. If an attribute is present, its
dimensions are listed. A truncated list of features and sample IDs are
included (if they are present).
Returns
-------
str
String representation of the ordination results.
"""
lines = ['Ordination results:']
method = '%s (%s)' % (self.long_method_name, self.short_method_name)
lines.append(self._format_attribute(method, 'Method', str))
attrs = [(self.eigvals, 'Eigvals'),
(self.proportion_explained, 'Proportion explained'),
(self.features, 'Features'),
(self.samples, 'Samples'),
(self.biplot_scores, 'Biplot Scores'),
(self.sample_constraints, 'Sample constraints')]
for attr, attr_label in attrs:
def formatter(e):
return 'x'.join(['%d' % s for s in e.shape])
lines.append(self._format_attribute(attr, attr_label, formatter))
lines.append(self._format_attribute(
self.features, 'Feature IDs',
lambda e: _pprint_strs(e.index.tolist())))
lines.append(self._format_attribute(
self.samples, 'Sample IDs',
lambda e: _pprint_strs(e.index.tolist())))
return '\n'.join(lines)
@experimental(as_of="0.4.0")
def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
title='', cmap=None, s=20):
"""Create a 3-D scatterplot of ordination results colored by metadata.
Creates a 3-D scatterplot of the ordination results, where each point
represents a sample. Optionally, these points can be colored by
metadata (see `df` and `column` below).
Parameters
----------
df : pd.DataFrame, optional
``DataFrame`` containing sample metadata. Must be indexed by sample
ID, and all sample IDs in the ordination results must exist in the
``DataFrame``. If ``None``, samples (i.e., points) will not be
colored by metadata.
column : str, optional
Column name in `df` to color samples (i.e., points in the plot) by.
Cannot have missing data (i.e., ``np.nan``). `column` can be
numeric or categorical. If numeric, all values in the column will
be cast to ``float`` and mapped to colors using `cmap`. A colorbar
will be included to serve as a legend. If categorical (i.e., not
all values in `column` could be cast to ``float``), colors will be
chosen for each category using evenly-spaced points along `cmap`. A
legend will be included. If ``None``, samples (i.e., points) will
not be colored by metadata.
axes : iterable of int, optional
Indices of sample coordinates to plot on the x-, y-, and z-axes.
For example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
Must contain exactly three elements.
axis_labels : iterable of str, optional
Labels for the x-, y-, and z-axes. If ``None``, labels will be the
values of `axes` cast as strings.
title : str, optional
Plot title.
cmap : str or matplotlib.colors.Colormap, optional
Name or instance of matplotlib colormap to use for mapping `column`
values to colors. If ``None``, defaults to the colormap specified
in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
are recommended for categorical data, while sequential colormaps
(e.g., ``Greys``) are recommended for numeric data. See [1]_ for
these colormap classifications.
s : scalar or iterable of scalars, optional
Size of points. See matplotlib's ``Axes3D.scatter`` documentation
for more details.
Returns
-------
matplotlib.figure.Figure
Figure containing the scatterplot and legend/colorbar if metadata
were provided.
Raises
------
ValueError
Raised on invalid input, including the following situations:
- there are not at least three dimensions to plot
- there are not exactly three values in `axes`, they are not
unique, or are out of range
- there are not exactly three values in `axis_labels`
- either `df` or `column` is provided without the other
- `column` is not in the ``DataFrame``
- sample IDs in the ordination results are not in `df` or have
missing data in `column`
See Also
--------
mpl_toolkits.mplot3d.Axes3D.scatter
Notes
-----
This method creates basic plots of ordination results, and is intended
to provide a quick look at the results in the context of metadata
(e.g., from within the IPython Notebook). For more customization and to
generate publication-quality figures, we recommend EMPeror [2]_.
References
----------
.. [1] http://matplotlib.org/examples/color/colormaps_reference.html
.. [2] EMPeror: a tool for visualizing high-throughput microbial
community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
Examples
--------
.. plot::
Define a distance matrix with four samples labelled A-D:
>>> from skbio import DistanceMatrix
>>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
... [0.21712454, 0., 0.45995501, 0.80332382],
... [0.5007512, 0.45995501, 0., 0.65463348],
... [0.91769271, 0.80332382, 0.65463348, 0.]],
... ['A', 'B', 'C', 'D'])
Define metadata for each sample in a ``pandas.DataFrame``:
>>> import pandas as pd
>>> metadata = {
... 'A': {'body_site': 'skin'},
... 'B': {'body_site': 'gut'},
... 'C': {'body_site': 'gut'},
... 'D': {'body_site': 'skin'}}
>>> df = pd.DataFrame.from_dict(metadata, orient='index')
Run principal coordinate analysis (PCoA) on the distance matrix:
>>> from skbio.stats.ordination import pcoa
>>> pcoa_results = pcoa(dm)
Plot the ordination results, where each sample is colored by body
site (a categorical variable):
>>> fig = pcoa_results.plot(df=df, column='body_site',
... title='Samples colored by body site',
... cmap='Set1', s=50)
"""
# Note: New features should not be added to this method and should
# instead be added to EMPeror (http://biocore.github.io/emperor/).
# Only bug fixes and minor updates should be made to this method.
coord_matrix = self.samples.values.T
self._validate_plot_axes(coord_matrix, axes)
# derived from
# http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = coord_matrix[axes[0]]
ys = coord_matrix[axes[1]]
zs = coord_matrix[axes[2]]
point_colors, category_to_color = self._get_plot_point_colors(
df, column, self.samples.index, cmap)
scatter_fn = functools.partial(ax.scatter, xs, ys, zs, s=s)
if point_colors is None:
plot = scatter_fn()
else:
plot = scatter_fn(c=point_colors, cmap=cmap)
if axis_labels is None:
axis_labels = ['%d' % axis for axis in axes]
elif len(axis_labels) != 3:
raise ValueError("axis_labels must contain exactly three elements "
"(found %d elements)." % len(axis_labels))
ax.set_xlabel(axis_labels[0])
ax.set_ylabel(axis_labels[1])
ax.set_zlabel(axis_labels[2])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.set_title(title)
# create legend/colorbar
if point_colors is not None:
if category_to_color is None:
fig.colorbar(plot)
else:
self._plot_categorical_legend(ax, category_to_color)
fig.tight_layout()
return fig
def _validate_plot_axes(self, coord_matrix, axes):
"""Validate `axes` against coordinates matrix."""
num_dims = coord_matrix.shape[0]
if num_dims < 3:
raise ValueError("At least three dimensions are required to plot "
"ordination results. There are only %d "
"dimension(s)." % num_dims)
if len(axes) != 3:
raise ValueError("`axes` must contain exactly three elements "
"(found %d elements)." % len(axes))
if len(set(axes)) != 3:
raise ValueError("The values provided for `axes` must be unique.")
for idx, axis in enumerate(axes):
if axis < 0 or axis >= num_dims:
raise ValueError("`axes[%d]` must be >= 0 and < %d." %
(idx, num_dims))
def _get_plot_point_colors(self, df, column, ids, cmap):
"""Return a list of colors for each plot point given a metadata column.
If `column` is categorical, additionally returns a dictionary mapping
each category (str) to color (used for legend creation).
"""
import matplotlib.pyplot as plt
if ((df is None and column is not None) or (df is not None and
column is None)):
raise ValueError("Both df and column must be provided, or both "
"must be None.")
elif df is None and column is None:
point_colors, category_to_color = None, None
else:
if column not in df:
raise ValueError("Column '%s' not in data frame." % column)
col_vals = df.reindex(ids, axis=0).loc[:, column]
if col_vals.isnull().any():
raise ValueError("One or more IDs in the ordination results "
"are not in the data frame, or there is "
"missing data in the data frame's '%s' "
"column." % column)
category_to_color = None
try:
point_colors = col_vals.astype(float)
except ValueError:
# we have categorical data, so choose a color for each
# category, where colors are evenly spaced across the
# colormap.
# derived from http://stackoverflow.com/a/14887119
categories = col_vals.unique()
cmap = plt.get_cmap(cmap)
category_colors = cmap(np.linspace(0, 1, len(categories)))
category_to_color = dict(zip(categories, category_colors))
point_colors = col_vals.apply(lambda x: category_to_color[x])
point_colors = point_colors.tolist()
return point_colors, category_to_color
def _plot_categorical_legend(self, ax, color_dict):
"""Add legend to plot using specified mapping of category to color."""
# derived from http://stackoverflow.com/a/20505720
import matplotlib as mpl
proxies = []
labels = []
for category in color_dict:
proxy = mpl.lines.Line2D([0], [0], linestyle='none',
c=color_dict[category], marker='o')
proxies.append(proxy)
labels.append(category)
# place legend outside of the axes (centered)
# derived from http://matplotlib.org/users/legend_guide.html
ax.legend(proxies, labels, numpoints=1, loc=6,
bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
# Here we define the special repr methods that provide the IPython display
# protocol. Code derived from:
# https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
# Custom%20Display%20Logic.ipynb
# See licenses/ipython.txt for more details.
def _repr_png_(self):
return self._figure_data('png')
def _repr_svg_(self):
return self._figure_data('svg')
# We expose the above reprs as properties, so that the user can see them
# directly (since otherwise the client dictates which one it shows by
# default)
@property
@experimental(as_of="0.4.0")
def png(self):
"""Display basic 3-D scatterplot in IPython Notebook as PNG."""
return Image(self._repr_png_(), embed=True)
@property
@experimental(as_of="0.4.0")
def svg(self):
"""Display basic 3-D scatterplot in IPython Notebook as SVG."""
return SVG(self._repr_svg_())
def _figure_data(self, format):
import matplotlib.pyplot as plt
fig = self.plot()
data = print_figure(fig, format)
# We MUST close the figure, otherwise IPython's display machinery
# will pick it up and send it as output, resulting in a double display
plt.close(fig)
return data
def _format_attribute(self, attr, attr_label, formatter):
if attr is None:
formatted_attr = 'N/A'
else:
formatted_attr = formatter(attr)
        return '\t%s: %s' % (attr_label, formatted_attr)
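# Illustrative sketch (added for clarity, not part of the original module):
# constructing a minimal ``OrdinationResults`` only requires the method names,
# the eigenvalues, and the sample coordinates; the remaining attributes
# default to ``None`` and show up as "N/A" in the ``__str__`` summary.
#
#     >>> import pandas as pd
#     >>> eigvals = pd.Series([0.7, 0.2, 0.1], index=['PC1', 'PC2', 'PC3'])
#     >>> samples = pd.DataFrame([[0.1, 0.2, 0.0],
#     ...                         [-0.3, 0.1, 0.2]],
#     ...                        index=['A', 'B'],
#     ...                        columns=['PC1', 'PC2', 'PC3'])
#     >>> res = OrdinationResults('PCoA', 'Principal Coordinate Analysis',
#     ...                         eigvals, samples)
#     >>> res.short_method_name
#     'PCoA'
#     >>> res.samples.shape
#     (2, 3)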
import functools
import numpy as np
from IPython.core.pylabtools import print_figure
from IPython.core.display import Image, SVG
from skbio._base import SkbioObject
from skbio.stats._misc import _pprint_strs
from skbio.util._decorator import experimental
class OrdinationResults(SkbioObject):
"""Store ordination results, providing serialization and plotting support.
Stores various components of ordination results. Provides methods for
serializing/deserializing results, as well as generation of basic
matplotlib 3-D scatterplots. Will automatically display PNG/SVG
representations of itself within the IPython Notebook.
Attributes
----------
short_method_name : str
Abbreviated ordination method name.
long_method_name : str
Ordination method name.
eigvals : pd.Series
The resulting eigenvalues. The index corresponds to the ordination
axis labels
samples : pd.DataFrame
The position of the samples in the ordination space, row-indexed by the
sample id.
features : pd.DataFrame
The position of the features in the ordination space, row-indexed by
the feature id.
biplot_scores : pd.DataFrame
Correlation coefficients of the samples with respect to the features.
sample_constraints : pd.DataFrame
Site constraints (linear combinations of constraining variables):
coordinates of the sites in the space of the explanatory variables X.
These are the fitted site scores
proportion_explained : pd.Series
Proportion explained by each of the dimensions in the ordination space.
The index corresponds to the ordination axis labels
See Also
--------
ca
cca
pcoa
rda
"""
default_write_format = 'ordination'
@experimental(as_of="0.4.0")
def __init__(self, short_method_name, long_method_name, eigvals,
samples, features=None, biplot_scores=None,
sample_constraints=None, proportion_explained=None):
self.short_method_name = short_method_name
self.long_method_name = long_method_name
self.eigvals = eigvals
self.samples = samples
self.features = features
self.biplot_scores = biplot_scores
self.sample_constraints = sample_constraints
self.proportion_explained = proportion_explained
@experimental(as_of="0.4.0")
def __str__(self):
"""Return a string representation of the ordination results.
String representation lists ordination results attributes and indicates
whether or not they are present. If an attribute is present, its
dimensions are listed. A truncated list of features and sample IDs are
included (if they are present).
Returns
-------
str
String representation of the ordination results.
"""
lines = ['Ordination results:']
method = '%s (%s)' % (self.long_method_name, self.short_method_name)
lines.append(self._format_attribute(method, 'Method', str))
attrs = [(self.eigvals, 'Eigvals'),
(self.proportion_explained, 'Proportion explained'),
(self.features, 'Features'),
(self.samples, 'Samples'),
(self.biplot_scores, 'Biplot Scores'),
(self.sample_constraints, 'Sample constraints')]
for attr, attr_label in attrs:
def formatter(e):
return 'x'.join(['%d' % s for s in e.shape])
lines.append(self._format_attribute(attr, attr_label, formatter))
lines.append(self._format_attribute(
self.features, 'Feature IDs',
lambda e: _pprint_strs(e.index.tolist())))
lines.append(self._format_attribute(
self.samples, 'Sample IDs',
lambda e: _pprint_strs(e.index.tolist())))
return '\n'.join(lines)
@experimental(as_of="0.4.0")
def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
title='', cmap=None, s=20):
"""Create a 3-D scatterplot of ordination results colored by metadata.
Creates a 3-D scatterplot of the ordination results, where each point
represents a sample. Optionally, these points can be colored by
metadata (see `df` and `column` below).
Parameters
----------
df : pd.DataFrame, optional
``DataFrame`` containing sample metadata. Must be indexed by sample
ID, and all sample IDs in the ordination results must exist in the
``DataFrame``. If ``None``, samples (i.e., points) will not be
colored by metadata.
column : str, optional
Column name in `df` to color samples (i.e., points in the plot) by.
Cannot have missing data (i.e., ``np.nan``). `column` can be
numeric or categorical. If numeric, all values in the column will
be cast to ``float`` and mapped to colors using `cmap`. A colorbar
will be included to serve as a legend. If categorical (i.e., not
all values in `column` could be cast to ``float``), colors will be
chosen for each category using evenly-spaced points along `cmap`. A
legend will be included. If ``None``, samples (i.e., points) will
not be colored by metadata.
axes : iterable of int, optional
Indices of sample coordinates to plot on the x-, y-, and z-axes.
For example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
Must contain exactly three elements.
axis_labels : iterable of str, optional
Labels for the x-, y-, and z-axes. If ``None``, labels will be the
values of `axes` cast as strings.
title : str, optional
Plot title.
cmap : str or matplotlib.colors.Colormap, optional
Name or instance of matplotlib colormap to use for mapping `column`
values to colors. If ``None``, defaults to the colormap specified
in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
are recommended for categorical data, while sequential colormaps
(e.g., ``Greys``) are recommended for numeric data. See [1]_ for
these colormap classifications.
s : scalar or iterable of scalars, optional
Size of points. See matplotlib's ``Axes3D.scatter`` documentation
for more details.
Returns
-------
matplotlib.figure.Figure
Figure containing the scatterplot and legend/colorbar if metadata
were provided.
Raises
------
ValueError
Raised on invalid input, including the following situations:
- there are not at least three dimensions to plot
- there are not exactly three values in `axes`, they are not
unique, or are out of range
- there are not exactly three values in `axis_labels`
- either `df` or `column` is provided without the other
- `column` is not in the ``DataFrame``
- sample IDs in the ordination results are not in `df` or have
missing data in `column`
See Also
--------
mpl_toolkits.mplot3d.Axes3D.scatter
Notes
-----
This method creates basic plots of ordination results, and is intended
to provide a quick look at the results in the context of metadata
(e.g., from within the IPython Notebook). For more customization and to
generate publication-quality figures, we recommend EMPeror [2]_.
References
----------
.. [1] http://matplotlib.org/examples/color/colormaps_reference.html
.. [2] EMPeror: a tool for visualizing high-throughput microbial
community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
Examples
--------
.. plot::
Define a distance matrix with four samples labelled A-D:
>>> from skbio import DistanceMatrix
>>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
... [0.21712454, 0., 0.45995501, 0.80332382],
... [0.5007512, 0.45995501, 0., 0.65463348],
... [0.91769271, 0.80332382, 0.65463348, 0.]],
... ['A', 'B', 'C', 'D'])
Define metadata for each sample in a ``pandas.DataFrame``:
>>> import pandas as pd
>>> metadata = {
... 'A': {'body_site': 'skin'},
... 'B': {'body_site': 'gut'},
... 'C': {'body_site': 'gut'},
... 'D': {'body_site': 'skin'}}
>>> df = pd.DataFrame.from_dict(metadata, orient='index')
Run principal coordinate analysis (PCoA) on the distance matrix:
>>> from skbio.stats.ordination import pcoa
>>> pcoa_results = pcoa(dm)
Plot the ordination results, where each sample is colored by body
site (a categorical variable):
>>> fig = pcoa_results.plot(df=df, column='body_site',
... title='Samples colored by body site',
... cmap='Set1', s=50)
"""
# Note: New features should not be added to this method and should
# instead be added to EMPeror (http://biocore.github.io/emperor/).
# Only bug fixes and minor updates should be made to this method.
coord_matrix = self.samples.values.T
self._validate_plot_axes(coord_matrix, axes)
# derived from
# http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = coord_matrix[axes[0]]
ys = coord_matrix[axes[1]]
zs = coord_matrix[axes[2]]
point_colors, category_to_color = self._get_plot_point_colors(
df, column, self.samples.index, cmap)
scatter_fn = functools.partial(ax.scatter, xs, ys, zs, s=s)
if point_colors is None:
plot = scatter_fn()
else:
plot = scatter_fn(c=point_colors, cmap=cmap)
if axis_labels is None:
axis_labels = ['%d' % axis for axis in axes]
elif len(axis_labels) != 3:
raise ValueError("axis_labels must contain exactly three elements "
"(found %d elements)." % len(axis_labels))
ax.set_xlabel(axis_labels[0])
ax.set_ylabel(axis_labels[1])
ax.set_zlabel(axis_labels[2])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.set_title(title)
# create legend/colorbar
if point_colors is not None:
if category_to_color is None:
fig.colorbar(plot)
else:
self._plot_categorical_legend(ax, category_to_color)
fig.tight_layout()
return fig
def _validate_plot_axes(self, coord_matrix, axes):
"""Validate `axes` against coordinates matrix."""
num_dims = coord_matrix.shape[0]
if num_dims < 3:
raise ValueError("At least three dimensions are required to plot "
"ordination results. There are only %d "
"dimension(s)." % num_dims)
if len(axes) != 3:
raise ValueError("`axes` must contain exactly three elements "
"(found %d elements)." % len(axes))
if len(set(axes)) != 3:
raise ValueError("The values provided for `axes` must be unique.")
for idx, axis in enumerate(axes):
if axis < 0 or axis >= num_dims:
raise ValueError("`axes[%d]` must be >= 0 and < %d." %
(idx, num_dims))
def _get_plot_point_colors(self, df, column, ids, cmap):
"""Return a list of colors for each plot point given a metadata column.
If `column` is categorical, additionally returns a dictionary mapping
each category (str) to color (used for legend creation).
"""
import matplotlib.pyplot as plt
if ((df is None and column is not None) or (df is not None and
column is None)):
raise ValueError("Both df and column must be provided, or both "
"must be None.")
elif df is None and column is None:
point_colors, category_to_color = None, None
else:
if column not in df:
raise ValueError("Column '%s' not in data frame." % column)
col_vals = df.reindex(ids, axis=0).loc[:, column]
if col_vals.isnull().any():
raise ValueError("One or more IDs in the ordination results "
"are not in the data frame, or there is "
"missing data in the data frame's '%s' "
"column." % column)
category_to_color = None
try:
point_colors = col_vals.astype(float)
except ValueError:
# we have categorical data, so choose a color for each
# category, where colors are evenly spaced across the
# colormap.
# derived from http://stackoverflow.com/a/14887119
categories = col_vals.unique()
cmap = plt.get_cmap(cmap)
category_colors = cmap(np.linspace(0, 1, len(categories)))
category_to_color = dict(zip(categories, category_colors))
point_colors = col_vals.apply(lambda x: category_to_color[x])
point_colors = point_colors.tolist()
return point_colors, category_to_color
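# Illustrative sketch of the categorical branch above (hypothetical values):
# unique categories are mapped to evenly spaced colormap entries, e.g.
# >>> categories = ['gut', 'skin']
# >>> cmap = plt.get_cmap('Set1')
# >>> category_to_color = dict(zip(categories, cmap(np.linspace(0, 1, 2))))
# >>> point_colors = [category_to_color[c] for c in ['gut', 'gut', 'skin']]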
def _plot_categorical_legend(self, ax, color_dict):
"""Add legend to plot using specified mapping of category to color."""
# derived from http://stackoverflow.com/a/20505720
import matplotlib as mpl
proxies = []
labels = []
for category in color_dict:
proxy = mpl.lines.Line2D([0], [0], linestyle='none',
c=color_dict[category], marker='o')
proxies.append(proxy)
labels.append(category)
# place legend outside of the axes (centered)
# derived from http://matplotlib.org/users/legend_guide.html
ax.legend(proxies, labels, numpoints=1, loc=6,
bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
# Here we define the special repr methods that provide the IPython display
# protocol. Code derived from:
# https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
# Custom%20Display%20Logic.ipynb
# See licenses/ipython.txt for more details.
def _repr_png_(self):
return self._figure_data('png')
def _repr_svg_(self):
return self._figure_data('svg')
# We expose the above reprs as properties, so that the user can see them
# directly (since otherwise the client dictates which one it shows by
# default)
@property
@experimental(as_of="0.4.0")
def png(self):
"""Display basic 3-D scatterplot in IPython Notebook as PNG."""
return Image(self._repr_png_(), embed=True)
@property
@experimental(as_of="0.4.0")
def svg(self):
"""Display basic 3-D scatterplot in IPython Notebook as SVG."""
return SVG(self._repr_svg_())
def _figure_data(self, format):
import matplotlib.pyplot as plt
fig = self.plot()
data = print_figure(fig, format)
# We MUST close the figure, otherwise IPython's display machinery
# will pick it up and send it as output, resulting in a double display
plt.close(fig)
return data
def _format_attribute(self, attr, attr_label, formatter):
if attr is None:
formatted_attr = 'N/A'
else:
formatted_attr = formatter(attr)
return '\t%s: %s' % (attr_label, formatted_attr)
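# Illustrative sketch (hypothetical notebook session, not part of this class):
# how the display hooks above are typically consumed. IPython's ``display``
# looks up ``_repr_png_``/``_repr_svg_`` on the object, while the ``png`` and
# ``svg`` properties request a specific format explicitly.
# >>> from IPython.display import display
# >>> display(ordination_results)
# >>> ordination_results.png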
import numpy as np
from skbio.util._decorator import experimental
from ._cutils import center_distance_matrix_cy
@experimental(as_of="0.4.0")
def mean_and_std(a, axis=None, weights=None, with_mean=True, with_std=True,
ddof=0):
"""Compute the weighted average and standard deviation along the
specified axis.
Parameters
----------
a : array_like
Calculate average and standard deviation of these values.
axis : int, optional
Axis along which the statistics are computed. The default is
to compute them on the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each
value in `a` contributes to the average according to its
associated weight. The weights array can either be 1-D (in
which case its length must be the size of `a` along the given
axis) or of the same shape as `a`. If `weights=None`, then all
data in `a` are assumed to have a weight equal to one.
with_mean : bool, optional, defaults to True
Compute average if True.
with_std : bool, optional, defaults to True
Compute standard deviation if True.
ddof : int, optional, defaults to 0
Delta degrees of freedom. Variance is calculated by
dividing by `n - ddof` (where `n` is the number of
elements). By default it computes the maximum likelihood
estimator.
Returns
-------
average, std
The average and standard deviation along the specified axis.
If either was not requested, ``None`` is returned in its place.
"""
if not (with_mean or with_std):
raise ValueError("Either the mean or standard deviation need to be"
" computed.")
a = np.asarray(a)
if weights is None:
avg = a.mean(axis=axis) if with_mean else None
std = a.std(axis=axis, ddof=ddof) if with_std else None
else:
avg = np.average(a, axis=axis, weights=weights)
if with_std:
if axis is None:
variance = np.average((a - avg)**2, weights=weights)
else:
# Make sure that the subtraction to compute variance works for
# multidimensional arrays
a_rolled = np.rollaxis(a, axis)
# Numpy doesn't have a weighted std implementation, but this is
# stable and fast
variance = np.average((a_rolled - avg)**2, axis=0,
weights=weights)
if ddof != 0: # Don't waste time if variance doesn't need scaling
if axis is None:
variance *= a.size / (a.size - ddof)
else:
variance *= a.shape[axis] / (a.shape[axis] - ddof)
std = np.sqrt(variance)
else:
std = None
avg = avg if with_mean else None
return avg, std
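# Illustrative sketch (not called by the library): weighted statistics from
# ``mean_and_std`` agree with computing them directly with ``np.average``.
# The input values are arbitrary.
def _example_mean_and_std():
    data = np.array([1.0, 2.0, 3.0, 4.0])
    weights = np.array([1.0, 1.0, 1.0, 3.0])
    avg, std = mean_and_std(data, weights=weights)
    # avg == np.average(data, weights=weights) == 3.0
    # std == np.sqrt(np.average((data - avg) ** 2, weights=weights))
    return avg, std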
@experimental(as_of="0.4.0")
def scale(a, weights=None, with_mean=True, with_std=True, ddof=0, copy=True):
"""Scale array by columns to have weighted average 0 and standard
deviation 1.
Parameters
----------
a : array_like
2D array whose columns are standardized according to the
weights.
weights : array_like, optional
Array of weights associated with the columns of `a`. By
default, the scaling is unweighted.
with_mean : bool, optional, defaults to True
Center columns to have 0 weighted mean.
with_std : bool, optional, defaults to True
Scale columns to have unit weighted std.
ddof : int, optional, defaults to 0
If with_std is True, variance is calculated by dividing by `n
- ddof` (where `n` is the number of elements). By default it
computes the maximum likelihood estimator.
copy : bool, optional, defaults to True
If True, standardize a copy of `a` and return it; otherwise
standardize `a` in place.
Returns
-------
2D ndarray
Scaled array.
Notes
-----
Wherever std equals 0, it is replaced by 1 in order to avoid
division by zero.
"""
if copy:
a = a.copy()
a = np.asarray(a, dtype=np.float64)
avg, std = mean_and_std(a, axis=0, weights=weights, with_mean=with_mean,
with_std=with_std, ddof=ddof)
if with_mean:
a -= avg
if with_std:
std[std == 0] = 1.0
a /= std
return a
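# Illustrative sketch (not called by the library): after ``scale`` each column
# has unweighted mean 0 and standard deviation 1 (ddof=0). Input values are
# arbitrary.
def _example_scale():
    a = np.array([[1.0, 10.0],
                  [2.0, 20.0],
                  [3.0, 60.0]])
    scaled = scale(a)
    # np.allclose(scaled.mean(axis=0), 0.0) and
    # np.allclose(scaled.std(axis=0), 1.0) both hold for this input
    return scaled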
@experimental(as_of="0.4.0")
def svd_rank(M_shape, S, tol=None):
"""Matrix rank of `M` given its singular values `S`.
See `np.linalg.matrix_rank` for a rationale on the tolerance
(we're not using that function because it doesn't let us reuse a
precomputed SVD)."""
if tol is None:
tol = S.max() * max(M_shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
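# Illustrative sketch (not called by the library): ``svd_rank`` on a matrix
# that is rank-1 by construction, reusing a precomputed set of singular values.
def _example_svd_rank():
    M = np.outer([1.0, 2.0, 3.0], [4.0, 5.0, 6.0])  # rank-1 outer product
    S = np.linalg.svd(M, compute_uv=False)
    return svd_rank(M.shape, S)  # == np.linalg.matrix_rank(M) == 1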
@experimental(as_of="0.4.0")
def corr(x, y=None):
"""Computes correlation between columns of `x`, or `x` and `y`.
Correlation is covariance of (columnwise) standardized matrices,
so each matrix is first centered and scaled to have variance one,
and then their covariance is computed.
Parameters
----------
x : 2D array_like
Matrix of shape (n, p). Correlation between its columns will
be computed.
y : 2D array_like, optional
Matrix of shape (n, q). If provided, the correlation is
computed between the columns of `x` and the columns of
`y`. Else, it's computed between the columns of `x`.
Returns
-------
correlation
Matrix of computed correlations. Has shape (p, p) if `y` is
not provided, else has shape (p, q).
"""
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if y.shape[0] != x.shape[0]:
raise ValueError("Both matrices must have the same number of rows")
x, y = scale(x), scale(y)
else:
x = scale(x)
y = x
# Notice that scaling was performed with ddof=0 (dividing by n,
# the default), so now we need to remove it by also using ddof=0
# (dividing by n)
return x.T.dot(y) / x.shape[0]
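# Illustrative sketch (not called by the library): for a single input matrix,
# ``corr`` matches NumPy's column-wise Pearson correlations. Input values are
# arbitrary (columns must have nonzero variance).
def _example_corr():
    x = np.array([[1.0, 2.0],
                  [2.0, 1.0],
                  [4.0, 3.0],
                  [3.0, 5.0]])
    r = corr(x)
    # np.allclose(r, np.corrcoef(x, rowvar=False)) holds for this input
    return r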
@experimental(as_of="0.4.0")
def e_matrix(distance_matrix):
"""Compute E matrix from a distance matrix.
Squares the input elementwise and divides it by -2. Eq. 9.20 in
Legendre & Legendre 1998."""
return distance_matrix * distance_matrix / -2
def f_matrix(E_matrix):
"""Compute F matrix from E matrix.
Centering step: from each element, the means of the corresponding
row and column are subtracted, and the mean of the whole
matrix is added. Eq. 9.21 in Legendre & Legendre 1998."""
row_means = E_matrix.mean(axis=1, keepdims=True)
col_means = E_matrix.mean(axis=0, keepdims=True)
matrix_mean = E_matrix.mean()
return E_matrix - row_means - col_means + matrix_mean
def center_distance_matrix(distance_matrix, inplace=False):
"""
Centers a distance matrix.
Note: If the distances are Euclidean, pairwise distances
needn't be computed from the data table Y because F_matrix =
Y.dot(Y.T) (if Y has been centered).
But since we expect distance_matrix to be non-Euclidean,
we perform the following computation as per
Numerical Ecology (Legendre & Legendre 1998).
Parameters
----------
distance_matrix : 2D array_like
Distance matrix.
inplace : bool, optional
Whether or not to center the given distance matrix in-place, which
is more efficient in terms of memory and computation.
"""
if not distance_matrix.flags.c_contiguous:
# center_distance_matrix_cy requires c_contiguous, so make a copy
distance_matrix = np.asarray(distance_matrix, order='C')
if inplace:
center_distance_matrix_cy(distance_matrix, distance_matrix)
return distance_matrix
else:
centered = np.empty(distance_matrix.shape, distance_matrix.dtype)
center_distance_matrix_cy(distance_matrix, centered)
return centered
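# Illustrative sketch (not called by the library): the Cython-backed centering
# above is expected to agree numerically with composing the pure-NumPy
# ``e_matrix`` and ``f_matrix`` helpers on a small symmetric matrix.
def _example_center_distance_matrix():
    dm = np.array([[0.0, 1.0, 2.0],
                   [1.0, 0.0, 3.0],
                   [2.0, 3.0, 0.0]])
    centered = center_distance_matrix(dm)
    # np.allclose(centered, f_matrix(e_matrix(dm))) is the expected relation
    return centered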
def _e_matrix_inplace(distance_matrix):
"""
Compute E matrix from a distance matrix inplace.
Squares the input element-wise and divides it by -2. Eq. 9.20 in
Legendre & Legendre 1998.
Modified from :func:`skbio.stats.ordination.e_matrix` function,
performing row-wise operations to avoid excessive memory allocations.
Parameters
----------
distance_matrix : 2D array_like
Distance matrix.
"""
distance_matrix = distance_matrix.astype(float)
for i in np.arange(len(distance_matrix)):
distance_matrix[i] = (distance_matrix[i] * distance_matrix[i]) / -2
return distance_matrix
def _f_matrix_inplace(e_matrix):
"""
Compute F matrix from E matrix inplace.
Centering step: from each element, the means of the corresponding
row and column are subtracted, and the mean of the whole
matrix is added. Eq. 9.21 in Legendre & Legendre 1998.
Modified from :func:`skbio.stats.ordination.f_matrix` function,
performing row-wise operations to avoid excessive memory allocations.
Parameters
----------
e_matrix : 2D array_like
A matrix representing the "E matrix" as described above.
"""
e_matrix = e_matrix.astype(float)
row_means = np.zeros(len(e_matrix), dtype=float)
col_means = np.zeros(len(e_matrix), dtype=float)
matrix_mean = 0.0
for i in np.arange(len(e_matrix)):
row_means[i] = e_matrix[i].mean()
matrix_mean += e_matrix[i].sum()
col_means += e_matrix[i]
matrix_mean /= len(e_matrix) ** 2
col_means /= len(e_matrix)
for i in np.arange(len(e_matrix)):
v = e_matrix[i]
v -= row_means[i]
v -= col_means
v += matrix_mean
e_matrix[i] = v
return e_matrix
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/stats/ordination/_utils.py | _utils.py |
import numpy as np
from scipy.stats import pearsonr
from skbio import DistanceMatrix
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def hommola_cospeciation(host_dist, par_dist, interaction, permutations=999):
"""Perform Hommola et al (2009) host/parasite cospeciation test.
This test for host/parasite cospeciation is described in [1]_. It is a
modification of a Mantel test, expanded to handle the case where
multiple hosts map to a single parasite (and vice versa).
For a basic Mantel test, the distance matrices being compared must have the
same number of values. To determine the significance of the correlations
between distances in the two matrices, the correlation coefficient of those
distances is calculated and compared to the correlation coefficients
calculated from a set of matrices in which rows and columns have been
permuted.
In this test, rather than comparing host-host to parasite-parasite
distances directly (requiring one host per parasite), the distances are
compared for each interaction edge between host and parasite. Thus, a host
interacting with two different parasites will be represented in two
different edges, with the host-host distance for the comparison between
those edges equal to zero, and the parasite-parasite distance equal to the
distance between those two parasites. Like in the Mantel test, significance
of the interaction is assessed by permutation, in this case permutation of
the host-symbiont interaction links.
Note that the null hypothesis being tested here is that the hosts and
parasites have evolved independently of one another. The alternative to
this is a somewhat weaker case than what is often implied with the term
'cospeciation,' which is that each incidence of host speciation is
recapitulated in an incidence of symbiont speciation (strict
co-cladogenesis). Although there may be many factors that could contribute
to non-independence of host and symbiont phylogenies, this loss of
explanatory specificity comes with increased robustness to phylogenetic
uncertainty. Thus, this test may be especially useful for cases where host
and/or symbiont phylogenies are poorly resolved, or when simple correlation
between host and symbiont evolution is of more interest than strict
co-cladogenesis.
This test requires pairwise distance matrices for hosts and symbionts, as
well as an interaction matrix specifying links between hosts (in columns)
and symbionts (in rows). This interaction matrix should have the same
number of columns as the host distance matrix, and the same number of rows
as the symbiont distance matrix. Interactions between hosts and symbionts
should be indicated by values of ``1`` or ``True``, with non-interactions
indicated by values of ``0`` or ``False``.
Parameters
----------
host_dist : 2-D array_like or DistanceMatrix
Symmetric matrix of m x m pairwise distances between hosts.
par_dist : 2-D array_like or DistanceMatrix
Symmetric matrix of n x n pairwise distances between parasites.
interaction : 2-D array_like, bool
n x m binary matrix of parasite x host interactions. Order of hosts
(columns) should be identical to order of hosts in `host_dist`, as
should order of parasites (rows) be identical to order of parasites in
`par_dist`.
permutations : int, optional
Number of permutations used to compute p-value. Must be greater than or
equal to zero. If zero, statistical significance calculations will be
skipped and the p-value will be ``np.nan``.
Returns
-------
corr_coeff : float
Pearson correlation coefficient of host : parasite association.
p_value : float
Significance of host : parasite association computed using
`permutations` and a one-sided (greater) alternative hypothesis.
perm_stats : 1-D numpy.ndarray, float
Correlation coefficients observed using permuted host : parasite
interactions. Length will be equal to the number of permutations used
to compute p-value (see `permutations` parameter above).
See Also
--------
skbio.stats.distance.mantel
scipy.stats.pearsonr
Notes
-----
It is assumed that the ordering of parasites in `par_dist` and hosts in
`host_dist` are identical to their ordering in the rows and columns,
respectively, of the interaction matrix.
This code is loosely based on the original R code from [1]_.
References
----------
.. [1] Hommola K, Smith JE, Qiu Y, Gilks WR (2009) A Permutation Test of
Host-Parasite Cospeciation. Molecular Biology and Evolution, 26,
1457-1468.
Examples
--------
>>> from skbio.stats.evolve import hommola_cospeciation
Create arrays for host distances, parasite distances, and their
interactions (data taken from example in [1]_):
>>> hdist = [[0,3,8,8,9], [3,0,7,7,8], [8,7,0,6,7], [8,7,6,0,3],
... [9,8,7,3,0]]
>>> pdist = [[0,5,8,8,8], [5,0,7,7,7], [8,7,0,4,4], [8,7,4,0,2],
... [8,7,4,2,0]]
>>> interaction = [[1,0,0,0,0], [0,1,0,0,0], [0,0,1,0,0], [0,0,0,1,0],
... [0,0,0,1,1]]
Run the cospeciation test with 99 permutations. Note that the correlation
coefficient for the observed values counts against the final reported
p-value:
>>> corr_coeff, p_value, perm_stats = hommola_cospeciation(
... hdist, pdist, interaction, permutations=99)
>>> print("%.3f" % corr_coeff)
0.832
In this case, the host distances have a fairly strong positive correlation
with the symbiont distances. However, this may also reflect structure
inherent in the phylogeny, and is not itself indicative of significance.
>>> p_value <= 0.05
True
After permuting host : parasite interactions, we find that the observed
correlation is indeed greater than we would expect by chance.
"""
host_dist = DistanceMatrix(host_dist)
par_dist = DistanceMatrix(par_dist)
interaction = np.asarray(interaction, dtype=bool)
num_hosts = host_dist.shape[0]
num_pars = par_dist.shape[0]
if num_hosts < 3 or num_pars < 3:
raise ValueError("Distance matrices must be a minimum of 3x3 in size.")
if num_hosts != interaction.shape[1]:
raise ValueError("Number of interaction matrix columns must match "
"number of hosts in `host_dist`.")
if num_pars != interaction.shape[0]:
raise ValueError("Number of interaction matrix rows must match "
"number of parasites in `par_dist`.")
if permutations < 0:
raise ValueError("Number of permutations must be greater than or "
"equal to zero.")
if interaction.sum() < 3:
raise ValueError("Must have at least 3 host-parasite interactions in "
"`interaction`.")
# shortcut to eliminate nested for-loops specifying pairwise interaction
# partners as randomizable indices
pars, hosts = np.nonzero(interaction)
pars_k_labels, pars_t_labels = _gen_lists(pars)
hosts_k_labels, hosts_t_labels = _gen_lists(hosts)
# get a vector of pairwise distances for each interaction edge
x = _get_dist(hosts_k_labels, hosts_t_labels, host_dist.data,
np.arange(num_hosts))
y = _get_dist(pars_k_labels, pars_t_labels, par_dist.data,
np.arange(num_pars))
# calculate the observed correlation coefficient for these hosts/symbionts
corr_coeff = pearsonr(x, y)[0]
# now do permutations. initialize index lists of the appropriate size
mp = np.arange(num_pars)
mh = np.arange(num_hosts)
# initialize list of shuffled correlation vals
perm_stats = np.empty(permutations)
if permutations == 0 or np.isnan(corr_coeff):
p_value = np.nan
perm_stats.fill(np.nan)
else:
for i in range(permutations):
# generate a shuffled list of indexes for each permutation. this
# effectively randomizes which host is associated with which
# symbiont, but maintains the distribution of genetic distances
np.random.shuffle(mp)
np.random.shuffle(mh)
# get pairwise distances in shuffled order
y_p = _get_dist(pars_k_labels, pars_t_labels, par_dist.data, mp)
x_p = _get_dist(hosts_k_labels, hosts_t_labels, host_dist.data, mh)
# calculate shuffled correlation coefficient
perm_stats[i] = pearsonr(x_p, y_p)[0]
p_value = ((perm_stats >= corr_coeff).sum() + 1) / (permutations + 1)
return corr_coeff, p_value, perm_stats
def _get_dist(k_labels, t_labels, dists, index):
"""Subset a distance matrix using a set of (randomizable) index labels.
Parameters
----------
k_labels : numpy.array
index labels specifying row-wise member of pairwise interaction
t_labels : numpy.array
index labels specifying column-wise member of pairwise interaction
dists : numpy.array
pairwise distance matrix
index : numpy.array of int
permutable indices for changing order in pairwise distance matrix
Returns
-------
vec : list of float
List of distances associated with host:parasite edges.
"""
return dists[index[k_labels], index[t_labels]]
def _gen_lists(labels):
"""Generate matched lists of row and column index labels.
Shortcut function for generating matched lists of row and col index
labels for the set of pairwise comparisons specified by the list of those
indices recovered using ``np.nonzero(interaction)``.
Reproduces values of iterated indices from the nested for-loops contained
in ``get_dist`` function in original code from [1]_.
Parameters
----------
labels : numpy.array
array containing the indices of nonzero elements in one dimension of an
interaction matrix
Returns
-------
k_labels : numpy.array
index labels specifying row-wise member of pairwise interaction
t_labels : numpy.array
index labels specifying column-wise member of pairwise interaction
References
----------
.. [1] Hommola K, Smith JE, Qiu Y, Gilks WR (2009) A Permutation Test of
Host-Parasite Cospeciation. Molecular Biology and Evolution, 26,
1457-1468.
"""
i_array, j_array = np.transpose(np.tri(len(labels)-1)).nonzero()
j_array = j_array + 1
return labels[i_array], labels[j_array]
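# Illustrative sketch (not called by the library): what the two helpers above
# produce for a toy set of interaction edges. ``_gen_lists`` enumerates every
# unordered pair of edges, and ``_get_dist`` pulls the matching pairwise
# distances (here with an identity permutation). All values are hypothetical.
def _example_edge_pairs():
    labels = np.array([0, 1, 3, 3])  # e.g. host index of four interaction edges
    k_labels, t_labels = _gen_lists(labels)
    # k_labels -> array([0, 0, 0, 1, 1, 3])
    # t_labels -> array([1, 3, 3, 3, 3, 3])
    dists = np.arange(16, dtype=float).reshape(4, 4)  # toy 4 x 4 distance matrix
    return _get_dist(k_labels, t_labels, dists, np.arange(4))
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/stats/evolve/_hommola.py | _hommola.py |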
from itertools import combinations
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr
from skbio.stats.distance import DistanceMatrix
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def bioenv(distance_matrix, data_frame, columns=None):
"""Find subset of variables maximally correlated with distances.
Finds subsets of variables whose Euclidean distances (after scaling the
variables; see Notes section below for details) are maximally
rank-correlated with the distance matrix. For example, the distance matrix
might contain distances between communities, and the variables might be
numeric environmental variables (e.g., pH). Correlation between the
community distance matrix and Euclidean environmental distance matrix is
computed using Spearman's rank correlation coefficient (:math:`\\rho`).
Subsets of environmental variables range in size from 1 to the total number
of variables (inclusive). For example, if there are 3 variables, the "best"
variable subsets will be computed for subset sizes 1, 2, and 3.
The "best" subset is chosen by computing the correlation between the
community distance matrix and all possible Euclidean environmental distance
matrices at the given subset size. The combination of environmental
variables with maximum correlation is chosen as the "best" subset.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
data_frame : pandas.DataFrame
Contains columns of variables (e.g., numeric environmental variables
such as pH) associated with the objects in `distance_matrix`. Must be
indexed by the IDs in `distance_matrix` (i.e., the row labels must be
distance matrix IDs), but the order of IDs between `distance_matrix`
and `data_frame` need not be the same. All IDs in the distance matrix
must be present in `data_frame`. Extra IDs in `data_frame` are allowed
(they are ignored in the calculations).
columns : iterable of strs, optional
Column names in `data_frame` to include as variables in the
calculations. If not provided, defaults to all columns in `data_frame`.
The values in each column must be numeric or convertible to a numeric
type.
Returns
-------
pandas.DataFrame
Data frame containing the "best" subset of variables at each subset
size, as well as the correlation coefficient of each.
Raises
------
TypeError
If invalid input types are provided, or if one or more specified
columns in `data_frame` are not numeric.
ValueError
If column name(s) or `distance_matrix` IDs cannot be found in
`data_frame`, if there is missing data (``NaN``) in the environmental
variables, or if the environmental variables cannot be scaled (e.g.,
due to zero variance).
See Also
--------
scipy.stats.spearmanr
Notes
-----
See [1]_ for the original method reference (originally called BIO-ENV).
The general algorithm and interface are similar to ``vegan::bioenv``,
available in R's vegan package [2]_. This method can also be found in
PRIMER-E [3]_ (originally called BIO-ENV, but is now called BEST).
.. warning:: This method can take a *long* time to run if a large number of
variables are specified, as all possible subsets are evaluated at each
subset size.
The variables are scaled before computing the Euclidean distance: each
column is centered and then scaled by its standard deviation.
References
----------
.. [1] Clarke, K. R & Ainsworth, M. 1993. "A method of linking multivariate
community structure to environmental variables". Marine Ecology Progress
Series, 92, 205-219.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
.. [3] http://www.primer-e.com/primer.htm
Examples
--------
Import the functionality we'll use in the following examples:
>>> import pandas as pd
>>> from skbio import DistanceMatrix
>>> from skbio.stats.distance import bioenv
Load a 4x4 community distance matrix:
>>> dm = DistanceMatrix([[0.0, 0.5, 0.25, 0.75],
... [0.5, 0.0, 0.1, 0.42],
... [0.25, 0.1, 0.0, 0.33],
... [0.75, 0.42, 0.33, 0.0]],
... ['A', 'B', 'C', 'D'])
Load a ``pandas.DataFrame`` with two environmental variables, pH and
elevation:
>>> df = pd.DataFrame([[7.0, 400],
... [8.0, 530],
... [7.5, 450],
... [8.5, 810]],
... index=['A','B','C','D'],
... columns=['pH', 'Elevation'])
Note that the data frame is indexed with the same IDs (``'A'``, ``'B'``,
``'C'``, and ``'D'``) that are in the distance matrix. This is necessary in
order to link the environmental variables (metadata) to each of the objects
in the distance matrix. In this example, the IDs appear in the same order
in both the distance matrix and data frame, but this is not necessary.
Find the best subsets of environmental variables that are correlated with
community distances:
>>> bioenv(dm, df) # doctest: +NORMALIZE_WHITESPACE
size correlation
vars
pH 1 0.771517
pH, Elevation 2 0.714286
We see that in this simple example, pH alone is maximally rank-correlated
with the community distances (:math:`\\rho=0.771517`).
"""
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Must provide a DistanceMatrix as input.")
if not isinstance(data_frame, pd.DataFrame):
raise TypeError("Must provide a pandas.DataFrame as input.")
if columns is None:
columns = data_frame.columns.values.tolist()
if len(set(columns)) != len(columns):
raise ValueError("Duplicate column names are not supported.")
if len(columns) < 1:
raise ValueError("Must provide at least one column.")
for column in columns:
if column not in data_frame:
raise ValueError("Column '%s' not in data frame." % column)
# Subset and order the vars data frame to match the IDs in the distance
# matrix, only keeping the specified columns.
vars_df = data_frame.reindex(distance_matrix.ids, axis=0).loc[:, columns]
if vars_df.isnull().any().any():
raise ValueError("One or more IDs in the distance matrix are not "
"in the data frame, or there is missing data in the "
"data frame.")
try:
vars_df = vars_df.astype(float)
except ValueError:
raise TypeError("All specified columns in the data frame must be "
"numeric.")
# Scale the vars and extract the underlying numpy array from the data
# frame. We mainly do this for performance as we'll be taking subsets of
# columns within a tight loop and using a numpy array ends up being ~2x
# faster.
vars_array = _scale(vars_df).values
dm_flat = distance_matrix.condensed_form()
num_vars = len(columns)
var_idxs = np.arange(num_vars)
# For each subset size, store the best combination of variables:
# (string identifying best vars, subset size, rho)
max_rhos = np.empty(num_vars, dtype=[('vars', object),
('size', int),
('correlation', float)])
for subset_size in range(1, num_vars + 1):
max_rho = None
for subset_idxs in combinations(var_idxs, subset_size):
# Compute Euclidean distances using the current subset of
# variables. pdist returns the distances in condensed form.
vars_dm_flat = pdist(vars_array[:, subset_idxs],
metric='euclidean')
rho = spearmanr(dm_flat, vars_dm_flat)[0]
# If there are ties for the best rho at a given subset size, choose
# the first one in order to match vegan::bioenv's behavior.
if max_rho is None or rho > max_rho[0]:
max_rho = (rho, subset_idxs)
vars_label = ', '.join([columns[i] for i in max_rho[1]])
max_rhos[subset_size - 1] = (vars_label, subset_size, max_rho[0])
return pd.DataFrame.from_records(max_rhos, index='vars')
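# Illustrative sketch (not called by the library): the evaluation performed for
# a single candidate subset inside the loop above -- scale the chosen columns,
# build a Euclidean distance matrix from them, and rank-correlate it with the
# community distances. The function name and signature are hypothetical.
def _example_candidate_rho(distance_matrix, data_frame, subset_columns):
    vars_array = _scale(data_frame[list(subset_columns)].astype(float)).values
    vars_dm_flat = pdist(vars_array, metric='euclidean')
    return spearmanr(distance_matrix.condensed_form(), vars_dm_flat)[0]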
def _scale(df):
"""Center and scale each column in a data frame.
Each column is centered (by subtracting the mean) and then scaled by its
standard deviation.
"""
# Modified from http://stackoverflow.com/a/18005745
df = df.copy()
df -= df.mean()
df /= df.std()
if df.isnull().any().any():
raise ValueError("Column(s) in the data frame could not be scaled, "
"likely because the column(s) had no variance.")
return df
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/stats/distance/_bioenv.py | _bioenv.py |
from functools import partial
import numpy as np
from ._base import (_preprocess_input_sng, _run_monte_carlo_stats,
_build_results, DistanceMatrix)
from skbio.util._decorator import experimental
from ._cutils import permanova_f_stat_sW_cy
@experimental(as_of="0.4.0")
def permanova(distance_matrix, grouping, column=None, permutations=999):
"""Test for significant differences between groups using PERMANOVA.
Permutational Multivariate Analysis of Variance (PERMANOVA) is a
non-parametric method that tests whether two or more groups of objects
(e.g., samples) are significantly different based on a categorical factor.
It is conceptually similar to ANOVA except that it operates on a distance
matrix, which allows for multivariate analysis. PERMANOVA computes a
pseudo-F statistic.
Statistical significance is assessed via a permutation test. The assignment
of objects to groups (`grouping`) is randomly permuted a number of times
(controlled via `permutations`). A pseudo-F statistic is computed for each
permutation and the p-value is the proportion of permuted pseudo-F
statistics that are equal to or greater than the original (unpermuted)
pseudo-F statistic.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
grouping : 1-D array_like or pandas.DataFrame
Vector indicating the assignment of objects to groups. For example,
these could be strings or integers denoting which group an object
belongs to. If `grouping` is 1-D ``array_like``, it must be the same
length and in the same order as the objects in `distance_matrix`. If
`grouping` is a ``DataFrame``, the column specified by `column` will be
used as the grouping vector. The ``DataFrame`` must be indexed by the
IDs in `distance_matrix` (i.e., the row labels must be distance matrix
IDs), but the order of IDs between `distance_matrix` and the
``DataFrame`` need not be the same. All IDs in the distance matrix must
be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
allowed (they are ignored in the calculations).
column : str, optional
Column name to use as the grouping vector if `grouping` is a
``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
Cannot be provided if `grouping` is 1-D ``array_like``.
permutations : int, optional
Number of permutations to use when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and the p-value
will be ``np.nan``.
Returns
-------
pandas.Series
Results of the statistical test, including ``test statistic`` and
``p-value``.
See Also
--------
anosim
Notes
-----
See [1]_ for the original method reference, as well as ``vegan::adonis``,
available in R's vegan package [2]_.
The p-value will be ``np.nan`` if `permutations` is zero.
References
----------
.. [1] Anderson, Marti J. "A new method for non-parametric multivariate
analysis of variance." Austral Ecology 26.1 (2001): 32-46.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
Examples
--------
See :mod:`skbio.stats.distance.anosim` for usage examples (both functions
provide similar interfaces).
"""
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Input must be a DistanceMatrix.")
sample_size = distance_matrix.shape[0]
num_groups, grouping = _preprocess_input_sng(
distance_matrix.ids, sample_size, grouping, column)
# Calculate number of objects in each group.
group_sizes = np.bincount(grouping)
s_T = (distance_matrix[:] ** 2).sum() / sample_size
# we are going over the whole matrix, instead of just upper triangle
# so cut in half
s_T /= 2.0
test_stat_function = partial(_compute_f_stat, sample_size, num_groups,
distance_matrix, group_sizes, s_T)
stat, p_value = _run_monte_carlo_stats(test_stat_function, grouping,
permutations)
return _build_results('PERMANOVA', 'pseudo-F', sample_size, num_groups,
stat, p_value, permutations)
def _compute_f_stat(sample_size, num_groups, distance_matrix, group_sizes,
s_T, grouping):
"""Compute PERMANOVA pseudo-F statistic."""
# Calculate s_W for each group, accounting for different group sizes.
s_W = permanova_f_stat_sW_cy(distance_matrix.data,
group_sizes, grouping)
s_A = s_T - s_W
return (s_A / (num_groups - 1)) / (s_W / (sample_size - num_groups)) | scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/stats/distance/_permanova.py | _permanova.py |
from functools import partial
import numpy as np
from ._base import (_preprocess_input_sng, _run_monte_carlo_stats,
_build_results, DistanceMatrix)
from skbio.util._decorator import experimental
from ._cutils import permanova_f_stat_sW_cy
@experimental(as_of="0.4.0")
def permanova(distance_matrix, grouping, column=None, permutations=999):
"""Test for significant differences between groups using PERMANOVA.
Permutational Multivariate Analysis of Variance (PERMANOVA) is a
non-parametric method that tests whether two or more groups of objects
(e.g., samples) are significantly different based on a categorical factor.
It is conceptually similar to ANOVA except that it operates on a distance
matrix, which allows for multivariate analysis. PERMANOVA computes a
pseudo-F statistic.
Statistical significance is assessed via a permutation test. The assignment
of objects to groups (`grouping`) is randomly permuted a number of times
(controlled via `permutations`). A pseudo-F statistic is computed for each
permutation and the p-value is the proportion of permuted pseudo-F
statisics that are equal to or greater than the original (unpermuted)
pseudo-F statistic.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
grouping : 1-D array_like or pandas.DataFrame
Vector indicating the assignment of objects to groups. For example,
these could be strings or integers denoting which group an object
belongs to. If `grouping` is 1-D ``array_like``, it must be the same
length and in the same order as the objects in `distance_matrix`. If
`grouping` is a ``DataFrame``, the column specified by `column` will be
used as the grouping vector. The ``DataFrame`` must be indexed by the
IDs in `distance_matrix` (i.e., the row labels must be distance matrix
IDs), but the order of IDs between `distance_matrix` and the
``DataFrame`` need not be the same. All IDs in the distance matrix must
be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
allowed (they are ignored in the calculations).
column : str, optional
Column name to use as the grouping vector if `grouping` is a
``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
Cannot be provided if `grouping` is 1-D ``array_like``.
permutations : int, optional
Number of permutations to use when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and the p-value
will be ``np.nan``.
Returns
-------
pandas.Series
Results of the statistical test, including ``test statistic`` and
``p-value``.
See Also
--------
anosim
Notes
-----
See [1]_ for the original method reference, as well as ``vegan::adonis``,
available in R's vegan package [2]_.
The p-value will be ``np.nan`` if `permutations` is zero.
References
----------
.. [1] Anderson, Marti J. "A new method for non-parametric multivariate
analysis of variance." Austral Ecology 26.1 (2001): 32-46.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
Examples
--------
See :mod:`skbio.stats.distance.anosim` for usage examples (both functions
provide similar interfaces).
"""
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Input must be a DistanceMatrix.")
sample_size = distance_matrix.shape[0]
num_groups, grouping = _preprocess_input_sng(
distance_matrix.ids, sample_size, grouping, column)
# Calculate number of objects in each group.
group_sizes = np.bincount(grouping)
s_T = (distance_matrix[:] ** 2).sum() / sample_size
# we are going over the whole matrix, instead of just upper triangle
# so cut in half
s_T /= 2.0
test_stat_function = partial(_compute_f_stat, sample_size, num_groups,
distance_matrix, group_sizes, s_T)
stat, p_value = _run_monte_carlo_stats(test_stat_function, grouping,
permutations)
return _build_results('PERMANOVA', 'pseudo-F', sample_size, num_groups,
stat, p_value, permutations)
def _compute_f_stat(sample_size, num_groups, distance_matrix, group_sizes,
s_T, grouping):
"""Compute PERMANOVA pseudo-F statistic."""
# Calculate s_W for each group, accounting for different group sizes.
s_W = permanova_f_stat_sW_cy(distance_matrix.data,
group_sizes, grouping)
s_A = s_T - s_W
return (s_A / (num_groups - 1)) / (s_W / (sample_size - num_groups)) | 0.922813 | 0.749408 |
import abc
import copy
import pandas as pd
from skbio.util._decorator import stable, experimental
from skbio.metadata import IntervalMetadata
class MetadataMixin(metaclass=abc.ABCMeta):
@property
@stable(as_of="0.4.0")
def metadata(self):
"""``dict`` containing metadata which applies to the entire object.
Notes
-----
This property can be set and deleted. When setting new metadata a
shallow copy of the dictionary is made.
Examples
--------
.. note:: scikit-bio objects with metadata share a common interface for
accessing and manipulating their metadata. The following examples
use scikit-bio's ``Sequence`` class to demonstrate metadata
behavior. These examples apply to all other scikit-bio objects
storing metadata.
Create a sequence with metadata:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT', metadata={'description': 'seq description',
... 'id': 'seq-id'})
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'description': 'seq description', 'id': 'seq-id'}
Update metadata:
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
Set metadata:
>>> seq.metadata = {'abc': 123}
>>> seq.metadata
{'abc': 123}
Delete metadata:
>>> seq.has_metadata()
True
>>> del seq.metadata
>>> seq.metadata
{}
>>> seq.has_metadata()
False
"""
if self._metadata is None:
# Not using setter to avoid copy.
self._metadata = {}
return self._metadata
@metadata.setter
def metadata(self, metadata):
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict, not type %r" %
type(metadata).__name__)
# Shallow copy.
self._metadata = metadata.copy()
@metadata.deleter
def metadata(self):
self._metadata = None
@abc.abstractmethod
def __init__(self, metadata=None):
raise NotImplementedError
def _init_(self, metadata=None):
if metadata is None:
# Could use deleter but this is less overhead and needs to be fast.
self._metadata = None
else:
# Use setter for validation and copy.
self.metadata = metadata
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError
def _eq_(self, other):
# We're not simply comparing self.metadata to other.metadata in order
# to avoid creating "empty" metadata representations on the objects if
# they don't have metadata.
if self.has_metadata() and other.has_metadata():
return self.metadata == other.metadata
elif not (self.has_metadata() or other.has_metadata()):
# Both don't have metadata.
return True
else:
# One has metadata while the other does not.
return False
@abc.abstractmethod
def __ne__(self, other):
raise NotImplementedError
def _ne_(self, other):
return not (self == other)
@abc.abstractmethod
def __copy__(self):
raise NotImplementedError
def _copy_(self):
if self.has_metadata():
return self.metadata.copy()
else:
return None
@abc.abstractmethod
def __deepcopy__(self, memo):
raise NotImplementedError
def _deepcopy_(self, memo):
if self.has_metadata():
return copy.deepcopy(self.metadata, memo)
else:
return None
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the object has metadata.
An object has metadata if its ``metadata`` dictionary is not empty
(i.e., has at least one key-value pair).
Returns
-------
bool
Indicates whether the object has metadata.
Examples
--------
.. note:: scikit-bio objects with metadata share a common interface for
accessing and manipulating their metadata. The following examples
use scikit-bio's ``Sequence`` class to demonstrate metadata
behavior. These examples apply to all other scikit-bio objects
storing metadata.
>>> from skbio import Sequence
>>> seq = Sequence('ACGT')
>>> seq.has_metadata()
False
>>> seq = Sequence('ACGT', metadata={})
>>> seq.has_metadata()
False
>>> seq = Sequence('ACGT', metadata={'id': 'seq-id'})
>>> seq.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
class PositionalMetadataMixin(metaclass=abc.ABCMeta):
@abc.abstractmethod
def _positional_metadata_axis_len_(self):
"""Return length of axis that positional metadata applies to.
Returns
-------
int
Positional metadata axis length.
"""
raise NotImplementedError
@property
@stable(as_of="0.4.0")
def positional_metadata(self):
"""``pd.DataFrame`` containing metadata along an axis.
Notes
-----
This property can be set and deleted. When setting new positional
metadata, a shallow copy is made and the ``pd.DataFrame`` index is set
to ``pd.RangeIndex(start=0, stop=axis_len, step=1)``.
Examples
--------
.. note:: scikit-bio objects with positional metadata share a common
interface for accessing and manipulating their positional metadata.
The following examples use scikit-bio's ``DNA`` class to demonstrate
positional metadata behavior. These examples apply to all other
scikit-bio objects storing positional metadata.
Create a DNA sequence with positional metadata:
>>> from skbio import DNA
>>> seq = DNA(
... 'ACGT',
... positional_metadata={'exons': [True, True, False, True],
... 'quality': [3, 3, 20, 11]})
>>> seq
DNA
-----------------------------
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
GC-content: 50.00%
-----------------------------
0 ACGT
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 20
3 True 11
Update positional metadata:
>>> seq.positional_metadata['gaps'] = seq.gaps()
>>> seq.positional_metadata
exons quality gaps
0 True 3 False
1 True 3 False
2 False 20 False
3 True 11 False
Set positional metadata:
>>> seq.positional_metadata = {'degenerates': seq.degenerates()}
>>> seq.positional_metadata # doctest: +NORMALIZE_WHITESPACE
degenerates
0 False
1 False
2 False
3 False
Delete positional metadata:
>>> seq.has_positional_metadata()
True
>>> del seq.positional_metadata
>>> seq.positional_metadata
Empty DataFrame
Columns: []
Index: [0, 1, 2, 3]
>>> seq.has_positional_metadata()
False
"""
if self._positional_metadata is None:
# Not using setter to avoid copy.
self._positional_metadata = pd.DataFrame(
index=self._get_positional_metadata_index())
return self._positional_metadata
@positional_metadata.setter
def positional_metadata(self, positional_metadata):
try:
# Pass copy=True to copy underlying data buffer.
positional_metadata = pd.DataFrame(positional_metadata, copy=True)
# Different versions of pandas will raise different error types. We
# don't really care what the type of the error is, just its message, so
# a blanket Exception will do.
except Exception as e:
raise TypeError(
"Invalid positional metadata. Must be consumable by "
"`pd.DataFrame` constructor. Original pandas error message: "
"\"%s\"" % e)
num_rows = len(positional_metadata.index)
axis_len = self._positional_metadata_axis_len_()
if num_rows != axis_len:
raise ValueError(
"Number of positional metadata values (%d) must match the "
"positional metadata axis length (%d)."
% (num_rows, axis_len))
positional_metadata.index = self._get_positional_metadata_index()
self._positional_metadata = positional_metadata
@positional_metadata.deleter
def positional_metadata(self):
self._positional_metadata = None
def _get_positional_metadata_index(self):
"""Create a memory-efficient integer index for positional metadata."""
return pd.RangeIndex(start=0,
stop=self._positional_metadata_axis_len_(),
step=1)
@abc.abstractmethod
def __init__(self, positional_metadata=None):
raise NotImplementedError
def _init_(self, positional_metadata=None):
if positional_metadata is None:
# Could use deleter but this is less overhead and needs to be fast.
self._positional_metadata = None
else:
# Use setter for validation and copy.
self.positional_metadata = positional_metadata
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError
def _eq_(self, other):
# We're not simply comparing self.positional_metadata to
# other.positional_metadata in order to avoid creating "empty"
# positional metadata representations on the objects if they don't have
# positional metadata.
if self.has_positional_metadata() and other.has_positional_metadata():
return self.positional_metadata.equals(other.positional_metadata)
elif not (self.has_positional_metadata() or
other.has_positional_metadata()):
# Both don't have positional metadata.
return (self._positional_metadata_axis_len_() ==
other._positional_metadata_axis_len_())
else:
# One has positional metadata while the other does not.
return False
@abc.abstractmethod
def __ne__(self, other):
raise NotImplementedError
def _ne_(self, other):
return not (self == other)
@abc.abstractmethod
def __copy__(self):
raise NotImplementedError
def _copy_(self):
if self.has_positional_metadata():
# deep=True makes a shallow copy of the underlying data buffer.
return self.positional_metadata.copy(deep=True)
else:
return None
@abc.abstractmethod
def __deepcopy__(self, memo):
raise NotImplementedError
def _deepcopy_(self, memo):
if self.has_positional_metadata():
# `copy.deepcopy` no longer recursively copies contents of the
# DataFrame, so we must handle the deep copy ourselves.
# Reference: https://github.com/pandas-dev/pandas/issues/17406
df = self.positional_metadata
data_cp = copy.deepcopy(df.values.tolist(), memo)
return pd.DataFrame(data_cp,
index=df.index.copy(deep=True),
columns=df.columns.copy(deep=True),
copy=False)
else:
return None
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the object has positional metadata.
An object has positional metadata if its ``positional_metadata``
``pd.DataFrame`` has at least one column.
Returns
-------
bool
Indicates whether the object has positional metadata.
Examples
--------
.. note:: scikit-bio objects with positional metadata share a common
interface for accessing and manipulating their positional metadata.
The following examples use scikit-bio's ``DNA`` class to demonstrate
positional metadata behavior. These examples apply to all other
scikit-bio objects storing positional metadata.
>>> import pandas as pd
>>> from skbio import DNA
>>> seq = DNA('ACGT')
>>> seq.has_positional_metadata()
False
>>> seq = DNA('ACGT', positional_metadata=pd.DataFrame(index=range(4)))
>>> seq.has_positional_metadata()
False
>>> seq = DNA('ACGT', positional_metadata={'quality': range(4)})
>>> seq.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
class IntervalMetadataMixin(metaclass=abc.ABCMeta):
@abc.abstractmethod
def _interval_metadata_axis_len_(self):
'''Return length of axis that interval metadata applies to.
Returns
-------
int
Interval metadata axis length.
'''
raise NotImplementedError
@abc.abstractmethod
def __init__(self, interval_metadata=None):
raise NotImplementedError
def _init_(self, interval_metadata=None):
if interval_metadata is None:
# Could use deleter but this is less overhead and needs to be fast.
self._interval_metadata = None
else:
# Use setter for validation and copy.
self.interval_metadata = interval_metadata
@property
@experimental(as_of="0.5.1")
def interval_metadata(self):
'''``IntervalMetadata`` object containing info about interval features.
Notes
-----
This property can be set and deleted. When setting new
interval metadata, a shallow copy of the ``IntervalMetadata``
object is made.
'''
if self._interval_metadata is None:
# Not using setter to avoid copy.
self._interval_metadata = IntervalMetadata(
self._interval_metadata_axis_len_())
return self._interval_metadata
@interval_metadata.setter
def interval_metadata(self, interval_metadata):
if isinstance(interval_metadata, IntervalMetadata):
upper_bound = interval_metadata.upper_bound
lower_bound = interval_metadata.lower_bound
axis_len = self._interval_metadata_axis_len_()
if lower_bound != 0:
raise ValueError(
'The lower bound for the interval features (%d) '
'must be zero.' % lower_bound)
if upper_bound is not None and upper_bound != axis_len:
raise ValueError(
'The upper bound for the interval features (%d) '
'must match the interval metadata axis length (%d)'
% (upper_bound, axis_len))
# copy all the data to the mixin
self._interval_metadata = IntervalMetadata(
axis_len, copy_from=interval_metadata)
else:
raise TypeError('You must provide `IntervalMetadata` object, '
'not type %s.' % type(interval_metadata).__name__)
@interval_metadata.deleter
def interval_metadata(self):
self._interval_metadata = None
@experimental(as_of="0.5.1")
def has_interval_metadata(self):
"""Determine if the object has interval metadata.
An object has interval metadata if its ``interval_metadata``
has at least one ```Interval`` objects.
Returns
-------
bool
Indicates whether the object has interval metadata.
"""
return (self._interval_metadata is not None and
self.interval_metadata.num_interval_features > 0)
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError
def _eq_(self, other):
# We're not simply comparing self.interval_metadata to
# other.interval_metadata in order to avoid creating "empty"
# interval metadata representations on the objects if they don't have
# interval metadata.
if self.has_interval_metadata() and other.has_interval_metadata():
return self.interval_metadata == other.interval_metadata
elif not (self.has_interval_metadata() or
other.has_interval_metadata()):
# Both don't have interval metadata.
return (self._interval_metadata_axis_len_() ==
other._interval_metadata_axis_len_())
else:
# One has interval metadata while the other does not.
return False
@abc.abstractmethod
def __ne__(self, other):
raise NotImplementedError
def _ne_(self, other):
return not (self == other)
@abc.abstractmethod
def __copy__(self):
raise NotImplementedError
def _copy_(self):
if self.has_interval_metadata():
return copy.copy(self.interval_metadata)
else:
return None
@abc.abstractmethod
def __deepcopy__(self, memo):
raise NotImplementedError
def _deepcopy_(self, memo):
if self.has_interval_metadata():
return copy.deepcopy(self.interval_metadata, memo)
else:
return None | scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/skbio/metadata/_mixin.py | _mixin.py |
import abc
import copy
import pandas as pd
from skbio.util._decorator import stable, experimental
from skbio.metadata import IntervalMetadata
class MetadataMixin(metaclass=abc.ABCMeta):
@property
@stable(as_of="0.4.0")
def metadata(self):
"""``dict`` containing metadata which applies to the entire object.
Notes
-----
This property can be set and deleted. When setting new metadata a
shallow copy of the dictionary is made.
Examples
--------
.. note:: scikit-bio objects with metadata share a common interface for
accessing and manipulating their metadata. The following examples
use scikit-bio's ``Sequence`` class to demonstrate metadata
behavior. These examples apply to all other scikit-bio objects
storing metadata.
Create a sequence with metadata:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT', metadata={'description': 'seq description',
... 'id': 'seq-id'})
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'description': 'seq description', 'id': 'seq-id'}
Update metadata:
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
Set metadata:
>>> seq.metadata = {'abc': 123}
>>> seq.metadata
{'abc': 123}
Delete metadata:
>>> seq.has_metadata()
True
>>> del seq.metadata
>>> seq.metadata
{}
>>> seq.has_metadata()
False
"""
if self._metadata is None:
# Not using setter to avoid copy.
self._metadata = {}
return self._metadata
@metadata.setter
def metadata(self, metadata):
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict, not type %r" %
type(metadata).__name__)
# Shallow copy.
self._metadata = metadata.copy()
@metadata.deleter
def metadata(self):
self._metadata = None
@abc.abstractmethod
def __init__(self, metadata=None):
raise NotImplementedError
def _init_(self, metadata=None):
if metadata is None:
# Could use deleter but this is less overhead and needs to be fast.
self._metadata = None
else:
# Use setter for validation and copy.
self.metadata = metadata
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError
def _eq_(self, other):
# We're not simply comparing self.metadata to other.metadata in order
# to avoid creating "empty" metadata representations on the objects if
# they don't have metadata.
if self.has_metadata() and other.has_metadata():
return self.metadata == other.metadata
elif not (self.has_metadata() or other.has_metadata()):
# Both don't have metadata.
return True
else:
# One has metadata while the other does not.
return False
@abc.abstractmethod
def __ne__(self, other):
raise NotImplementedError
def _ne_(self, other):
return not (self == other)
@abc.abstractmethod
def __copy__(self):
raise NotImplementedError
def _copy_(self):
if self.has_metadata():
return self.metadata.copy()
else:
return None
@abc.abstractmethod
def __deepcopy__(self, memo):
raise NotImplementedError
def _deepcopy_(self, memo):
if self.has_metadata():
return copy.deepcopy(self.metadata, memo)
else:
return None
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the object has metadata.
An object has metadata if its ``metadata`` dictionary is not empty
(i.e., has at least one key-value pair).
Returns
-------
bool
Indicates whether the object has metadata.
Examples
--------
.. note:: scikit-bio objects with metadata share a common interface for
accessing and manipulating their metadata. The following examples
use scikit-bio's ``Sequence`` class to demonstrate metadata
behavior. These examples apply to all other scikit-bio objects
storing metadata.
>>> from skbio import Sequence
>>> seq = Sequence('ACGT')
>>> seq.has_metadata()
False
>>> seq = Sequence('ACGT', metadata={})
>>> seq.has_metadata()
False
>>> seq = Sequence('ACGT', metadata={'id': 'seq-id'})
>>> seq.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
class PositionalMetadataMixin(metaclass=abc.ABCMeta):
@abc.abstractmethod
def _positional_metadata_axis_len_(self):
"""Return length of axis that positional metadata applies to.
Returns
-------
int
Positional metadata axis length.
"""
raise NotImplementedError
@property
@stable(as_of="0.4.0")
def positional_metadata(self):
"""``pd.DataFrame`` containing metadata along an axis.
Notes
-----
This property can be set and deleted. When setting new positional
metadata, a shallow copy is made and the ``pd.DataFrame`` index is set
to ``pd.RangeIndex(start=0, stop=axis_len, step=1)``.
Examples
--------
.. note:: scikit-bio objects with positional metadata share a common
interface for accessing and manipulating their positional metadata.
The following examples use scikit-bio's ``DNA`` class to demonstrate
positional metadata behavior. These examples apply to all other
scikit-bio objects storing positional metadata.
Create a DNA sequence with positional metadata:
>>> from skbio import DNA
>>> seq = DNA(
... 'ACGT',
... positional_metadata={'exons': [True, True, False, True],
... 'quality': [3, 3, 20, 11]})
>>> seq
DNA
-----------------------------
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
has gaps: False
has degenerates: False
has definites: True
GC-content: 50.00%
-----------------------------
0 ACGT
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 20
3 True 11
Update positional metadata:
>>> seq.positional_metadata['gaps'] = seq.gaps()
>>> seq.positional_metadata
exons quality gaps
0 True 3 False
1 True 3 False
2 False 20 False
3 True 11 False
Set positional metadata:
>>> seq.positional_metadata = {'degenerates': seq.degenerates()}
>>> seq.positional_metadata # doctest: +NORMALIZE_WHITESPACE
degenerates
0 False
1 False
2 False
3 False
Delete positional metadata:
>>> seq.has_positional_metadata()
True
>>> del seq.positional_metadata
>>> seq.positional_metadata
Empty DataFrame
Columns: []
Index: [0, 1, 2, 3]
>>> seq.has_positional_metadata()
False
"""
if self._positional_metadata is None:
# Not using setter to avoid copy.
self._positional_metadata = pd.DataFrame(
index=self._get_positional_metadata_index())
return self._positional_metadata
@positional_metadata.setter
def positional_metadata(self, positional_metadata):
try:
# Pass copy=True to copy underlying data buffer.
positional_metadata = pd.DataFrame(positional_metadata, copy=True)
# Different versions of pandas will raise different error types. We
# don't really care what the type of the error is, just its message, so
# a blanket Exception will do.
except Exception as e:
raise TypeError(
"Invalid positional metadata. Must be consumable by "
"`pd.DataFrame` constructor. Original pandas error message: "
"\"%s\"" % e)
num_rows = len(positional_metadata.index)
axis_len = self._positional_metadata_axis_len_()
if num_rows != axis_len:
raise ValueError(
"Number of positional metadata values (%d) must match the "
"positional metadata axis length (%d)."
% (num_rows, axis_len))
positional_metadata.index = self._get_positional_metadata_index()
self._positional_metadata = positional_metadata
@positional_metadata.deleter
def positional_metadata(self):
self._positional_metadata = None
def _get_positional_metadata_index(self):
"""Create a memory-efficient integer index for positional metadata."""
return pd.RangeIndex(start=0,
stop=self._positional_metadata_axis_len_(),
step=1)
@abc.abstractmethod
def __init__(self, positional_metadata=None):
raise NotImplementedError
def _init_(self, positional_metadata=None):
if positional_metadata is None:
# Could use deleter but this is less overhead and needs to be fast.
self._positional_metadata = None
else:
# Use setter for validation and copy.
self.positional_metadata = positional_metadata
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError
def _eq_(self, other):
# We're not simply comparing self.positional_metadata to
# other.positional_metadata in order to avoid creating "empty"
# positional metadata representations on the objects if they don't have
# positional metadata.
if self.has_positional_metadata() and other.has_positional_metadata():
return self.positional_metadata.equals(other.positional_metadata)
elif not (self.has_positional_metadata() or
other.has_positional_metadata()):
# Both don't have positional metadata.
return (self._positional_metadata_axis_len_() ==
other._positional_metadata_axis_len_())
else:
# One has positional metadata while the other does not.
return False
@abc.abstractmethod
def __ne__(self, other):
raise NotImplementedError
def _ne_(self, other):
return not (self == other)
@abc.abstractmethod
def __copy__(self):
raise NotImplementedError
def _copy_(self):
if self.has_positional_metadata():
# deep=True makes a shallow copy of the underlying data buffer.
return self.positional_metadata.copy(deep=True)
else:
return None
@abc.abstractmethod
def __deepcopy__(self, memo):
raise NotImplementedError
def _deepcopy_(self, memo):
if self.has_positional_metadata():
# `copy.deepcopy` no longer recursively copies contents of the
# DataFrame, so we must handle the deep copy ourselves.
# Reference: https://github.com/pandas-dev/pandas/issues/17406
df = self.positional_metadata
data_cp = copy.deepcopy(df.values.tolist(), memo)
return pd.DataFrame(data_cp,
index=df.index.copy(deep=True),
columns=df.columns.copy(deep=True),
copy=False)
else:
return None
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the object has positional metadata.
An object has positional metadata if its ``positional_metadata``
``pd.DataFrame`` has at least one column.
Returns
-------
bool
Indicates whether the object has positional metadata.
Examples
--------
.. note:: scikit-bio objects with positional metadata share a common
interface for accessing and manipulating their positional metadata.
The following examples use scikit-bio's ``DNA`` class to demonstrate
positional metadata behavior. These examples apply to all other
scikit-bio objects storing positional metadata.
>>> import pandas as pd
>>> from skbio import DNA
>>> seq = DNA('ACGT')
>>> seq.has_positional_metadata()
False
>>> seq = DNA('ACGT', positional_metadata=pd.DataFrame(index=range(4)))
>>> seq.has_positional_metadata()
False
>>> seq = DNA('ACGT', positional_metadata={'quality': range(4)})
>>> seq.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
class IntervalMetadataMixin(metaclass=abc.ABCMeta):
@abc.abstractmethod
def _interval_metadata_axis_len_(self):
'''Return length of axis that interval metadata applies to.
Returns
-------
int
Interval metadata axis length.
'''
raise NotImplementedError
@abc.abstractmethod
def __init__(self, interval_metadata=None):
raise NotImplementedError
def _init_(self, interval_metadata=None):
if interval_metadata is None:
# Could use deleter but this is less overhead and needs to be fast.
self._interval_metadata = None
else:
# Use setter for validation and copy.
self.interval_metadata = interval_metadata
@property
@experimental(as_of="0.5.1")
def interval_metadata(self):
'''``IntervalMetadata`` object containing info about interval features.
Notes
-----
This property can be set and deleted. When setting new
interval metadata, a shallow copy of the ``IntervalMetadata``
object is made.
'''
if self._interval_metadata is None:
# Not using setter to avoid copy.
self._interval_metadata = IntervalMetadata(
self._interval_metadata_axis_len_())
return self._interval_metadata
@interval_metadata.setter
def interval_metadata(self, interval_metadata):
if isinstance(interval_metadata, IntervalMetadata):
upper_bound = interval_metadata.upper_bound
lower_bound = interval_metadata.lower_bound
axis_len = self._interval_metadata_axis_len_()
if lower_bound != 0:
raise ValueError(
'The lower bound for the interval features (%d) '
'must be zero.' % lower_bound)
if upper_bound is not None and upper_bound != axis_len:
raise ValueError(
'The upper bound for the interval features (%d) '
'must match the interval metadata axis length (%d)'
% (upper_bound, axis_len))
# copy all the data to the mixin
self._interval_metadata = IntervalMetadata(
axis_len, copy_from=interval_metadata)
else:
raise TypeError('You must provide `IntervalMetadata` object, '
'not type %s.' % type(interval_metadata).__name__)
@interval_metadata.deleter
def interval_metadata(self):
self._interval_metadata = None
@experimental(as_of="0.5.1")
def has_interval_metadata(self):
"""Determine if the object has interval metadata.
An object has interval metadata if its ``interval_metadata``
has at least one ```Interval`` objects.
Returns
-------
bool
Indicates whether the object has interval metadata.
"""
return (self._interval_metadata is not None and
self.interval_metadata.num_interval_features > 0)
@abc.abstractmethod
def __eq__(self, other):
raise NotImplementedError
def _eq_(self, other):
# We're not simply comparing self.interval_metadata to
# other.interval_metadata in order to avoid creating "empty"
# interval metadata representations on the objects if they don't have
# interval metadata.
if self.has_interval_metadata() and other.has_interval_metadata():
return self.interval_metadata == other.interval_metadata
elif not (self.has_interval_metadata() or
other.has_interval_metadata()):
# Both don't have interval metadata.
return (self._interval_metadata_axis_len_() ==
other._interval_metadata_axis_len_())
else:
# One has interval metadata while the other does not.
return False
@abc.abstractmethod
def __ne__(self, other):
raise NotImplementedError
def _ne_(self, other):
return not (self == other)
@abc.abstractmethod
def __copy__(self):
raise NotImplementedError
def _copy_(self):
if self.has_interval_metadata():
return copy.copy(self.interval_metadata)
else:
return None
@abc.abstractmethod
def __deepcopy__(self, memo):
raise NotImplementedError
def _deepcopy_(self, memo):
if self.has_interval_metadata():
return copy.deepcopy(self.interval_metadata, memo)
else:
return None | 0.883488 | 0.296957 |
This README file describes the FASTQ example files provided as supplementary
information to the open-access publication:
P.J.A. Cock, C.J. Fields, N. Goto, M.L. Heuer and P.M. Rice (2009). The Sanger
FASTQ file format for sequences with quality scores, and the Solexa/Illumina
FASTQ variants.
These files are provided freely and we encourage anyone writing a FASTQ parser
to use them as part of your test suite. Permission is granted to freely
distribute and modify the files. We request (but do not insist) that this
README file is included, or at least a reference to the above paper. Please
cite the above paper if appropriate. We also request (but do not insist) that
the example files are not modified, in order that they may serve as a common
reference.
Invalid FASTQ files
===================
The archive contains the following sample FASTQ files with names of the form
error_NAME.fastq, which all contain errors and should be rejected (if parsed
as any of the three FASTQ variants):
error_diff_ids.fastq
error_double_qual.fastq
error_double_seq.fastq
error_long_qual.fastq
error_no_qual.fastq
error_qual_del.fastq
error_qual_escape.fastq
error_qual_null.fastq
error_qual_space.fastq
error_qual_tab.fastq
error_qual_unit_sep.fastq
error_qual_vtab.fastq
error_short_qual.fastq
error_spaces.fastq
error_tabs.fastq
error_trunc_at_seq.fastq
error_trunc_at_plus.fastq
error_trunc_at_qual.fastq
error_trunc_in_title.fastq
error_trunc_in_seq.fastq
error_trunc_in_plus.fastq
error_trunc_in_qual.fastq
Of these, those with names error_qual_XXX.fastq would be valid except for the
inclusion of spaces or non-printing ASCII characters outside the range allowed
in the quality string. The files named error_trunc_XXX.fastq would be valid
but for being truncated (e.g. simulating a partial copy over the network).
The special cases of FASTQ files which would be valid as one variant, but not
another, are covered below.
Valid FASTQ
===========
The archive contains the following valid sample FASTQ input files for testing:
longreads_original_sanger.fastq
wrapping_original_sanger.fastq
illumina_full_range_original_illumina.fastq
sanger_full_range_original_sanger.fastq
solexa_full_range_original_solexa.fastq
misc_dna_original_sanger.fastq
misc_rna_original_sanger.fastq
These all have the form NAME_original_FORMAT.fastq, where NAME is a prefix for
that example, and FORMAT is one of sanger, solexa or illumina indicating which
FASTQ variant that example is using. There are three matching files called
NAME_as_FORMAT.fastq showing how the original file should be converted into
each of the three FASTQ variants. These converted files are standardised not
to use line wrapping (so each record has exactly four lines), and omit the
optional repetition of the read titles on the plus line.
The file longreads_original_sanger.fastq is based on real Roche 454 reads from
the Sanger Institute for the the potato cyst nematodes Globodera pallida. Ten
of the reads have been presented as FASTQ records, wrapping the sequence and
the quality lines at 80 characters. This means some of the quality lines start
with "@" or "+" characters, which may cause problems with naive parsers. Also
note that the sequence is mixed case (with upper case denoting the trimmed
region), and furthermore the free format title lines are over 100 characters
and encode assorted read information (and are repeated on the "+" lines).
The wrapping_original_sanger.fastq is based on three real reads from the NCBI
Short Read Archive, but has been carefully edited to use line wrapping for the
quality lines (but not the sequence lines) such that the due to the occurrence
of "@" and "+" on alternating lines, the file may be misinterpreted by a
simplistic parser. While this is therefore a very artificial example, it
remains a valid FASTQ file, and is useful for testing purposes.
The sanger_full_range_original_sanger.fastq file uses PHRED scores from 0 to
93 inclusive, covering ASCII characters from 33 (!) to 126 (~). This means it
cannot be treated as a Solexa or Illumina 1.3+ FASTQ file, and attempting to
parse it as such should raise an error.
The solexa_full_range_original_solexa.fastq file uses Solexa scores from -5 to
62 inclusive, covering ASCII characters from 59 (;) to 126 (~). This means it
cannot be treated as a Illumina 1.3+ FASTQ file, and attempting to parse it as
such should raise an error. On the basis of the quality characters, the file
would also qualify as a valid Sanger FASTQ file.
The illumina_full_range_original_illumina.fastq file uses PHRED scores from 0
to 62 inclusive, covering ASCII characters from 64 (@) to 126 (~). On the
basis of the quality characters, the file would also qualify as a valid Sanger
or Solexa FASTQ file.
The misc_dna_original_sanger.fastq and misc_rna_original_sanger.fastq files
are artificial reads using the full range of IUPAC DNA or RNA letters,
including ambiguous character codes, and both cases.
| scikit-bio | /scikit-bio-0.5.9.tar.gz/scikit-bio-0.5.9/licenses/fastq-example-files-readme.txt | fastq-example-files-readme.txt | This README file describes the FASTQ example files provided as supplementary
information to the open-access publication:
P.J.A. Cock, C.J. Fields, N. Goto, M.L. Heuer and P.M. Rice (2009). The Sanger
FASTQ file format for sequences with quality scores, and the Solexa/Illumina
FASTQ variants.
These files are provided freely and we encourage anyone writing a FASTQ parser
to use them as part of your test suite. Permission is granted to freely
distribute and modify the files. We request (but do not insist) that this
README file is included, or at least a reference to the above paper. Please
cite the above paper if appropriate. We also request (but do not insist) that
the example files are not modified, in order that they may serve as a common
reference.
Invalid FASTQ files
===================
The archive contains the following sample FASTQ files with names of the form
error_NAME.fastq, which all contain errors and should be rejected (if parsed
as any of the three FASTQ variants):
error_diff_ids.fastq
error_double_qual.fastq
error_double_seq.fastq
error_long_qual.fastq
error_no_qual.fastq
error_qual_del.fastq
error_qual_escape.fastq
error_qual_null.fastq
error_qual_space.fastq
error_qual_tab.fastq
error_qual_unit_sep.fastq
error_qual_vtab.fastq
error_short_qual.fastq
error_spaces.fastq
error_tabs.fastq
error_trunc_at_seq.fastq
error_trunc_at_plus.fastq
error_trunc_at_qual.fastq
error_trunc_in_title.fastq
error_trunc_in_seq.fastq
error_trunc_in_plus.fastq
error_trunc_in_qual.fastq
Of these, those with names error_qual_XXX.fastq would be valid except for the
inclusion of spaces or non-printing ASCII characters outside the range allowed
in the quality string. The files named error_trunc_XXX.fastq would be valid
but for being truncated (e.g. simulating a partial copy over the network).
The special cases of FASTQ files which would be valid as one variant, but not
another, are covered below.
Valid FASTQ
===========
The archive contains the following valid sample FASTQ input files for testing:
longreads_original_sanger.fastq
wrapping_original_sanger.fastq
illumina_full_range_original_illumina.fastq
sanger_full_range_original_sanger.fastq
solexa_full_range_original_solexa.fastq
misc_dna_original_sanger.fastq
misc_rna_original_sanger.fastq
These all have the form NAME_original_FORMAT.fastq, where NAME is a prefix for
that example, and FORMAT is one of sanger, solexa or illumina indicating which
FASTQ variant that example is using. There are three matching files called
NAME_as_FORMAT.fastq showing how the original file should be converted into
each of the three FASTQ variants. These converted files are standardised not
to use line wrapping (so each record has exactly four lines), and omit the
optional repetition of the read titles on the plus line.
The file longreads_original_sanger.fastq is based on real Roche 454 reads from
the Sanger Institute for the the potato cyst nematodes Globodera pallida. Ten
of the reads have been presented as FASTQ records, wrapping the sequence and
the quality lines at 80 characters. This means some of the quality lines start
with "@" or "+" characters, which may cause problems with naive parsers. Also
note that the sequence is mixed case (with upper case denoting the trimmed
region), and furthermore the free format title lines are over 100 characters
and encode assorted read information (and are repeated on the "+" lines).
The wrapping_original_sanger.fastq is based on three real reads from the NCBI
Short Read Archive, but has been carefully edited to use line wrapping for the
quality lines (but not the sequence lines) such that the due to the occurrence
of "@" and "+" on alternating lines, the file may be misinterpreted by a
simplistic parser. While this is therefore a very artificial example, it
remains a valid FASTQ file, and is useful for testing purposes.
The sanger_full_range_original_sanger.fastq file uses PHRED scores from 0 to
93 inclusive, covering ASCII characters from 33 (!) to 126 (~). This means it
cannot be treated as a Solexa or Illumina 1.3+ FASTQ file, and attempting to
parse it as such should raise an error.
The solexa_full_range_original_solexa.fastq file uses Solexa scores from -5 to
62 inclusive, covering ASCII characters from 59 (;) to 126 (~). This means it
cannot be treated as a Illumina 1.3+ FASTQ file, and attempting to parse it as
such should raise an error. On the basis of the quality characters, the file
would also qualify as a valid Sanger FASTQ file.
The illumina_full_range_original_illumina.fastq file uses PHRED scores from 0
to 62 inclusive, covering ASCII characters from 64 (@) to 126 (~). On the
basis of the quality characters, the file would also qualify as a valid Sanger
or Solexa FASTQ file.
The misc_dna_original_sanger.fastq and misc_rna_original_sanger.fastq files
are artificial reads using the full range of IUPAC DNA or RNA letters,
including ambiguous character codes, and both cases.
| 0.799129 | 0.636141 |
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Optional, Tuple
import numpy as np
from scipy.optimize import minimize
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import (
_check_sample_weight,
check_array,
check_is_fitted,
check_X_y,
)
class BaseScipyMinimizeRegressor(BaseEstimator, RegressorMixin, ABC):
"""
Base class for regressors relying on scipy's minimize method. Derive a class from this one and give it the function to be minimized.
Parameters
----------
alpha : float, default=0.0
Constant that multiplies the penalty terms.
l1_ratio : float, default=0.0
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
When set to True, forces the coefficients to be positive.
Attributes
----------
coef_ : np.ndarray of shape (n_features,)
Estimated coefficients of the model.
intercept_ : float
Independent term in the linear model. Set to 0.0 if fit_intercept = False.
Notes
-----
This implementation uses scipy.optimize.minimize, see
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
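Examples
--------
A minimal sketch of a concrete subclass (illustrative only; the class name is
hypothetical): implement ``_get_objective`` so that it returns the loss to be
minimized together with its gradient, here a plain (weighted) mean squared
error.

>>> class MeanSquaredRegressor(BaseScipyMinimizeRegressor):
...     def _get_objective(self, X, y, sample_weight):
...         @self._loss_regularize
...         def loss(params):
...             return 0.5 * np.mean(sample_weight * np.square(y - X @ params))
...         @self._grad_loss_regularize
...         def grad_loss(params):
...             return -(sample_weight * (y - X @ params)) @ X / X.shape[0]
...         return loss, grad_loss

Fitting such a subclass then amounts to (regularized, weighted) least squares.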
"""
def __init__(
self,
alpha: float = 0.0,
l1_ratio: float = 0.0,
fit_intercept: bool = True,
copy_X: bool = True,
positive: bool = False,
) -> None:
"""Initialize."""
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.copy_X = copy_X
self.positive = positive
@abstractmethod
def _get_objective(
self, X: np.ndarray, y: np.ndarray, sample_weight: np.ndarray
) -> Tuple[Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]]:
"""
Produce the loss function to be minimized, and its gradient to speed up computations.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.ndarray], default=None
Individual weights for each sample.
Returns
-------
loss : Callable[[np.ndarray], float]
The loss function to be minimized.
grad_loss : Callable[[np.ndarray], np.ndarray]
The gradient of the loss function. Speeds up finding the minimum.
"""
def _loss_regularize(self, loss):
def regularized_loss(params):
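# Note: `params` includes the appended intercept term when fit_intercept=True,
# so the penalty terms are applied to the intercept as well.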
return (
loss(params)
+ self.alpha * self.l1_ratio * np.sum(np.abs(params))
+ 0.5 * self.alpha * (1 - self.l1_ratio) * np.sum(params ** 2)
)
return regularized_loss
def _grad_loss_regularize(self, grad_loss):
def regularized_grad_loss(params):
return (
grad_loss(params)
+ self.alpha * self.l1_ratio * np.sign(params)
+ self.alpha * (1 - self.l1_ratio) * params
)
return regularized_grad_loss
def fit(
self,
X: np.ndarray,
y: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> BaseScipyMinimizeRegressor:
"""
Fit the model using the SLSQP algorithm.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.ndarray], default=None
Individual weights for each sample.
Returns
-------
Fitted regressor.
"""
X_, grad_loss, loss = self._prepare_inputs(X, sample_weight, y)
d = X_.shape[1] - self.n_features_in_ # This is either zero or one.
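# With positive=True, only the feature coefficients are bounded below by zero;
# the intercept column (if present) stays unbounded.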
bounds = (
self.n_features_in_ * [(0, np.inf)] + d * [(-np.inf, np.inf)]
if self.positive
else None
)
minimize_result = minimize(
loss,
x0=np.zeros(self.n_features_in_ + d),
bounds=bounds,
method="SLSQP",
jac=grad_loss,
tol=1e-20,
)
self.convergence_status_ = minimize_result.message
if self.fit_intercept:
*self.coef_, self.intercept_ = minimize_result.x
else:
self.coef_ = minimize_result.x
self.intercept_ = 0.0
self.coef_ = np.array(self.coef_)
return self
def _prepare_inputs(self, X, sample_weight, y):
X, y = check_X_y(X, y)
sample_weight = _check_sample_weight(sample_weight, X)
self._check_n_features(X, reset=True)
n = X.shape[0]
if self.copy_X:
X_ = X.copy()
else:
X_ = X
if self.fit_intercept:
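# Append a column of ones so the intercept is fitted as just another coefficient.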
X_ = np.hstack([X_, np.ones(shape=(n, 1))])
loss, grad_loss = self._get_objective(X_, y, sample_weight)
return X_, grad_loss, loss
def predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict using the linear model.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
return X @ self.coef_ + self.intercept_
class LADRegression(BaseScipyMinimizeRegressor):
"""
Least absolute deviation Regression.
`LADRegression` fits a linear model to minimize the residual sum of absolute deviations between
the observed targets in the dataset, and the targets predicted by the linear approximation, i.e.
1 / n_samples * ||y - Xw||_1
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||_2 ** 2
Compared to linear regression, this approach is robust to outliers. You can even
optimize for the lowest MAPE (Mean Absolute Percentage Error) by passing np.abs(1 / y_train) as the
`sample_weight` keyword when fitting the regressor.
Parameters
----------
alpha : float, default=0.0
Constant that multiplies the penalty terms.
l1_ratio : float, default=0.0
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
When set to True, forces the coefficients to be positive.
Attributes
----------
coef_ : np.ndarray of shape (n_features,)
Estimated coefficients of the model.
intercept_ : float
Independent term in the linear model. Set to 0.0 if fit_intercept = False.
Notes
-----
This implementation uses scipy.optimize.minimize, see
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Examples
--------
>>> import numpy as np
>>> np.random.seed(0)
>>> X = np.random.randn(100, 4)
>>> y = X @ np.array([1, 2, 3, 4])
>>> l = LADRegression().fit(X, y)
>>> l.coef_
array([1., 2., 3., 4.])
>>> import numpy as np
>>> np.random.seed(0)
>>> X = np.random.randn(100, 4)
>>> y = X @ np.array([-1, 2, -3, 4])
>>> l = LADRegression(positive=True).fit(X, y)
>>> l.coef_
array([8.44480086e-17, 1.42423304e+00, 1.97135192e-16, 4.29789588e+00])
"""
def _get_objective(
self, X: np.ndarray, y: np.ndarray, sample_weight: np.ndarray
) -> Tuple[Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]]:
@self._loss_regularize
def mae_loss(params):
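# (Weighted) mean absolute error of the residuals.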
return np.mean(sample_weight * np.abs(y - X @ params))
@self._grad_loss_regularize
def grad_mae_loss(params):
return -(sample_weight * np.sign(y - X @ params)) @ X / X.shape[0]
return mae_loss, grad_mae_loss
class QuantileRegression(BaseScipyMinimizeRegressor):
"""
Compute Quantile Regression. This can be used for computing confidence intervals of linear regressions.
`QuantileRegression` fits a linear model to minimize a weighted residual sum of absolute deviations between
the observed targets in the dataset and the targets predicted by the linear approximation, i.e.
1 / n_samples * ||switch * (y - Xw)||_1
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||_2 ** 2
where switch is a vector with value `quantile` if y - Xw > 0, else `1 - quantile`. For its default value of
`quantile=0.5`, the regressor reduces to `LADRegression`.
Compared to linear regression, this approach is robust to outliers.
Parameters
----------
alpha : float, default=0.0
Constant that multiplies the penalty terms.
l1_ratio : float, default=0.0
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
When set to True, forces the coefficients to be positive.
quantile : float, between 0 and 1, default=0.5
The line output by the model will have a share of approximately `quantile` data points under it.
For example, a value of `quantile=1` outputs a line that is above each data point. `quantile=0.5` corresponds to LADRegression.
Attributes
----------
coef_ : np.ndarray of shape (n_features,)
Estimated coefficients of the model.
intercept_ : float
Independent term in the linear model. Set to 0.0 if fit_intercept = False.
Notes
-----
This implementation uses scipy.optimize.minimize, see
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Examples
--------
>>> import numpy as np
>>> np.random.seed(0)
>>> X = np.random.randn(100, 4)
>>> y = X @ np.array([1, 2, 3, 4])
>>> l = QuantileRegression().fit(X, y)
>>> l.coef_
array([1., 2., 3., 4.])
>>> import numpy as np
>>> np.random.seed(0)
>>> X = np.random.randn(100, 4)
>>> y = X @ np.array([-1, 2, -3, 4])
>>> l = QuantileRegression(quantile=0.8).fit(X, y)
>>> l.coef_
array([-1., 2., -3., 4.])
"""
def __init__(
self,
alpha: float = 0.0,
l1_ratio: float = 0.0,
fit_intercept: bool = True,
copy_X: bool = True,
positive: bool = False,
quantile: float = 0.5,
) -> None:
"""Initialize."""
super().__init__(alpha, l1_ratio, fit_intercept, copy_X, positive)
self.quantile = quantile
def _get_objective(
self, X: np.ndarray, y: np.ndarray, sample_weight: np.ndarray
) -> Tuple[Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]]:
@self._loss_regularize
def imbalanced_loss(params):
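# Pinball loss: residuals where the model underestimates (X @ params < y) are
# weighted by `quantile`, overestimations by `1 - quantile`.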
return np.mean(
sample_weight
* np.where(X @ params < y, self.quantile, 1 - self.quantile)
* np.abs(y - X @ params)
)
@self._grad_loss_regularize
def grad_imbalanced_loss(params):
return (
-(
sample_weight
* np.where(X @ params < y, self.quantile, 1 - self.quantile)
* np.sign(y - X @ params)
)
@ X
/ X.shape[0]
)
return imbalanced_loss, grad_imbalanced_loss
def fit(
self,
X: np.ndarray,
y: np.ndarray,
sample_weight: Optional[np.ndarray] = None,
) -> "QuantileRegression":
"""
Fit the model using the SLSQP algorithm.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.ndarray], default=None
Individual weights for each sample.
Returns
-------
Fitted regressor.
"""
if 0 <= self.quantile <= 1:
super().fit(X, y, sample_weight)
else:
raise ValueError("Parameter quantile should be between zero and one.")
return self
class ImbalancedLinearRegression(BaseScipyMinimizeRegressor):
"""
Linear regression where overestimating is `overestimation_punishment_factor` times worse than underestimating.
A value of `overestimation_punishment_factor=5` implies that overestimations by the model are penalized with a factor of 5
while underestimations have a default factor of 1. The formula optimized for is
1 / (2 * n_samples) * switch * ||y - Xw||_2 ** 2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||_2 ** 2
where switch is a vector with value `overestimation_punishment_factor` if y - Xw < 0, else 1.
ImbalancedLinearRegression fits a linear model to minimize the residual sum of squares between
the observed targets in the dataset, and the targets predicted by the linear approximation.
Compared to normal linear regression, this approach allows for a different treatment of over or under estimations.
Parameters
----------
alpha : float, default=0.0
Constant that multiplies the penalty terms.
l1_ratio : float, default=0.0
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
When set to True, forces the coefficients to be positive.
overestimation_punishment_factor : float, default=1
Factor to punish overestimations more (if the value is larger than 1) or less (if the value is between 0 and 1).
Attributes
----------
coef_ : np.ndarray of shape (n_features,)
Estimated coefficients of the model.
intercept_ : float
Independent term in the linear model. Set to 0.0 if fit_intercept = False.
Notes
-----
This implementation uses scipy.optimize.minimize, see
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Examples
--------
>>> import numpy as np
>>> np.random.seed(0)
>>> X = np.random.randn(100, 4)
>>> y = X @ np.array([1, 2, 3, 4]) + 2*np.random.randn(100)
>>> over_bad = ImbalancedLinearRegression(overestimation_punishment_factor=50).fit(X, y)
>>> over_bad.coef_
array([0.36267036, 1.39526844, 3.4247146 , 3.93679175])
>>> under_bad = ImbalancedLinearRegression(overestimation_punishment_factor=0.01).fit(X, y)
>>> under_bad.coef_
array([0.73519586, 1.28698197, 2.61362614, 4.35989806])
"""
def __init__(
self,
alpha: float = 0.0,
l1_ratio: float = 0.0,
fit_intercept: bool = True,
copy_X: bool = True,
positive: bool = False,
overestimation_punishment_factor: float = 1.0,
) -> None:
"""Initialize."""
super().__init__(alpha, l1_ratio, fit_intercept, copy_X, positive)
self.overestimation_punishment_factor = overestimation_punishment_factor
def _get_objective(
self, X: np.ndarray, y: np.ndarray, sample_weight: np.ndarray
) -> Tuple[Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]]:
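        # The loss below is a sample-weighted squared error in which residuals of
        # overestimated rows (X @ params > y) are additionally scaled by
        # `overestimation_punishment_factor`; the gradient mirrors the same weighting.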
@self._loss_regularize
def imbalanced_loss(params):
return 0.5 * np.mean(
sample_weight
* np.where(X @ params > y, self.overestimation_punishment_factor, 1)
* np.square(y - X @ params)
)
@self._grad_loss_regularize
def grad_imbalanced_loss(params):
return (
-(
sample_weight
* np.where(X @ params > y, self.overestimation_punishment_factor, 1)
* (y - X @ params)
)
@ X
/ X.shape[0]
)
return imbalanced_loss, grad_imbalanced_loss
class LinearRegression(BaseScipyMinimizeRegressor):
"""
Just plain and simple linear regression.
The formula optimized for is
1 / (2 * n_samples) * ||y - Xw||_2 ** 2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||_2 ** 2
Parameters
----------
alpha : float, default=0.0
Constant that multiplies the penalty terms.
l1_ratio : float, default=0.0
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
When set to True, forces the coefficients to be positive.
Attributes
----------
coef_ : np.ndarray of shape (n_features,)
Estimated coefficients of the model.
intercept_ : float
Independent term in the linear model. Set to 0.0 if fit_intercept = False.
Notes
-----
This implementation uses scipy.optimize.minimize, see
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Examples
--------
>>> import numpy as np
>>> np.random.seed(0)
>>> X = np.random.randn(100, 4)
>>> y = X @ np.array([1, 2, 3, 4]) + 2*np.random.randn(100)
>>> lr = LinearRegression().fit(X, y)
>>> lr.coef_
array([0.73202377, 1.75186186, 2.92983272, 3.96578532])
"""
def _get_objective(
self, X: np.ndarray, y: np.ndarray, sample_weight: np.ndarray
) -> Tuple[Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]]:
@self._loss_regularize
def ols_loss(params):
return 0.5 * np.mean(sample_weight * np.square(y - X @ params))
@self._grad_loss_regularize
def grad_ols_loss(params):
return -(sample_weight * (y - X @ params)) @ X / X.shape[0]
        return ols_loss, grad_ols_loss | scikit-bonus | /scikit_bonus-0.1.12-py3-none-any.whl/skbonus/linear_model/_scipy_regressors.py | _scipy_regressors.py | 0.976858 | 0.681329
from __future__ import annotations
from typing import Any
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.validation import (
check_X_y,
check_is_fitted,
check_array,
_check_sample_weight,
)
class ExplainableBoostingMetaRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor that outputs a transparent, explainable model given blackbox models.
It works exactly like the `ExplainableBoostingRegressor` by the interpretml team, but here you can choose any base regressor instead of
being restricted to trees. For example, you can use scikit-learn's `IsotonicRegression` to create a model that is
monotonically increasing or decreasing in some of the features, while still being explainable and well-performing.
See the notes below to find a nice explanation of how the algorithm works at a high level.
Parameters
----------
base_regressor : Any, default=DecisionTreeRegressor(max_depth=4)
A single scikit-learn compatible regressor or a list of those regressors of length `n_features`.
max_rounds : int, default=5000
Conduct the boosting for these many rounds.
learning_rate : float, default=0.01
The learning rate. Should be quite small.
grid_points : int, default=1000
The more grid points, the
- more detailed the explanations get and
- the better the model performs, but
- the slower the algorithm gets.
Examples
--------
>>> import numpy as np
>>> from sklearn.isotonic import IsotonicRegression
>>> np.random.seed(0)
>>> X = np.random.randn(100, 2)
>>> y = 2 * X[:, 0] - 3 * X[:, 1] + np.random.randn(100)
>>> e = ExplainableBoostingMetaRegressor(
... base_regressor=[IsotonicRegression(), IsotonicRegression(increasing=False)],
... grid_points=20
... ).fit(X, y)
>>> e.score(X, y)
0.9377382292348461
>>> e.outputs_[0] # increasing in the first feature, as it should be
array([-4.47984456, -4.47984456, -4.47984456, -4.47984456, -3.00182713,
-2.96627696, -1.60843287, -1.06601264, -0.92013822, -0.7217753 ,
-0.66440783, 0.28132994, 1.33664486, 1.47592253, 1.96677286,
2.88969439, 2.96292906, 4.33642573, 4.38506967, 6.42967225])
>>> e.outputs_[1] # decreasing in the second feature, as it should be
array([ 6.35605214, 6.06407947, 6.05458114, 4.8488004 , 4.41880876,
3.45056373, 2.64560385, 1.6138303 , 0.89860987, 0.458301 ,
0.33455608, -0.43609495, -1.55600464, -2.05142528, -2.42791679,
-3.58961475, -4.80134218, -4.94421252, -5.94858712, -6.36828774])
Notes
-----
    Check out the original authors' GitHub at https://github.com/interpretml/interpret and https://www.youtube.com/watch?v=MREiHgHgl0k
    for a great introduction to how the algorithm works.
"""
def __init__(
self,
base_regressor: Any = None,
max_rounds: int = 5000,
learning_rate: float = 0.01,
grid_points: int = 1000,
) -> None:
"""Initialize."""
self.base_regressor = base_regressor
self.max_rounds = max_rounds
self.learning_rate = learning_rate
self.grid_points = grid_points
def fit(
self, X: np.ndarray, y: np.ndarray, sample_weight: np.ndarray = None
) -> ExplainableBoostingMetaRegressor:
"""
Fit the model.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
            The target values.
        sample_weight : np.ndarray, default=None
            Individual weights for each sample.
Returns
-------
ExplainableBoostingMetaRegressor
Fitted regressor.
"""
X, y = check_X_y(X, y)
sample_weight = _check_sample_weight(sample_weight, X)
self._check_n_features(X, reset=True)
if not isinstance(self.base_regressor, list):
if self.base_regressor is not None:
self.base_regressors_ = self.n_features_in_ * [self.base_regressor]
else:
self.base_regressors_ = self.n_features_in_ * [
DecisionTreeRegressor(max_depth=4)
]
else:
if len(self.base_regressor) == self.n_features_in_:
self.base_regressors_ = self.base_regressor
else:
raise ValueError(
"Number of regressors in base_regressor should be the same as the number of features."
)
if self.learning_rate <= 0:
raise ValueError("learning_rate has to be positive!")
self.domains_ = [
np.linspace(feature_min, feature_max, self.grid_points)
for feature_min, feature_max in zip(X.min(axis=0), X.max(axis=0))
]
self.outputs_ = [np.zeros_like(domain) for domain in self.domains_]
self.mean_ = y.mean()
y_copy = y.copy() - self.mean_
self._fit(X, sample_weight, y_copy)
return self
def _fit(self, X, sample_weight, y_copy):
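        # Cyclic boosting: round i updates only feature (i mod n_features). A fresh clone
        # of that feature's base regressor is fit on the current residuals, its prediction
        # on the feature's grid is added (scaled by the learning rate) to that feature's
        # stored output curve, and the residuals are reduced by the learning-rate-scaled
        # predictions on the training samples.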
for i in range(self.max_rounds):
feature_number = i % self.n_features_in_
h = clone(self.base_regressors_[feature_number])
x = X[:, feature_number].reshape(-1, 1)
h.fit(x, y_copy, sample_weight=sample_weight)
self.outputs_[feature_number] += self.learning_rate * h.predict(
self.domains_[feature_number].reshape(-1, 1)
)
y_copy -= self.learning_rate * h.predict(x)
def predict(self, X: np.ndarray) -> np.ndarray:
"""
Get predictions.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
X = check_array(X)
check_is_fitted(self)
self._check_n_features(X, reset=False)
n = len(X)
res = np.zeros(n)
for feature_number in range(self.n_features_in_):
grid = self.domains_[feature_number]
feature_outputs = self.outputs_[feature_number][
np.abs(
np.repeat(grid.reshape(-1, 1), n, axis=1) - X[:, feature_number]
).argmin(axis=0)
]
res += feature_outputs
        return res + self.mean_ | scikit-bonus | /scikit_bonus-0.1.12-py3-none-any.whl/skbonus/meta/_explainable_regressor.py | _explainable_regressor.py | 0.936814 | 0.573977
from __future__ import annotations
from typing import Any
import numpy as np
from sklearn.base import (
BaseEstimator,
RegressorMixin,
clone,
is_regressor,
is_classifier,
)
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
class ZeroInflatedRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor for zero-inflated datasets, i.e. the targets contain a lot of zeroes.
`ZeroInflatedRegressor` consists of a classifier and a regressor.
    - The classifier's task is to determine whether the target is zero or not.
    - The regressor's task is to output a (usually positive) prediction whenever the classifier indicates that there should be a non-zero prediction.
The regressor is only trained on examples where the target is non-zero, which makes it easier for it to focus.
At prediction time, the classifier is first asked if the output should be zero. If yes, output zero.
Otherwise, ask the regressor for its prediction and output it.
Parameters
----------
classifier : Any, scikit-learn classifier
A classifier that answers the question "Should the output be zero?".
regressor : Any, scikit-learn regressor
A regressor for predicting the target. Its prediction is only used if `classifier` says that the output is non-zero.
Examples
--------
>>> import numpy as np
>>> from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
>>> np.random.seed(0)
>>> X = np.random.randn(10000, 4)
>>> y = ((X[:, 0]>0) & (X[:, 1]>0)) * np.abs(X[:, 2] * X[:, 3]**2)
>>> z = ZeroInflatedRegressor(
... classifier=ExtraTreesClassifier(random_state=0),
... regressor=ExtraTreesRegressor(random_state=0)
... )
>>> z.fit(X, y)
ZeroInflatedRegressor(classifier=ExtraTreesClassifier(random_state=0),
regressor=ExtraTreesRegressor(random_state=0))
>>> z.predict(X)[:5]
array([4.91483294, 0. , 0. , 0.04941909, 0. ])
"""
_required_parameters = ["classifier", "regressor"]
def __init__(self, classifier: Any, regressor: Any) -> None:
"""Initialize."""
self.classifier = classifier
self.regressor = regressor
def _fit_estimator(self, estimator_type: str, X: np.ndarray, y: np.ndarray) -> None:
if estimator_type == 'classifier':
estimator = self.classifier
elif estimator_type == 'regressor':
estimator = self.regressor
else:
raise ValueError(f"Estimator type should be `classifier` or `regressor`, got {estimator_type}.")
try:
check_is_fitted(estimator)
self.estimators_[estimator_type] = estimator
except NotFittedError:
self.estimators_[estimator_type] = clone(estimator)
self.estimators_[estimator_type].fit(X, y)
def _check_estimators(self) -> None:
if not is_classifier(self.classifier):
raise ValueError(
f"`classifier` has to be a classifier. Received instance of {type(self.classifier)} instead."
)
if not is_regressor(self.regressor):
raise ValueError(
f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead."
)
def fit(self, X: np.ndarray, y: np.ndarray) -> ZeroInflatedRegressor:
"""
Fit the model.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
Returns
-------
ZeroInflatedRegressor
Fitted regressor.
"""
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
self._check_estimators()
self.estimators_ = {}
self._fit_estimator('classifier', X, y != 0)
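        # Note: the regressor is fit on the rows that the freshly fitted classifier
        # predicts as non-zero, not simply on the rows where y != 0.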
non_zero_indices = np.where(self.estimators_['classifier'].predict(X) == 1)[0]
if non_zero_indices.size > 0:
self._fit_estimator('regressor', X[non_zero_indices], y[non_zero_indices])
else:
raise ValueError(
"The predicted training labels are all zero, making the regressor obsolete. Change the classifier or use a plain regressor instead."
)
return self
def predict(self, X: np.ndarray) -> np.ndarray:
"""
Get predictions.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
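        # Default every prediction to zero; only the rows the classifier flags as
        # non-zero are overwritten with the regressor's output.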
output = np.zeros(len(X))
non_zero_indices = np.where(self.estimators_['classifier'].predict(X))[0]
if non_zero_indices.size > 0:
output[non_zero_indices] = self.estimators_['regressor'].predict(X[non_zero_indices])
        return output | scikit-bonus | /scikit_bonus-0.1.12-py3-none-any.whl/skbonus/meta/_zero_inflated_regressor.py | _zero_inflated_regressor.py | 0.963959 | 0.795618
from __future__ import annotations
from abc import ABC, abstractmethod
import numpy as np
from scipy.signal import convolve2d
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
class Smoother(BaseEstimator, TransformerMixin, ABC):
"""
Smooth the columns of an array by applying a convolution.
Parameters
----------
window : int
        Size of the sliding window, i.e. the number of neighboring observations used in the convolution.
mode : str
Which convolution mode to use. Can be one of
- "full": The output is the full discrete linear convolution of the inputs.
- "valid": The output consists only of those elements that do not rely on the zero-padding.
- "same": The output is the same size as the first input, centered with respect to the 'full' output.
"""
def __init__(
self,
window: int,
mode: str,
) -> None:
"""Initialize."""
self.window = window
self.mode = mode
@abstractmethod
def _set_sliding_window(self) -> None:
"""
Calculate the sliding window.
Returns
-------
None
"""
def fit(self, X: np.ndarray, y: None = None) -> Smoother:
"""
Fit the estimator.
        The sliding window is created and normalized to sum to one.
Parameters
----------
X : np.ndarray
            The data to be smoothed; used here to validate the input and set the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
        Smoother
Fitted transformer.
"""
X = check_array(X)
self._check_n_features(X, reset=True)
self._set_sliding_window()
self.sliding_window_ = (
self.sliding_window_.reshape(-1, 1) / self.sliding_window_.sum()
)
return self
def transform(self, X: np.ndarray) -> np.ndarray:
"""
        Smooth the columns of the array by convolving them with the sliding window.
Parameters
----------
X : np.ndarray
            The data to be smoothed.
Returns
-------
np.ndarray
            The smoothed data.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
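        # Convolve every column of X with the normalized sliding window. In "full" mode
        # the convolution has `window - 1` extra trailing rows, which are dropped so that
        # the output stays aligned with the input.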
convolution = convolve2d(X, self.sliding_window_, mode=self.mode)
if self.mode == "full" and self.window > 1:
convolution = convolution[: -self.window + 1]
return convolution
class GeneralGaussianSmoother(Smoother):
"""
Smooth the columns of an array by applying a convolution with a generalized Gaussian curve.
Parameters
----------
window : int, default=1
        Size of the sliding window, i.e. the number of neighboring observations the smoothing
        is spread over. The effect is centered around each observation.
p : float, default=1
Parameter for the shape of the curve. p=1 yields a typical Gaussian curve while p=0.5 yields a Laplace curve, for example.
sig : float, default=1
Parameter for the standard deviation of the bell-shaped curve.
tails : str, default="both"
Which tails to use. Can be one of
- "left"
- "right"
- "both"
Examples
--------
>>> import numpy as np
>>> X = np.array([0, 0, 0, 1, 0, 0, 0]).reshape(-1, 1)
>>> GeneralGaussianSmoother().fit_transform(X)
array([[0.],
[0.],
[0.],
[1.],
[0.],
[0.],
[0.]])
>>> GeneralGaussianSmoother(window=5, p=1, sig=1).fit_transform(X)
array([[0. ],
[0.05448868],
[0.24420134],
[0.40261995],
[0.24420134],
[0.05448868],
[0. ]])
>>> GeneralGaussianSmoother(window=7, tails="right").fit_transform(X)
array([[0. ],
[0. ],
[0. ],
[0.57045881],
[0.34600076],
[0.0772032 ],
[0.00633722]])
"""
def __init__(
self,
window: int = 1,
p: float = 1,
sig: float = 1,
tails: str = "both",
) -> None:
"""Initialize."""
super().__init__(window, mode="same")
self.p = p
self.sig = sig
self.tails = tails
def _set_sliding_window(self) -> None:
"""
Calculate the sliding window.
Returns
-------
None
Raises
------
ValueError
If the provided value for `tails` is not "left", "right" or "both".
"""
self.sliding_window_ = np.exp(
-0.5
* np.abs(np.arange(-self.window // 2 + 1, self.window // 2 + 1) / self.sig)
** (2 * self.p)
)
if self.tails == "left":
self.sliding_window_[self.window // 2 + 1 :] = 0
elif self.tails == "right":
self.sliding_window_[: self.window // 2] = 0
elif self.tails != "both":
raise ValueError(
"tails keyword has to be one of 'both', 'left' or 'right'."
)
class ExponentialDecaySmoother(Smoother):
"""
    Smooth the columns of an array by applying a convolution with an exponentially decaying curve.
    This class can be used for modelling carryover effects in marketing mix models.
Parameters
----------
window : int, default=1
        Size of the sliding window, i.e. the number of observations the carryover effect is
        spread over, starting at the observation itself.
strength : float, default=0.0
Fraction of the spending effect that is carried over.
peak : float, default=0.0
Where the carryover effect peaks.
exponent : float, default=1.0
To further widen or narrow the carryover curve. A value of 1.0 yields a normal exponential decay.
With values larger than 1.0, a super exponential decay can be achieved.
Examples
--------
>>> import numpy as np
>>> X = np.array([0, 0, 0, 1, 0, 0, 0]).reshape(-1, 1)
>>> ExponentialDecaySmoother().fit_transform(X)
array([[0.],
[0.],
[0.],
[1.],
[0.],
[0.],
[0.]])
>>> ExponentialDecaySmoother(window=3, strength=0.5).fit_transform(X)
array([[0. ],
[0. ],
[0. ],
[0.57142857],
[0.28571429],
[0.14285714],
[0. ]])
>>> ExponentialDecaySmoother(window=3, strength=0.5, peak=1).fit_transform(X)
array([[0. ],
[0. ],
[0. ],
[0.25],
[0.5 ],
[0.25],
[0. ]])
"""
def __init__(
self,
window: int = 1,
strength: float = 0.0,
peak: float = 0.0,
exponent: float = 1.0,
) -> None:
"""Initialize."""
super().__init__(window, mode="full")
self.strength = strength
self.peak = peak
self.exponent = exponent
def _set_sliding_window(self) -> None:
"""
Calculate the sliding window.
Returns
-------
None
"""
self.sliding_window_ = self.strength ** (
np.abs(np.arange(self.window) - self.peak) ** self.exponent
        ) | scikit-bonus | /scikit_bonus-0.1.12-py3-none-any.whl/skbonus/timeseries/smoothing.py | smoothing.py | 0.977586 | 0.643735
from __future__ import annotations
from typing import Optional, Union
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.covariance import EllipticEnvelope
from sklearn.utils.validation import check_is_fitted
class SpikeRemover(BaseEstimator, TransformerMixin):
"""
This class takes a time series and removes outlier spikes.
    It does so by identifying observations that are not close to their neighbors and replacing them with the
    mean of those neighbors. The share of spikes being flattened is determined by the `contamination` parameter.
Parameters
----------
    contamination : float, default=0.05
The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
Range is (0, 0.5).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data. Pass an int for reproducible results
across multiple function calls.
Examples
--------
>>> import numpy as np
>>> np.random.seed(0)
>>> y = np.sin(np.linspace(0, 2*np.pi, 100)) + 0.1*np.random.randn(100); y[4] = 5; y[60] = -10
>>> y_new = SpikeRemover().fit_transform(y.reshape(-1, 1))
>>> y_new[[3, 4, 5]]
array([[0.41334056],
[0.31382311],
[0.21430566]])
>>> y_new[[59, 60, 61]]
array([[-0.60333398],
[-0.65302915],
[-0.70272432]])
"""
def __init__(
self,
contamination: float = 0.05,
random_state: Optional[Union[int, np.random.RandomState]] = None,
):
"""Initialize."""
self.contamination = contamination
self.random_state = random_state
def fit(self, y: np.ndarray) -> SpikeRemover:
"""
Fit the estimator.
Parameters
----------
y : np.ndarray
A time series containing outlier spikes.
Returns
-------
SpikeRemover
Fitted transformer.
"""
self.outlier_detector_ = EllipticEnvelope(
contamination=self.contamination, random_state=self.random_state
)
y_diff = y[1:] - y[:-1]
self.outlier_detector_.fit(y_diff)
return self
def transform(self, y: np.ndarray) -> np.ndarray:
"""
Remove outliers from the time series.
Parameters
----------
y : np.ndarray
The original time series.
Returns
-------
np.ndarray
Time series without outlier spikes.
"""
check_is_fitted(self)
y_copy = y.copy()
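        # The detector labels each first difference of the series (+1 inlier, -1 outlier).
        # Convolving those labels with [1, 1] maps them back onto the observations:
        # a marker of -2 means both adjacent differences are outliers (an interior spike),
        # while -1 can only occur at the series boundary (a spike in the first or last value).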
outlier_markers = np.convolve(
self.outlier_detector_.predict(y_copy[1:] - y_copy[:-1]), np.array([1, 1])
)
central_spikes = np.where(outlier_markers == -2)[0]
border_spikes = np.where(outlier_markers == -1)[0]
for spike in central_spikes:
y_copy[spike] = np.average(y_copy[[spike - 1, spike + 1]])
for spike in border_spikes:
if spike == 0:
y_copy[0] = y_copy[1]
else:
y_copy[-1] = y_copy[-2]
        return y_copy | scikit-bonus | /scikit_bonus-0.1.12-py3-none-any.whl/skbonus/timeseries/outliers.py | outliers.py | 0.969771 | 0.689623
from __future__ import annotations
from abc import ABC, abstractmethod
import numpy as np
from sklearn.base import BaseEstimator, OutlierMixin
from sklearn.utils.validation import check_array, check_is_fitted
class BoxEnvelope(BaseEstimator, OutlierMixin, ABC):
"""
    Abstract base class for detecting whether a data point is an outlier by checking each feature for unusual behavior independently.
    It works the following way for each sample:
    - Mark the sample as an inlier.
    - For each feature, do the following:
        - If the feature is smaller than the lower bound of that feature, mark the sample as an outlier.
        - If the feature is larger than the upper bound of that feature, mark the sample as an outlier.
    Subclasses define how the per-feature bounds are computed by implementing `_get_bounds`.
"""
@abstractmethod
def _get_bounds(self, X):
"""Implement this. This should set `self.lower_bounds_` and `self.upper_bounds`."""
def fit(self, X: np.ndarray, y=None) -> BoxEnvelope:
"""
Fit the estimator.
Parameters
----------
X : np.ndarray
Used for calculating the quantiles.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
BoxEnvelope
Fitted transformer.
"""
X = check_array(X)
self._check_n_features(X, reset=True)
self._get_bounds(X)
self.offset_ = 0
return self
def score_samples(self, X: np.ndarray) -> np.ndarray:
"""
Predict the labels (1 inlier, -1 outlier) of X according to the fitted model.
Parameters
----------
X : np.ndarray
The data.
Returns
-------
np.ndarray
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
return (
2
* np.prod(
np.hstack(
[
np.where(X < self.lower_bounds_, 0, 1),
np.where(X > self.upper_bounds_, 0, 1),
]
),
axis=1,
)
- 1
).astype(float)
def decision_function(self, X: np.ndarray) -> np.ndarray:
"""
Predict the labels (1 inlier, -1 outlier) of X according to the fitted model.
Parameters
----------
X : np.ndarray
The data.
Returns
-------
np.ndarray
Returns -1 for anomalies/outliers and +1 for inliers.
"""
return self.score_samples(X)
def predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict the labels (1 inlier, -1 outlier) of X according to the fitted model.
Parameters
----------
X : np.ndarray
The data.
Returns
-------
np.ndarray
Returns -1 for anomalies/outliers and +1 for inliers.
"""
return self.decision_function(X).astype(int)
class QuantileBoxEnvelope(BoxEnvelope):
"""
Detect if a data point is an outlier via checking each feature for unusual behavior independently.
It works the following way for each sample:
- Mark the sample as an inlier
- For each feature, do the following:
- If the feature is smaller than the `alpha` / 2 quantile, mark this sample as an outlier.
- If the feature is larger than the 1 - `alpha` / 2 quantile, mark this sample as an outlier.
Parameters
----------
alpha : float, default=0.01
Determines how many outliers are produced. The larger `alpha`, the more samples are marked as outliers.
For one-dimensional data, approximately a fraction `alpha` of all samples will be marked as outliers.
Examples
--------
>>> import numpy as np
>>> X = np.array([
... [1, 5],
... [2, 5],
... [3, 5],
... [2, 6]
... ])
>>> QuantileBoxEnvelope(alpha=0.4).fit_predict(X)
array([-1, 1, -1, -1])
>>> np.random.seed(0)
>>> X = np.random.randn(1000, 1)
>>> list(QuantileBoxEnvelope().fit_predict(X)).count(-1) / 1000
0.01
"""
def __init__(self, alpha: float = 0.01):
"""Initialize."""
super().__init__()
self.alpha = alpha
def _get_bounds(self, X):
self.lower_bounds_ = np.quantile(X, q=self.alpha / 2, axis=0)
        self.upper_bounds_ = np.quantile(X, q=1 - self.alpha / 2, axis=0)

# --- source file: skbonus/outlier/naive.py (package: scikit-bonus) ---
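# Extension sketch (illustration only, not part of the original module). It shows how the abstract
# `_get_bounds` hook of BoxEnvelope above is meant to be filled in; the class name and the three-sigma
# rule are invented for the example.
class ThreeSigmaBoxEnvelope(BoxEnvelope):
    """Mark a sample as an outlier if any feature lies more than three standard deviations from its column mean."""

    def _get_bounds(self, X):
        means, stds = X.mean(axis=0), X.std(axis=0)
        self.lower_bounds_ = means - 3 * stds
        self.upper_bounds_ = means + 3 * stds

# e.g. ThreeSigmaBoxEnvelope().fit_predict(np.random.randn(1000, 3)) marks only a small fraction of rows as -1.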
from __future__ import annotations
from typing import List, Tuple, Optional
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted
class CyclicalEncoder(BaseEstimator, TransformerMixin):
"""
Break each cyclic feature into two new features, corresponding to the representation of this feature on a circle.
For example, take the hours from 0 to 23. On a normal, round analog clock,
these features are perfectly aligned on a circle already. You can do the same with days, month, ...
Notes
-----
This method has the advantage that close points in time stay close together. See the examples below.
Otherwise, if algorithms deal with the raw value for hour they cannot know that 0 and 23 are actually close.
Another possibility is one hot encoding the hour. This has the disadvantage that it breaks the distances
between different hours. Hour 5 and 16 have the same distance as hour 0 and 23 when doing this.
Parameters
----------
cycles : Optional[List[Tuple[float, float]]], default=None
        Define the ranges of the cycles in the format [(col_1_min, col_1_max), (col_2_min, col_2_max), ...].
For example, use [(0, 23), (1, 7)] if your dataset consists of two columns, the first one containing hours and the second one the day of the week.
If left empty, the encoder tries to infer it from the data, i.e. it looks for the minimum and maximum value of each column.
Examples
--------
>>> import numpy as np
>>> df = np.array([[22], [23], [0], [1], [2]])
>>> CyclicalEncoder().fit_transform(df)
array([[ 0.8660254 , -0.5 ],
[ 0.96592583, -0.25881905],
[ 1. , 0. ],
[ 0.96592583, 0.25881905],
[ 0.8660254 , 0.5 ]])
"""
def __init__(
self,
cycles: Optional[List[Tuple[float, float]]] = None,
) -> None:
"""Initialize."""
self.cycles = cycles
def fit(self, X: np.ndarray, y=None) -> CyclicalEncoder:
"""
        Fit the estimator. The cycle ranges are inferred from the data unless they were provided during initialization.
Parameters
----------
X : np.ndarray
Used for inferring the ranges of the data, if not provided during initialization.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
CyclicalEncoder
Fitted transformer.
"""
X = check_array(X)
self._check_n_features(X, reset=True)
if self.cycles is None:
self.cycles_ = list(zip(X.min(axis=0), X.max(axis=0)))
else:
self.cycles_ = self.cycles
return self
def transform(self, X: np.ndarray) -> np.ndarray:
"""
Add the cyclic features to the dataframe.
Parameters
----------
X : np.ndarray
The data with cyclical features in the columns.
Returns
-------
np.ndarray
            The encoded data with twice as many columns as the original.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
        # Map each value onto the angle interval [0, 2*pi) before taking cosine and sine.
        def min_max(column):
return (
(column - self.cycles_[i][0])
/ (self.cycles_[i][1] + 1 - self.cycles_[i][0])
* 2
* np.pi
)
res = []
for i in range(X.shape[1]):
res.append(np.cos(min_max(X[:, i])))
res.append(np.sin(min_max(X[:, i])))
        return np.vstack(res).T

# --- source file: skbonus/preprocessing/time.py (package: scikit-bonus) ---
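# Usage sketch (illustration only, not part of the original module). It checks the claim from the Notes
# section above that neighboring points in time stay close after encoding; the hour values are invented for the demo.
def _cyclical_encoder_demo():  # pragma: no cover - illustrative only
    import numpy as np

    hours = np.array([[23], [0], [11]])
    encoded = CyclicalEncoder(cycles=[(0, 23)]).fit_transform(hours)
    dist_23_to_0 = np.linalg.norm(encoded[0] - encoded[1])   # small: 23:00 and 00:00 are neighbors on the clock
    dist_23_to_11 = np.linalg.norm(encoded[0] - encoded[2])  # large: opposite sides of the clock
    return dist_23_to_0 < dist_23_to_11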
from __future__ import annotations
from abc import ABC, abstractmethod
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted, check_array
class Saturation(BaseEstimator, TransformerMixin, ABC):
"""Base class for all saturations, such as Box-Cox, Adbudg, ..."""
def fit(self, X: np.ndarray, y: None = None) -> Saturation:
"""
Fit the transformer.
In this special case, nothing is done.
Parameters
----------
X : Ignored
Not used, present here for API consistency by convention.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
Saturation
Fitted transformer.
"""
X = check_array(X)
self._check_n_features(X, reset=True)
return self
def transform(self, X: np.ndarray) -> np.ndarray:
"""
Apply the saturation effect.
Parameters
----------
X : np.ndarray
Data to be transformed.
Returns
-------
np.ndarray
Data with saturation effect applied.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
return self._transformation(X)
@abstractmethod
def _transformation(self, X: np.ndarray) -> np.ndarray:
"""Generate the transformation formula."""
class BoxCoxSaturation(Saturation):
"""
Apply the Box-Cox saturation.
    The formula is ((x + shift) ** exponent - 1) / exponent if exponent != 0, else ln(x + shift).
Parameters
----------
exponent: float, default=1.0
The exponent.
shift : float, default=1.0
The shift.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
>>> BoxCoxSaturation(exponent=0.5).fit_transform(X)
array([[ 0.82842712, 61.27716808],
[ 1.46410162, 61.27716808],
[ 2. , 61.27716808]])
"""
def __init__(self, exponent: float = 1.0, shift: float = 1.0) -> None:
"""Initialize."""
self.exponent = exponent
self.shift = shift
def _transformation(self, X: np.ndarray) -> np.ndarray:
"""Generate the transformation formula."""
if self.exponent != 0:
return ((X + self.shift) ** self.exponent - 1) / self.exponent
else:
return np.log(X + self.shift)
class AdbudgSaturation(Saturation):
"""
Apply the Adbudg saturation.
The formula is x ** exponent / (denominator_shift + x ** exponent).
Parameters
----------
exponent : float, default=1.0
The exponent.
denominator_shift : float, default=1.0
The shift in the denominator.
Notes
-----
This version produces saturated values in the interval [0, 1]. You can use `LinearShift` from the shift module to
bring it between some interval [a, b].
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
>>> AdbudgSaturation().fit_transform(X)
array([[0.5 , 0.999001 ],
[0.66666667, 0.999001 ],
[0.75 , 0.999001 ]])
"""
def __init__(self, exponent: float = 1.0, denominator_shift: float = 1.0) -> None:
"""Initialize."""
self.exponent = exponent
self.denominator_shift = denominator_shift
def _transformation(self, X: np.ndarray) -> np.ndarray:
"""Generate the transformation formula."""
return X ** self.exponent / (self.denominator_shift + X ** self.exponent)
class HillSaturation(Saturation):
"""
Apply the Hill saturation.
The formula is 1 / (1 + (half_saturation / x) ** exponent).
Parameters
----------
exponent : float, default=1.0
The exponent.
half_saturation : float, default=1.0
The point of half saturation, i.e. Hill(half_saturation) = 0.5.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
>>> HillSaturation().fit_transform(X)
array([[0.5 , 0.999001 ],
[0.66666667, 0.999001 ],
[0.75 , 0.999001 ]])
"""
def __init__(self, exponent: float = 1.0, half_saturation: float = 1.0) -> None:
"""Initialize."""
self.half_saturation = half_saturation
self.exponent = exponent
def _transformation(self, X: np.ndarray) -> np.ndarray:
"""Generate the transformation formula."""
eps = np.finfo(np.float64).eps
return 1 / (1 + (self.half_saturation / (X + eps)) ** self.exponent)
class ExponentialSaturation(Saturation):
"""
Apply exponential saturation.
The formula is 1 - exp(-exponent * x).
Parameters
----------
exponent : float, default=1.0
The exponent.
Notes
-----
This version produces saturated values in the interval [0, 1]. You can use `LinearShift` from the shift module to
bring it between some interval [a, b].
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1000], [2, 1000], [3, 1000]])
>>> ExponentialSaturation().fit_transform(X)
array([[0.63212056, 1. ],
[0.86466472, 1. ],
[0.95021293, 1. ]])
"""
def __init__(self, exponent: float = 1.0) -> None:
"""Initialize."""
self.exponent = exponent
def _transformation(self, X: np.ndarray) -> np.ndarray:
"""Generate the transformation formula."""
        return 1 - np.exp(-self.exponent * X)

# --- source file: skbonus/preprocessing/saturation.py (package: scikit-bonus) ---
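# Usage sketch (illustration only, not part of the original module). The saturations above are ordinary
# scikit-learn transformers, so they drop straight into a Pipeline; the spend/sales data and the exponent
# are invented for the demo.
def _saturation_pipeline_demo():  # pragma: no cover - illustrative only
    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.pipeline import make_pipeline

    spend = np.linspace(0, 10, 100).reshape(-1, 1)
    sales = 1 - np.exp(-0.7 * spend[:, 0]) + 0.01 * np.random.randn(100)
    model = make_pipeline(ExponentialSaturation(exponent=0.7), LinearRegression())
    return model.fit(spend, sales).score(spend, sales)  # close to 1: the saturated channel explains sales well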
from __future__ import annotations
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder as ScikitLearnOneHotEncoder
from sklearn.utils.validation import check_is_fitted, check_array
class OneHotEncoderWithNames(ScikitLearnOneHotEncoder):
"""
Razor-thin layer around scikit-learn's OneHotEncoder class to return a pandas dataframe with the appropriate column names.
Description from the maintainers of scikit-learn:
Encode categorical features as a one-hot numeric array.
The input to this transformer should be an array-like of integers or
strings, denoting the values taken on by categorical (discrete) features.
The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')
encoding scheme. This creates a binary column for each category and
returns a sparse matrix or dense array (depending on the ``sparse``
parameter).
By default, the encoder derives the categories based on the unique values
in each feature. Alternatively, you can also specify the `categories`
manually.
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Parameters
----------
categories : 'auto' or a list of array-like, default='auto'
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith
column. The passed categories should not mix strings and numeric
values within a single feature, and should be sorted in case of
numeric values.
The used categories can be found in the ``categories_`` attribute.
    drop : {'first', 'if_binary'} or an array-like of shape (n_features,), default=None
Specifies a methodology to use to drop one of the categories per
feature. This is useful in situations where perfectly collinear
features cause problems, such as when feeding the resulting data
into a neural network or an unregularized regression.
However, dropping one category breaks the symmetry of the original
representation and can therefore induce a bias in downstream models,
for instance for penalized linear classification or regression models.
- None : retain all features (the default).
- 'first' : drop the first category in each feature. If only one
category is present, the feature will be dropped entirely.
- 'if_binary' : drop the first category in each feature with two
categories. Features with 1 or more than 2 categories are
left intact.
- array : ``drop[i]`` is the category in feature ``X[:, i]`` that
should be dropped.
sparse : bool, default=True
Will return sparse matrix if set True else will return an array.
dtype : number type, default=float
Desired dtype of output.
handle_unknown : {'error', 'ignore'}, default='error'
Whether to raise an error or ignore if an unknown categorical feature
is present during transform (default is to raise). When this parameter
is set to 'ignore' and an unknown category is encountered during
transform, the resulting one-hot encoded columns for this feature
will be all zeros. In the inverse transform, an unknown category
will be denoted as None.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting
(in order of the features in X and corresponding with the output
of ``transform``). This includes the category specified in ``drop``
(if any).
drop_idx_ : array of shape (n_features,)
- ``drop_idx_[i]`` is the index in ``categories_[i]`` of the category
to be dropped for each feature.
- ``drop_idx_[i] = None`` if no category is to be dropped from the
feature with index ``i``, e.g. when `drop='if_binary'` and the
feature isn't binary.
- ``drop_idx_ = None`` if all the transformed features will be
retained.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': [1, 2, 1], 'B': ['a', 'b', 'c']})
>>> OneHotEncoderWithNames().fit_transform(df)
A_1 A_2 B_a B_b B_c
0 1 0 1 0 0
1 0 1 0 1 0
2 1 0 0 0 1
"""
def fit(self, X: pd.DataFrame, y: None = None) -> OneHotEncoderWithNames:
"""
        Fit the OneHotEncoder while also storing the dataframe's column names, which lets us check that the columns match when calling the transform method.
Parameters
----------
X : pd.DataFrame
Fit the OneHotEncoder on this dataframe.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
OneHotEncoderWithNames
Fitted transformer.
"""
self.column_names_ = X.columns
self._check_n_features(X, reset=True)
return super().fit(X, y)
    def _replace_prefix(self, ohe_column_name):
        # scikit-learn names the encoded columns "x<feature index>_<category>"; swap the generic
        # "x<feature index>" prefix for the original dataframe column name.
        feature_name, feature_value = ohe_column_name.split("_", 1)
        feature_number = int(feature_name[1:])
        return "_".join([self.column_names_[feature_number], feature_value])
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""
One hot encode the input dataframe.
Parameters
----------
X : pd.DataFrame
Input to be one hot encoded. The column names should be the same as during the fit method,
including the same order.
Returns
-------
pd.DataFrame
A pandas dataframe containing the one hot encoded data and proper column names.
Raises
------
AssertionError
If the column names during training and transformation time are not identical.
"""
if X.columns.tolist() != self.column_names_.tolist():
raise AssertionError(
"Column names during fit and transform time should be identical, including the order."
)
one_hot_encoded = super().transform(X)
feature_names = [self._replace_prefix(x) for x in self.get_feature_names()]
return pd.DataFrame(
one_hot_encoded.todense() if self.sparse else one_hot_encoded,
columns=feature_names,
        ).astype(int)

# --- source file: skbonus/pandas/preprocessing.py (package: scikit-bonus) ---
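# Usage sketch (illustration only, not part of the original module). Unlike the plain scikit-learn encoder,
# the transformer above insists on identical column names and order at fit and transform time; the small
# dataframe below is invented for the demo.
def _one_hot_with_names_demo():  # pragma: no cover - illustrative only
    import pandas as pd

    train = pd.DataFrame({"color": ["red", "blue"], "size": ["S", "M"]})
    encoder = OneHotEncoderWithNames().fit(train)
    encoded = encoder.transform(train)  # columns: color_blue, color_red, size_M, size_S
    try:
        encoder.transform(train[["size", "color"]])  # reordered columns are rejected
    except AssertionError:
        return encoded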
import numpy as np
import pandas as pd
def explode_date(
df: pd.DataFrame,
start_column: str,
end_column: str,
result_column: str = "Date",
frequency: str = "d",
drop: bool = True,
) -> pd.DataFrame:
"""
Transform a pandas dataframe with columns (*, start_date, end_date) into a longer format with columns (*, date).
This is useful if you deal with datasets that contain special time periods per row, but you need a single date per row.
See the examples for more details.
Parameters
----------
df: pd.DataFrame
        The input dataframe with columns containing start dates and end dates.
start_column : str
Start date of the period.
end_column : str
End date of the period.
result_column : str, default="Date"
Name of the new output date column.
frequency : str, default="d" (for day)
A pandas time frequency. Can take values like "d" for day or "m" for month. A full list can
be found on https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases.
If None, the transformer tries to infer it during fit time.
drop : bool, default=True
Whether to drop the `start_column` and `end_column` in the output.
Returns
-------
pd.DataFrame
A longer dataframe with one date per row.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
... "Data": ["a", "b", "c"],
... "Start": pd.date_range("2020-01-01", periods=3),
... "End": pd.date_range("2020-01-03", periods=3)
... })
>>> df
Data Start End
0 a 2020-01-01 2020-01-03
1 b 2020-01-02 2020-01-04
2 c 2020-01-03 2020-01-05
>>> explode_date(df, start_column="Start", end_column="End", result_column="output_date", frequency="d")
Data output_date
0 a 2020-01-01
0 a 2020-01-02
0 a 2020-01-03
1 b 2020-01-02
1 b 2020-01-03
1 b 2020-01-04
2 c 2020-01-03
2 c 2020-01-04
2 c 2020-01-05
"""
return (
df.assign(
**{
result_column: lambda df: df.apply(
lambda row: pd.date_range(
start=row[start_column], end=row[end_column], freq=frequency
),
axis=1,
)
}
)
.explode(result_column)
        # `drop * [...]` evaluates to the full column list if drop=True and to an empty list otherwise.
        .drop(columns=drop * [start_column, end_column])
)
def add_date_indicators(df: pd.DataFrame, **kwargs) -> pd.DataFrame:
"""
    Enrich a pandas dataframe with new columns indicating special dates.
    Each new column contains a one for the dates specified in the corresponding keyword argument and a zero otherwise.
Parameters
----------
df : pd.DataFrame
Input dataframe with a DateTime index.
    **kwargs : List[str]
        As many inputs as you want of the form date_name=[date_1, date_2, ...], e.g. christmas=['2020-12-24'].
        See the example below for more information.
Returns
-------
pd.DataFrame
A dataframe with date indicator columns.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({"A": range(7)}, index=pd.date_range(start="2019-12-29", periods=7))
>>> add_date_indicators(
... df,
... around_new_year_2020=["2019-12-31", "2020-01-01", "2020-01-02"],
... other_date_1=["2019-12-29"],
... other_date_2=["2018-01-01"]
... )
A around_new_year_2020 other_date_1 other_date_2
2019-12-29 0 0 1 0
2019-12-30 1 0 0 0
2019-12-31 2 1 0 0
2020-01-01 3 1 0 0
2020-01-02 4 1 0 0
2020-01-03 5 0 0 0
2020-01-04 6 0 0 0
"""
return df.assign(
**{name: df.index.isin(dates).astype(int) for name, dates in kwargs.items()}
)
def add_time_features(
df: pd.DataFrame,
second: bool = False,
minute: bool = False,
hour: bool = False,
day_of_week: bool = False,
day_of_month: bool = False,
day_of_year: bool = False,
week_of_month: bool = False,
week_of_year: bool = False,
month: bool = False,
year: bool = False,
) -> pd.DataFrame:
"""
    Enrich a pandas dataframe with new columns that are simple derivations of its DatetimeIndex, such as the day of the week or the month.
Parameters
----------
df: pd.DataFrame
Input dataframe with a DateTime index.
    second : bool, default=False
        Whether to extract the second from the index and add it as a new column.
    minute : bool, default=False
        Whether to extract the minute from the index and add it as a new column.
    hour : bool, default=False
        Whether to extract the hour from the index and add it as a new column.
day_of_week : bool, default=False
Whether to extract the day of week from the index and add it as a new column.
day_of_month : bool, default=False
Whether to extract the day of month from the index and add it as a new column.
day_of_year : bool, default=False
Whether to extract the day of year from the index and add it as a new column.
week_of_month : bool, default=False
Whether to extract the week of month from the index and add it as a new column.
week_of_year : bool, default=False
Whether to extract the week of year from the index and add it as a new column.
month : bool, default=False
Whether to extract the month from the index and add it as a new column.
year : bool, default=False
Whether to extract the year from the index and add it as a new column.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame(
... {"A": ["a", "b", "c"]},
... index=[
... pd.Timestamp("1988-08-08"),
... pd.Timestamp("2000-01-01"),
... pd.Timestamp("1950-12-31"),
... ])
>>> add_time_features(df, day_of_month=True, month=True, year=True)
A day_of_month month year
1988-08-08 a 8 8 1988
2000-01-01 b 1 1 2000
1950-12-31 c 31 12 1950
"""
def _add_second(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(second=df.index.second) if second else df
def _add_minute(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(minute=df.index.minute) if minute else df
def _add_hour(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(hour=df.index.hour) if hour else df
def _add_day_of_week(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(day_of_week=df.index.weekday + 1) if day_of_week else df
def _add_day_of_month(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(day_of_month=df.index.day) if day_of_month else df
def _add_day_of_year(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(day_of_year=df.index.dayofyear) if day_of_year else df
def _add_week_of_month(df: pd.DataFrame) -> pd.DataFrame:
return (
df.assign(week_of_month=np.ceil(df.index.day / 7).astype(int))
if week_of_month
else df
)
def _add_week_of_year(df: pd.DataFrame) -> pd.DataFrame:
return (
df.assign(week_of_year=df.index.isocalendar().week) if week_of_year else df
)
def _add_month(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(month=df.index.month) if month else df
def _add_year(df: pd.DataFrame) -> pd.DataFrame:
return df.assign(year=df.index.year) if year else df
return (
df.pipe(_add_second)
.pipe(_add_minute)
.pipe(_add_hour)
.pipe(_add_day_of_week)
.pipe(_add_day_of_month)
.pipe(_add_day_of_year)
.pipe(_add_week_of_month)
.pipe(_add_week_of_year)
.pipe(_add_month)
.pipe(_add_year)
    )

# --- source file: skbonus/pandas/time_utils.py (package: scikit-bonus) ---
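# Usage sketch (illustration only, not part of the original module). The three helpers above compose nicely
# with pandas' `pipe`; the campaign dataframe and the holiday date are invented for the demo.
def _time_utils_demo():  # pragma: no cover - illustrative only
    import pandas as pd

    campaigns = pd.DataFrame({
        "campaign": ["spring_sale"],
        "Start": [pd.Timestamp("2021-05-01")],
        "End": [pd.Timestamp("2021-05-03")],
    })
    return (
        explode_date(campaigns, start_column="Start", end_column="End", result_column="Date")
        .assign(Date=lambda df: pd.to_datetime(df["Date"]))
        .set_index("Date")
        .pipe(add_time_features, day_of_week=True, month=True)
        .pipe(add_date_indicators, labour_day=["2021-05-01"])
    )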
# Scikit-bot
[![CI](https://github.com/FirefoxMetzger/ropy/actions/workflows/ci.yml/badge.svg)](https://github.com/FirefoxMetzger/ropy/actions/workflows/ci.yml)
[![CD](https://github.com/FirefoxMetzger/ropy/actions/workflows/cd.yml/badge.svg)](https://github.com/FirefoxMetzger/ropy/actions/workflows/cd.yml)
[![codecov](https://codecov.io/gh/FirefoxMetzger/scikit-bot/branch/main/graph/badge.svg?token=VNND9WET47)](https://codecov.io/gh/FirefoxMetzger/scikit-bot)
[![Documentation Status](https://readthedocs.org/projects/robotics-python/badge/?version=latest)](https://scikit-bot.org/en/latest/?badge=latest)
scikit-bot is a robotics library that aims to address the large heterogeneity of
code in the robotics community by providing a selection of commonly used
algorithms and functions in an easily accessible manner. It focuses on being easy to
use and on enabling rapid prototyping.
- **Curious**? Check out our detailed [API
documentation](https://robotics-python.readthedocs.io/en/latest/api_reference.html)
to see if there is anything that meets your need.
- Got an **idea for a new feature** or spotted something that is missing? Submit
a [feature request](https://github.com/FirefoxMetzger/ropy/issues).
- Want to **contribute code**? Awesome! We are very happy about PRs. You can
check open issues (may I suggest a [good first
issue](https://github.com/FirefoxMetzger/ropy/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22))
for an overview of what is currently needed. Alternatively, you can submit a
new [feature request](https://github.com/FirefoxMetzger/ropy/issues) and -
after we figured out where the feature should go - you can submit a PR for
exactly this feature.
**Note:** If you find any part of the API particularly useful in its current
form, please leave a note in the issue section (and a comment on what could be
improved). Feedback like this helps mature the API more quickly. This way, we
can keep the things that are useful and improve the things that aren't.
### Why does ropy redirect here?
Originally this library was named ropy, because it didn't seem grand enough to
be a fully grown scikit. However, as things evolved, it was rebranded into
scikit-bot.
<!-- source file: README.md (package: scikit-bot 0.14.0) -->
import numpy as np
from numba.extending import overload, register_jitable
from numba.np.unsafe.ndarray import to_fixed_tuple
from numba import types
from numpy.typing import ArrayLike
@overload(np.moveaxis)
def moveaxis(a: np.ndarray, source, destination) -> np.ndarray:
"""Move axes of an array to new positions.
Other axes remain in their original order.
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
dest : int or sequence of int
Destination positions for each of the original axes. These must also be unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
Notes
-----
If one of (source, destination) is an integer, then the other must be an integer, too.
See Also
--------
`np.moveaxis <https://numpy.org/doc/stable/reference/generated/numpy.moveaxis.html>`_
"""
@register_jitable
def impl_array(a: np.ndarray, source, destination):
source_work = np.atleast_1d(np.asarray(source))
destination_work = np.atleast_1d(np.asarray(destination))
if source_work.size != destination_work.size:
raise ValueError(
"`source` and `destination` arguments must have "
"the same number of elements"
)
for idx in range(source_work.size):
if abs(source_work[idx]) > a.ndim:
raise ValueError("Invalid axis in `source`.")
if abs(destination_work[idx]) > a.ndim:
raise ValueError("Invalid axis in `destination`.")
source_work = [x % a.ndim for x in source_work]
destination_work = [x % a.ndim for x in destination_work]
        # Build the new axis order: keep the unmoved axes in place and insert the moved ones at their destinations.
        order = [n for n in range(a.ndim) if n not in source_work]
        for dest, src in sorted(zip(destination_work, source_work)):
            order.insert(dest, src)
        order_tuple = to_fixed_tuple(np.array(order), a.ndim)
        return np.transpose(a, order_tuple)
@register_jitable
def impl_int(a: np.ndarray, source, destination):
if abs(source) > a.ndim:
raise ValueError("Invalid axis in `source`.")
if abs(destination) > a.ndim:
raise ValueError("Invalid axis in `destination`.")
source = source % a.ndim
destination = destination % a.ndim
        order = [n for n in range(a.ndim) if n != source]
        order.insert(destination, source)
        order_tuple = to_fixed_tuple(np.array(order), a.ndim)
        return np.transpose(a, order_tuple)
if isinstance(source, types.Integer) and isinstance(destination, types.Integer):
return impl_int
else:
return impl_array
@overload(np.putmask)
def putmask(a: np.ndarray, mask: ArrayLike, values: ArrayLike) -> None:
"""Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : ndarray
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
np.place, np.put, np.take, np.copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
"""
def impl(a: np.ndarray, mask: ArrayLike, values: ArrayLike) -> None:
mask = np.asarray(mask)
values = np.atleast_1d(np.asarray(values))
for idx in range(a.size):
if mask.flat[idx]:
a.flat[idx] = values.flat[idx % len(values)]
return impl | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/_numba_overloads.py | _numba_overloads.py | import numpy as np
from numba.extending import overload, register_jitable
from numba.np.unsafe.ndarray import to_fixed_tuple
from numba import types
from numpy.typing import ArrayLike
@overload(np.moveaxis)
def moveaxis(a: np.ndarray, source, destination) -> np.ndarray:
"""Move axes of an array to new positions.
Other axes remain in their original order.
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
    destination : int or sequence of int
Destination positions for each of the original axes. These must also be unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
Notes
-----
If one of (source, destination) is an integer, then the other must be an integer, too.
See Also
--------
`np.moveaxis <https://numpy.org/doc/stable/reference/generated/numpy.moveaxis.html>`_
"""
@register_jitable
def impl_array(a: np.ndarray, source, destination):
source_work = np.atleast_1d(np.asarray(source))
destination_work = np.atleast_1d(np.asarray(destination))
if source_work.size != destination_work.size:
raise ValueError(
"`source` and `destination` arguments must have "
"the same number of elements"
)
for idx in range(source_work.size):
if abs(source_work[idx]) > a.ndim:
raise ValueError("Invalid axis in `source`.")
if abs(destination_work[idx]) > a.ndim:
raise ValueError("Invalid axis in `destination`.")
source_work = [x % a.ndim for x in source_work]
destination_work = [x % a.ndim for x in destination_work]
order = [n for n in range(a.ndim) if n not in source_work]
for dest, src in sorted(zip(destination_work, source_work)):
order.insert(dest, src)
        order_tuple = to_fixed_tuple(np.array(order), a.ndim)
        return np.transpose(a, order_tuple)
@register_jitable
def impl_int(a: np.ndarray, source, destination):
if abs(source) > a.ndim:
raise ValueError("Invalid axis in `source`.")
if abs(destination) > a.ndim:
raise ValueError("Invalid axis in `destination`.")
source = source % a.ndim
destination = destination % a.ndim
order = [n for n in range(a.ndim) if n != source]
order.insert(destination, source)
        order_tuple = to_fixed_tuple(np.array(order), a.ndim)
        return np.transpose(a, order_tuple)
if isinstance(source, types.Integer) and isinstance(destination, types.Integer):
return impl_int
else:
return impl_array
@overload(np.putmask)
def putmask(a: np.ndarray, mask: ArrayLike, values: ArrayLike) -> None:
"""Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : ndarray
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
np.place, np.put, np.take, np.copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
"""
def impl(a: np.ndarray, mask: ArrayLike, values: ArrayLike) -> None:
mask = np.asarray(mask)
values = np.atleast_1d(np.asarray(values))
for idx in range(a.size):
if mask.flat[idx]:
a.flat[idx] = values.flat[idx % len(values)]
return impl | 0.857097 | 0.762468 |
from abc import ABC, abstractmethod
import numpy as np
from numpy.typing import ArrayLike
from .affine import Rotation, Translation
from .utils3d import RotvecRotation
class Joint(ABC):
"""Abstract Joint.
This class is used to define the joint interface and to create a common base
class that joints can inherit from.
"""
@property
@abstractmethod
def param(self):
"""Unified name for the parameter controlling this joint"""
raise NotImplementedError()
@param.setter
@abstractmethod
def param(self, value: ArrayLike):
"""Joints must be modifiable."""
raise NotImplementedError()
@property
@abstractmethod
def upper_limit(self):
"""Maximum parameter value.
Notes
-----
This can be ``np.inf`` if there is no upper limit.
"""
raise NotImplementedError()
@property
@abstractmethod
def lower_limit(self):
"""Minimum parameter value.
Notes
-----
This can be ``-np.inf`` if there is no lower limit.
"""
raise NotImplementedError()
class RotationalJoint(RotvecRotation, Joint):
"""Rotation with constraints in 3D.
Parameters
----------
rotvec : ArrayLike
The vector around which points are rotated.
angle : ArrayLike
        The magnitude of the rotation. If None, the length of ``rotvec`` will be
used.
degrees : bool
If True, angle is assumed to be in degrees. Default is False.
axis : int
        The axis along which to compute. Default: -1.
upper_limit : ArrayLike
The maximum joint angle. Default: 2pi.
lower_limit : ArrayLike
The minimum joint angle. Default: 0.
Notes
-----
Batch dimensions of ``rotvec`` and ``angle`` must be broadcastable.
Setting ``RotationalJoint.angle`` will check if the joint angle limits are
respected; however, setting ``RotationalJoint.param`` will not.
"""
def __init__(
self,
rotvec: ArrayLike,
*,
angle: ArrayLike = None,
degrees: bool = False,
axis: int = -1,
upper_limit: ArrayLike = 2 * np.pi,
lower_limit: ArrayLike = 0,
) -> None:
self._upper_limit = upper_limit
self._lower_limit = lower_limit
super().__init__(rotvec, angle=angle, degrees=degrees, axis=axis)
@property
def upper_limit(self):
return self._upper_limit
@property
def lower_limit(self):
return self._lower_limit
@property
def param(self) -> float:
"""Magnitude of the rotation (in radians)."""
return self._angle
@param.setter
def param(self, value: ArrayLike) -> None:
self._angle = value
self._v = np.cos(value / 2) * self._u - np.sin(value / 2) * self._u_ortho
@RotvecRotation.angle.setter
def angle(self, angle: ArrayLike) -> None:
angle = np.asarray(angle)
if np.any(angle < self.lower_limit) or np.any(angle > self.upper_limit):
raise ValueError("An angle exceeds the joint's limit.")
self.param = angle
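# Usage sketch (illustrative only, not part of the original module; the helper name
# ``_rotational_joint_demo`` is hypothetical): a revolute joint around the z-axis
# limited to [0, pi]. Assigning to ``angle`` validates against the limits, while
# assigning to ``param`` bypasses the check, as described in the class docstring above.
def _rotational_joint_demo():
    joint = RotationalJoint((0, 0, 1), angle=0.5, upper_limit=np.pi, lower_limit=0)
    joint.angle = np.pi / 2  # within limits -> accepted
    try:
        joint.angle = 3 * np.pi / 2  # exceeds upper_limit -> ValueError
    except ValueError:
        pass
    joint.param = 3 * np.pi / 2  # param is not validated
    return joint.param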
class AngleJoint(Rotation, Joint):
"""Rotation with constraints in 2D.
Parameters
----------
angle : ArrayLike
The magnitude of the rotation. If None, it will be set to ``lower_limit``.
degrees : bool
If True, angle is assumed to be in degrees. Default is False.
axis : int
        The axis along which to compute. Default: -1.
upper_limit : ArrayLike
The maximum joint angle. Default: 2pi.
lower_limit : ArrayLike
The minimum joint angle. Default: 0.
Notes
-----
    Setting ``AngleJoint.angle`` will check if the joint angle limits are
    respected; however, setting ``AngleJoint.param`` will not.
"""
def __init__(
self,
*,
angle: ArrayLike = None,
degrees: bool = False,
axis: int = -1,
upper_limit: ArrayLike = 2 * np.pi,
lower_limit: ArrayLike = 0,
) -> None:
super().__init__((1, 0), (0, 1), axis=axis)
angle = angle or lower_limit
if degrees: # make radians
angle = angle / 360 * 2 * np.pi
self.param = angle
self._upper_limit = upper_limit
self._lower_limit = lower_limit
@property
def upper_limit(self):
return self._upper_limit
@property
def lower_limit(self):
return self._lower_limit
@property
def param(self) -> float:
"""Magnitude of the rotation (in radians)."""
return self._angle
@param.setter
def param(self, value: ArrayLike) -> None:
self._angle = value
self._v = np.cos(value / 2) * self._u - np.sin(value / 2) * self._u_ortho
@Rotation.angle.setter
def angle(self, angle: ArrayLike) -> None:
angle = np.asarray(angle)
if np.any(angle < self.lower_limit) or np.any(angle > self.upper_limit):
raise ValueError("An angle exceeds the joint's limit.")
self.param = angle
class PrismaticJoint(Translation, Joint):
"""Translation with constraints in N-D.
Parameters
----------
direction : ArrayLike
A vector describing the translation.
amount : ArrayLike
A scalar indicating by how much to scale ``direction``. Default is 1.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
upper_limit : ArrayLike
The maximum value of amount. Default: 1.
lower_limit : ArrayLike
The minimum value of amount. Default: 0.
Notes
-----
Setting ``PrismaticJoint.amount`` will enforce joint limits; however,
setting ``PrismaticJoint.param`` will not.
"""
def __init__(
self,
direction: ArrayLike,
*,
upper_limit: ArrayLike = 1,
lower_limit: ArrayLike = 0,
amount: ArrayLike = 1,
axis: int = -1,
) -> None:
self._upper_limit = upper_limit
self._lower_limit = lower_limit
super().__init__(direction, amount=amount, axis=axis)
@property
def upper_limit(self):
return self._upper_limit
@property
def lower_limit(self):
return self._lower_limit
@property
def param(self) -> float:
"""The amount by which to scale the direction vector."""
return self._amount
@param.setter
def param(self, value: ArrayLike) -> None:
self._amount = np.asarray(value)
@Translation.amount.setter
def amount(self, amount: ArrayLike) -> None:
amount = np.asarray(amount)
if np.any(amount < self.lower_limit) or np.any(amount > self.upper_limit):
raise ValueError("An angle exceeds the joint's limit.")
self.param = amount | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/joints.py | joints.py | from abc import ABC, abstractmethod
import numpy as np
from numpy.typing import ArrayLike
from .affine import Rotation, Translation
from .utils3d import RotvecRotation
class Joint(ABC):
"""Abstract Joint.
This class is used to define the joint interface and to create a common base
class that joints can inherit from.
"""
@property
@abstractmethod
def param(self):
"""Unified name for the parameter controlling this joint"""
raise NotImplementedError()
@param.setter
@abstractmethod
def param(self, value: ArrayLike):
"""Joints must be modifiable."""
raise NotImplementedError()
@property
@abstractmethod
def upper_limit(self):
"""Maximum parameter value.
Notes
-----
This can be ``np.inf`` if there is no upper limit.
"""
raise NotImplementedError()
@property
@abstractmethod
def lower_limit(self):
"""Minimum parameter value.
Notes
-----
This can be ``-np.inf`` if there is no lower limit.
"""
raise NotImplementedError()
class RotationalJoint(RotvecRotation, Joint):
"""Rotation with constraints in 3D.
Parameters
----------
rotvec : ArrayLike
The vector around which points are rotated.
angle : ArrayLike
        The magnitude of the rotation. If None, the length of ``rotvec`` will be
used.
degrees : bool
If True, angle is assumed to be in degrees. Default is False.
axis : int
        The axis along which to compute. Default: -1.
upper_limit : ArrayLike
The maximum joint angle. Default: 2pi.
lower_limit : ArrayLike
The minimum joint angle. Default: 0.
Notes
-----
Batch dimensions of ``rotvec`` and ``angle`` must be broadcastable.
Setting ``RotationalJoint.angle`` will check if the joint angle limits are
respected; however, setting ``RotationalJoint.param`` will not.
"""
def __init__(
self,
rotvec: ArrayLike,
*,
angle: ArrayLike = None,
degrees: bool = False,
axis: int = -1,
upper_limit: ArrayLike = 2 * np.pi,
lower_limit: ArrayLike = 0,
) -> None:
self._upper_limit = upper_limit
self._lower_limit = lower_limit
super().__init__(rotvec, angle=angle, degrees=degrees, axis=axis)
@property
def upper_limit(self):
return self._upper_limit
@property
def lower_limit(self):
return self._lower_limit
@property
def param(self) -> float:
"""Magnitude of the rotation (in radians)."""
return self._angle
@param.setter
def param(self, value: ArrayLike) -> None:
self._angle = value
self._v = np.cos(value / 2) * self._u - np.sin(value / 2) * self._u_ortho
@RotvecRotation.angle.setter
def angle(self, angle: ArrayLike) -> None:
angle = np.asarray(angle)
if np.any(angle < self.lower_limit) or np.any(angle > self.upper_limit):
raise ValueError("An angle exceeds the joint's limit.")
self.param = angle
class AngleJoint(Rotation, Joint):
"""Rotation with constraints in 2D.
Parameters
----------
angle : ArrayLike
The magnitude of the rotation. If None, it will be set to ``lower_limit``.
degrees : bool
If True, angle is assumed to be in degrees. Default is False.
axis : int
        The axis along which to compute. Default: -1.
upper_limit : ArrayLike
The maximum joint angle. Default: 2pi.
lower_limit : ArrayLike
The minimum joint angle. Default: 0.
Notes
-----
    Setting ``AngleJoint.angle`` will check if the joint angle limits are
    respected; however, setting ``AngleJoint.param`` will not.
"""
def __init__(
self,
*,
angle: ArrayLike = None,
degrees: bool = False,
axis: int = -1,
upper_limit: ArrayLike = 2 * np.pi,
lower_limit: ArrayLike = 0,
) -> None:
super().__init__((1, 0), (0, 1), axis=axis)
angle = angle or lower_limit
if degrees: # make radians
angle = angle / 360 * 2 * np.pi
self.param = angle
self._upper_limit = upper_limit
self._lower_limit = lower_limit
@property
def upper_limit(self):
return self._upper_limit
@property
def lower_limit(self):
return self._lower_limit
@property
def param(self) -> float:
"""Magnitude of the rotation (in radians)."""
return self._angle
@param.setter
def param(self, value: ArrayLike) -> None:
self._angle = value
self._v = np.cos(value / 2) * self._u - np.sin(value / 2) * self._u_ortho
@Rotation.angle.setter
def angle(self, angle: ArrayLike) -> None:
angle = np.asarray(angle)
if np.any(angle < self.lower_limit) or np.any(angle > self.upper_limit):
raise ValueError("An angle exceeds the joint's limit.")
self.param = angle
class PrismaticJoint(Translation, Joint):
"""Translation with constraints in N-D.
Parameters
----------
direction : ArrayLike
A vector describing the translation.
amount : ArrayLike
A scalar indicating by how much to scale ``direction``. Default is 1.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
upper_limit : ArrayLike
The maximum value of amount. Default: 1.
lower_limit : ArrayLike
The minimum value of amount. Default: 0.
Notes
-----
Setting ``PrismaticJoint.amount`` will enforce joint limits; however,
setting ``PrismaticJoint.param`` will not.
"""
def __init__(
self,
direction: ArrayLike,
*,
upper_limit: ArrayLike = 1,
lower_limit: ArrayLike = 0,
amount: ArrayLike = 1,
axis: int = -1,
) -> None:
self._upper_limit = upper_limit
self._lower_limit = lower_limit
super().__init__(direction, amount=amount, axis=axis)
@property
def upper_limit(self):
return self._upper_limit
@property
def lower_limit(self):
return self._lower_limit
@property
def param(self) -> float:
"""The amount by which to scale the direction vector."""
return self._amount
@param.setter
def param(self, value: ArrayLike) -> None:
self._amount = np.asarray(value)
@Translation.amount.setter
def amount(self, amount: ArrayLike) -> None:
amount = np.asarray(amount)
if np.any(amount < self.lower_limit) or np.any(amount > self.upper_limit):
            raise ValueError("The amount exceeds the joint's limit.")
self.param = amount | 0.970785 | 0.614307 |
from numpy.typing import ArrayLike
from typing import List, Tuple, Union, Callable
import numpy as np
from dataclasses import dataclass, field
from queue import PriorityQueue
import warnings
def DepthFirst(frames: Tuple["Frame"], links: Tuple["Link"]) -> float:
"""Depth-first search metric for Frame.chain_between"""
return -len(links)
def BreadthFirst(frames: Tuple["Frame"], links: Tuple["Link"]) -> float:
"""Beadth-first search metric for Frame.chain_between"""
return -1 / len(frames)
@dataclass(order=True)
class QueueItem:
priority: float
frames: List["Frame"] = field(compare=False)
links: List["Link"] = field(compare=False)
class FramePath(str):
def matches(self, frames: Tuple["Frame"]) -> bool:
return frames[-1].name == self
class Link:
"""A directional relationship between two Frames
An abstract class that describes a transformation from a parent frame into a
child frame. Its default use is to express a vector given in the parent
frame using the child frame.
Attributes
----------
transformation : np.ndarray
        An affine matrix describing the transformation from the parent frame to
the child frame.
Notes
-----
:attr:`Link.transformation` may raise a ``NotImplementedError`` if the link
doesn't support affine transformation matrices, or if the matrix doesn't exist.
"""
def __init__(self, parent_dim: int, child_dim: int) -> None:
self.parent_dim: int = parent_dim
self.child_dim: int = child_dim
def __call__(
self, parent: "Frame", child: "Frame" = None, *, add_inverse: bool = True
) -> "Frame":
"""Add this link to the parent frame.
Parameters
----------
parent : Frame
The Frame from which vectors originate.
child : Frame
The Frame in which vectors are expressed after they were mapped by
this link's transform. If None, a new child will be created.
add_inverse : bool
Also add the inverse link to the child if this Link is invertible.
Defaults to ``True``.
Returns
-------
child : Frame
The Frame in which vectors are expressed after they were mapped
by this link's transform.
"""
if child is None:
child = Frame(self.child_dim)
parent.add_link(self, child)
if add_inverse:
try:
child.add_link(self.invert(), parent)
except ValueError:
# inverse does not exist
pass
return child
    def invert(self) -> "Link":
"""Returns a new link that is the inverse of this link.
The links share parameters, i.e., if the transform of a link changes,
the transform of its inverse does also.
"""
return InvertLink(self)
def transform(self, x: ArrayLike) -> np.ndarray:
"""Expresses the vector x (assumed to be given in the parent's frame) in
the child's frame.
Parameters
----------
x : ArrayLike
The vector expressed in the parent's frame
Returns
-------
y : ArrayLike
The vector expressed in the child's frame
"""
raise NotImplementedError
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
"""Transform x (given in the child frame) into the parent frame.
Parameters
----------
x : ArrayLike
            The vector expressed in the child's frame
Returns
-------
y : ArrayLike
            The vector expressed in the parent's frame
"""
raise NotImplementedError
class InvertLink(Link):
"""Inverse of an existing Link.
This class can be constructed from any link that implements
__inverse_transform__. It is a tight wrapper around the original link and
shares any parameters. Accordingly, if the original link updates, so will
this link.
Parameters
----------
link : Link
The link to be inverted. It must implement __inverse_transform__.
"""
def __init__(self, link: Link) -> None:
if link.__inverse_transform__.__func__ is Link.__inverse_transform__:
raise ValueError("Link doesn't implement __inverse_transform__.") from None
super().__init__(link.child_dim, link.parent_dim)
self._forward_link = link
def transform(self, x: ArrayLike) -> np.ndarray:
return self._forward_link.__inverse_transform__(x)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
return self._forward_link.transform(x)
def __getattr__(self, attr):
return getattr(self._forward_link, attr)
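# Sketch of a minimal invertible link (illustrative only, not part of the original
# module; ``_ScaleLink`` is a hypothetical name). Because it implements
# ``__inverse_transform__``, it can be wrapped by ``InvertLink`` via ``Link.invert``,
# e.g. ``_ScaleLink(3, 2.0).invert().transform([2.0, 4.0, 6.0])`` -> array([1., 2., 3.]).
class _ScaleLink(Link):
    def __init__(self, ndim: int, factor: float) -> None:
        super().__init__(ndim, ndim)
        self.factor = factor
    def transform(self, x: ArrayLike) -> np.ndarray:
        # scale vectors from the parent frame into the child frame
        return self.factor * np.asarray(x)
    def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
        # undo the scaling, mapping child-frame vectors back into the parent frame
        return np.asarray(x) / self.factor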
class Frame:
"""Representation of a coordinate system.
Each coordinate frame is a node in a directed graph where edges
(:class:`skbot.transform.Link`) describe transformations between frames. This
transformation is not limited to neighbours, but works between any two
frames that share a chain of links pointing from a parent to the
(grand-)child.
Parameters
----------
ndim : int
Number of coordinate dimensions.
name : str
The name of this coordinate frame. Defaults to ``None``.
"""
def __init__(self, ndim: int, *, name: str = None) -> None:
        self._children: List[Tuple["Frame", Link]] = list()
self.ndim: int = ndim
self.name = name
def transform(
self,
x: ArrayLike,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
) -> np.ndarray:
"""Express the vector x in to_frame.
Express the vector x - assumed to be in this frame - in the coordinate
frame to_frame. If it is not possible to find a transformation between
this frame and to_frame a RuntimeError will be raised.
Parameters
----------
x : ArrayLike
A vector expressed in this frame.
to_frame : Frame, str
The frame in which x should be expressed. If it is a string,
:func:`~transform` will search for a child frame with the given
string as name. In case of duplicate names in a frame graph, the
first one found is used.
ignore_frames : Frame
Any frames that should be ignored when searching for a suitable
transformation chain. Note that this currently does not support
string aliases.
Returns
-------
x_new : np.ndarray
            The vector x expressed in to_frame.
Raises
------
RuntimeError
If no suitable chain of transformations can be found, a RuntimeError is raised.
"""
x_new = np.asarray(x)
for link in self.links_between(to_frame, ignore_frames=ignore_frames):
x_new = link.transform(x_new)
return x_new
def get_affine_matrix(
self, to_frame: Union["Frame", str], *, ignore_frames: List["Frame"] = None
) -> np.ndarray:
"""Affine transformation matrix to ``to_frame`` (if existant).
Parameters
----------
to_frame : Frame
The frame in which x should be expressed.
ignore_frames : Frame
Any frames that should be ignored when searching for a suitable
transformation chain.
Returns
-------
tf_matrix : np.ndarray
The matrix describing the transformation.
Raises
------
RuntimeError
If no suitable chain of transformations can be found, a RuntimeError
is raised.
Notes
-----
The affine transformation matrix between two frames only exists if the
transformation chain is linear in ``self.ndim+1`` dimensions. Requesting
a non-existing affine matrix will raise an Exception. In practice, this
means that each link along the transformation chain needs to implement
:attr:`skbot.transform.Link.transformation`.
"""
tf_matrix = np.eye(self.ndim + 1)
for link in self.links_between(to_frame, ignore_frames=ignore_frames):
tf_matrix = link.affine_matrix @ tf_matrix
return tf_matrix
def add_link(self, edge: Link, child: "Frame") -> None:
"""Add an edge to the frame graph.
        The edge is directional and points from this frame to another (possibly
identical) frame.
Parameters
----------
edge : Link
The transformation to add to the graph.
child : Frame
The frame that following this link leads to.
"""
self._children.append((child, edge))
def _enqueue_children(
self,
queue: PriorityQueue,
queue_item: QueueItem,
*,
visited: List["Frame"],
metric: Callable[[Tuple["Frame"], Tuple[Link]], float],
max_depth: int,
) -> None:
"""Internal logic for :func:chain_between.
Appends all children that have not been visited previously to
the search queue.
"""
frames = queue_item.frames
sub_chain = queue_item.links
if len(sub_chain) == max_depth:
return
for child, link in self._children:
if child in visited:
continue
new_frames = (*frames, child)
new_chain = (*sub_chain, link)
priority = metric(new_frames, new_chain)
new_item = QueueItem(priority, new_frames, new_chain)
queue.put(new_item)
def chain_between(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
) -> Tuple[List["Frame"], List[Link]]:
"""Get the frames and links between this frame and ``to_frame``.
.. versionadded:: 0.9.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns both, the sequence
of frames and the sequence of links involved in this transformation.
The sequence of frames will have this frame as its first element,
        and ``to_frame`` as its last element.
The first element of the returned sequence of links takes as input the
vector expressed in the current frame; each following element takes as
input the vector expressed in its predecessor's output frame. The last
element outputs the vector expressed in ``to_frame``.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
Returns
-------
frames : List[Frame]
A list of frames between this frame and ``to_frame`` (inclusive).
links : List[Link]
A list of links between this frame and ``to_frame``.
"""
if max_depth is None:
max_depth = float("inf")
if ignore_frames is None:
ignore_frames = list()
if isinstance(to_frame, str):
to_frame = FramePath(to_frame)
frames = (self,)
sub_chain = tuple()
queue = PriorityQueue()
root_el = QueueItem(metric(frames, sub_chain), frames, sub_chain)
queue.put(root_el)
while not queue.empty():
item: QueueItem = queue.get()
frames = item.frames
sub_chain = item.links
active_frame = frames[-1]
if isinstance(to_frame, FramePath):
is_match = to_frame.matches(frames)
else:
is_match = active_frame == to_frame
if is_match:
break
ignore_frames.append(active_frame)
active_frame._enqueue_children(
queue, item, visited=ignore_frames, metric=metric, max_depth=max_depth
)
else:
raise RuntimeError(
"Did not find a transformation chain to the target frame."
)
return list(frames), sub_chain
def transform_chain(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
) -> List[Link]:
"""Get a transformation chain into ``to_frame`` (deprecated).
.. deprecated:: 0.9.0
This method is deprecated and will be removed in scikit-bot v1.0.
Use ``Frame.links_between`` instead.
.. versionadded:: 0.5.0
This method was added to Frame.
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
links involved in this transformation. The first element of the returned
sequence takes as input the vector expressed in the current frame; each
following element takes as input the vector expressed in its
predecessor's output frame. The last element outputs the vector
expressed in ``to_frame``.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
warnings.warn(
"`Frame.transform_chain` is deprecated."
" Use `Frame.links_between` instead.",
DeprecationWarning,
)
_, links = self.chain_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
return links
def links_between(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
):
"""Get the links between this frame and ``to_frame``.
.. versionadded:: 0.9.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
links involved in this transformation. The first element of the returned
sequence takes as input the vector expressed in the current frame; each
following element takes as input the vector expressed in its
predecessor's output frame. The last element outputs the vector
expressed in ``to_frame``.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
_, links = self.chain_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
return links
def frames_between(
self,
to_frame: Union["Frame", str],
*,
include_self: bool = True,
include_to_frame: bool = True,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
):
"""Get the frames between this frame and ``to_frame``.
.. versionadded:: 0.9.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
frames between this frame and ``to_frame``.
Parameters
----------
include_self : bool
If ``True`` (default) this frame will be added as the first
element of the returned sequence.
include_to_frame : bool
If ``True`` (default) ``to_frame`` will be added as the last
element of the returned sequence.
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
frames, _ = self.chain_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
if not include_self:
frames.pop(0)
if not include_to_frame:
frames.pop(-1)
return frames
def joints_between(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
):
"""Get the links between this frame and ``to_frame``.
.. versionadded:: 0.12.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
        joints involved in this transformation. All joints will be "unwrapped",
i.e., if a Joint occurs inside a :class:`tf:InvertLink` or similar, then
only the Joint will be returned. The resulting order matches the order
that links are encountered in the transformation chain.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
# avoid circular import
from .simplfy import simplify_links
from .joints import Joint
links = self.links_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
# unwrap and summarize what we can
links = simplify_links(links, keep_joints=True)
# unwrap inverse links
links = [
link._forward_link if isinstance(link, InvertLink) else link
for link in links
]
joints = [link for link in links if isinstance(link, Joint)]
return joints
def find_frame(self, path: str, *, ignore_frames: List["Frame"] = None) -> "Frame":
"""Find a frame matching a given path.
.. versionadded:: 0.3.0
This method was added to frame.
This method allows you to find reachable frames using an xpath inspired
        syntax. Path elements are separated using the `/` character. Each
element of the path is the name of a frame. For example,
``world/link1/link2/gripper``, denotes a sequence of 4 frames with names
``["world", "link1", "link2", "gripper"]``. The final frame in the path
(gripper) is returned.
By default an element along the path is directly connected to its next
element. In the previous example this means that there must exist a
direct link from "world" to "link1". An exception to this rule is the
use of an ellipsis (...), in which case an element must be connected to
its next element by a transformation chain (a sequence of
links).
The following path elements have special meanings:
- Ellipsis (``...``)
Indicates that the previous frame and the next frame are
connected by a transformation chain instead of being connected
directly.
- None (``//``)
Omitting a name indicates that the name of this frame is None.
Parameters
----------
        path : str
A xpath string describing the frame to search for.
ignore_frames : List[Frame]
Any frames that should be ignored when matching the path.
Returns
-------
matched_frame : Frame
A frame matching the given path.
Notes
-----
In directed graphs there is no clear notion of search order; hence it is
undefined which frame is found if multiple matches for the path exist.
In this case an arbitrary match is returned, and you should not count on
the result to be deterministic.
Because ``...`` and ``//`` have special meaning, frames with names
``"..."`` or ``""`` will be ignored by this method and can not be found.
Similarly, frames that use slashes, e.g. ``namespace/my_frame``, will be
        ignored and instead the sequence ``["namespace", "my_frame"]`` will be
matched.
Each element of the path is assumed to represent a unique frame. This
means that circular paths will not be matched.
"""
parts = path.split("/")
part = parts.pop(0)
indirect = False
while len(parts) > 0 and part == "...":
indirect = True
part = parts.pop(0)
if part == "...":
raise ValueError(f"Path ends with ellipsis: {path}")
part = None if part == "" else part
if len(parts) == 0 and self.name == part:
return self
elif not indirect and self.name != part:
raise RuntimeError(f"No match for {path}.")
elif len(parts) > 0 and self.name == part:
sub_path = "/".join(parts)
else:
parts = ["...", part] + parts
sub_path = "/".join(parts)
if ignore_frames is None:
ignore_frames = []
local_ignore = [self]
local_ignore.extend(ignore_frames)
child_frame: Frame
for child_frame, _ in self._children:
if child_frame in local_ignore:
continue
try:
return child_frame.find_frame(sub_path, ignore_frames=local_ignore)
except RuntimeError:
continue
else:
raise RuntimeError(f"No match for {path}.")
class CustomLink(Link):
"""A link representing a custom transformation.
Initialize a new link from the callable ``transformation`` that transforms a
vector from the parent frame to the child frame. This link can represent
arbitrary transformations between two frames.
Parameters
----------
parent : Frame
The frame in which vectors are specified.
child : Frame
The frame into which this link transforms vectors.
    transformation : Callable[[ArrayLike], np.ndarray]
A callable that takes a vector - in the parent frame - as input and
returns the vector in the child frame.
Notes
-----
    This class does not implement :func:`Link.__inverse_transform__`.
"""
def __init__(
self,
parent_dim: int,
child_dim: int,
transformation: Callable[[ArrayLike], np.ndarray],
) -> None:
"""Initialize a new custom link."""
super().__init__(parent_dim, child_dim)
self._transform = transformation
def transform(self, x: ArrayLike) -> np.ndarray:
return self._transform(x)
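# Usage sketch (illustrative only, not part of the original module; ``_compound_link_demo``
# is a hypothetical name): two CustomLinks chained via ``CompundLink`` (defined below)
# into a single link that first shifts and then scales a vector.
def _compound_link_demo():
    shift = CustomLink(3, 3, lambda x: np.asarray(x) + 1.0)
    scale = CustomLink(3, 3, lambda x: 2.0 * np.asarray(x))
    combined = CompundLink([shift, scale])
    return combined.transform([0.0, 1.0, 2.0])  # -> array([2., 4., 6.])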
class CompundLink(Link):
"""A link representing a sequence of other links
.. versionadded:: 0.4.0
This link allows you to build a complex link from a sequence of simpler
links. Upon ``transform`` each link transforms the result of its predecessor
and the result of the last link is returned. Similarly, when
    __inverse_transform__ is called, each link inverse-transforms the result of its
    successor (in reverse order).
Parameters
----------
wrapped_links : List[Link]
A sequence of link objects. The order is first-to-last, i.e.,
``wrapped_links[0]`` is applied first and ``wrapped_links[-1]`` is
applied last.
Notes
-----
This link will only be invertible if all wrapped links implement
__inverse_transform__.
"""
def __init__(self, wrapped_links: List[Link]):
super().__init__(wrapped_links[0].parent_dim, wrapped_links[-1].child_dim)
self._links = wrapped_links
def transform(self, x: ArrayLike) -> np.ndarray:
for link in self._links:
x = link.transform(x)
return x
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
for link in reversed(self._links):
x = link.__inverse_transform__(x)
return x | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/base.py | base.py | from numpy.typing import ArrayLike
from typing import List, Tuple, Union, Callable
import numpy as np
from dataclasses import dataclass, field
from queue import PriorityQueue
import warnings
def DepthFirst(frames: Tuple["Frame"], links: Tuple["Link"]) -> float:
"""Depth-first search metric for Frame.chain_between"""
return -len(links)
def BreadthFirst(frames: Tuple["Frame"], links: Tuple["Link"]) -> float:
"""Beadth-first search metric for Frame.chain_between"""
return -1 / len(frames)
@dataclass(order=True)
class QueueItem:
priority: float
frames: List["Frame"] = field(compare=False)
links: List["Link"] = field(compare=False)
class FramePath(str):
def matches(self, frames: Tuple["Frame"]) -> bool:
return frames[-1].name == self
class Link:
"""A directional relationship between two Frames
An abstract class that describes a transformation from a parent frame into a
child frame. Its default use is to express a vector given in the parent
frame using the child frame.
Attributes
----------
transformation : np.ndarray
        An affine matrix describing the transformation from the parent frame to
the child frame.
Notes
-----
:attr:`Link.transformation` may raise a ``NotImplementedError`` if the link
doesn't support affine transformation matrices, or if the matrix doesn't exist.
"""
def __init__(self, parent_dim: int, child_dim: int) -> None:
self.parent_dim: int = parent_dim
self.child_dim: int = child_dim
def __call__(
self, parent: "Frame", child: "Frame" = None, *, add_inverse: bool = True
) -> "Frame":
"""Add this link to the parent frame.
Parameters
----------
parent : Frame
The Frame from which vectors originate.
child : Frame
The Frame in which vectors are expressed after they were mapped by
this link's transform. If None, a new child will be created.
add_inverse : bool
Also add the inverse link to the child if this Link is invertible.
Defaults to ``True``.
Returns
-------
child : Frame
The Frame in which vectors are expressed after they were mapped
by this link's transform.
"""
if child is None:
child = Frame(self.child_dim)
parent.add_link(self, child)
if add_inverse:
try:
child.add_link(self.invert(), parent)
except ValueError:
# inverse does not exist
pass
return child
    def invert(self) -> "Link":
"""Returns a new link that is the inverse of this link.
The links share parameters, i.e., if the transform of a link changes,
the transform of its inverse does also.
"""
return InvertLink(self)
def transform(self, x: ArrayLike) -> np.ndarray:
"""Expresses the vector x (assumed to be given in the parent's frame) in
the child's frame.
Parameters
----------
x : ArrayLike
The vector expressed in the parent's frame
Returns
-------
y : ArrayLike
The vector expressed in the child's frame
"""
raise NotImplementedError
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
"""Transform x (given in the child frame) into the parent frame.
Parameters
----------
x : ArrayLike
            The vector expressed in the child's frame
Returns
-------
y : ArrayLike
            The vector expressed in the parent's frame
"""
raise NotImplementedError
class InvertLink(Link):
"""Inverse of an existing Link.
This class can be constructed from any link that implements
__inverse_transform__. It is a tight wrapper around the original link and
shares any parameters. Accordingly, if the original link updates, so will
this link.
Parameters
----------
link : Link
The link to be inverted. It must implement __inverse_transform__.
"""
def __init__(self, link: Link) -> None:
if link.__inverse_transform__.__func__ is Link.__inverse_transform__:
raise ValueError("Link doesn't implement __inverse_transform__.") from None
super().__init__(link.child_dim, link.parent_dim)
self._forward_link = link
def transform(self, x: ArrayLike) -> np.ndarray:
return self._forward_link.__inverse_transform__(x)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
return self._forward_link.transform(x)
def __getattr__(self, attr):
return getattr(self._forward_link, attr)
class Frame:
"""Representation of a coordinate system.
Each coordinate frame is a node in a directed graph where edges
(:class:`skbot.transform.Link`) describe transformations between frames. This
transformation is not limited to neighbours, but works between any two
frames that share a chain of links pointing from a parent to the
(grand-)child.
Parameters
----------
ndim : int
Number of coordinate dimensions.
name : str
The name of this coordinate frame. Defaults to ``None``.
"""
def __init__(self, ndim: int, *, name: str = None) -> None:
        self._children: List[Tuple["Frame", Link]] = list()
self.ndim: int = ndim
self.name = name
def transform(
self,
x: ArrayLike,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
) -> np.ndarray:
"""Express the vector x in to_frame.
Express the vector x - assumed to be in this frame - in the coordinate
frame to_frame. If it is not possible to find a transformation between
this frame and to_frame a RuntimeError will be raised.
Parameters
----------
x : ArrayLike
A vector expressed in this frame.
to_frame : Frame, str
The frame in which x should be expressed. If it is a string,
:func:`~transform` will search for a child frame with the given
string as name. In case of duplicate names in a frame graph, the
first one found is used.
ignore_frames : Frame
Any frames that should be ignored when searching for a suitable
transformation chain. Note that this currently does not support
string aliases.
Returns
-------
x_new : np.ndarray
            The vector x expressed in to_frame.
Raises
------
RuntimeError
If no suitable chain of transformations can be found, a RuntimeError is raised.
"""
x_new = np.asarray(x)
for link in self.links_between(to_frame, ignore_frames=ignore_frames):
x_new = link.transform(x_new)
return x_new
def get_affine_matrix(
self, to_frame: Union["Frame", str], *, ignore_frames: List["Frame"] = None
) -> np.ndarray:
"""Affine transformation matrix to ``to_frame`` (if existant).
Parameters
----------
to_frame : Frame
The frame in which x should be expressed.
ignore_frames : Frame
Any frames that should be ignored when searching for a suitable
transformation chain.
Returns
-------
tf_matrix : np.ndarray
The matrix describing the transformation.
Raises
------
RuntimeError
If no suitable chain of transformations can be found, a RuntimeError
is raised.
Notes
-----
The affine transformation matrix between two frames only exists if the
transformation chain is linear in ``self.ndim+1`` dimensions. Requesting
a non-existing affine matrix will raise an Exception. In practice, this
means that each link along the transformation chain needs to implement
:attr:`skbot.transform.Link.transformation`.
"""
tf_matrix = np.eye(self.ndim + 1)
for link in self.links_between(to_frame, ignore_frames=ignore_frames):
tf_matrix = link.affine_matrix @ tf_matrix
return tf_matrix
def add_link(self, edge: Link, child: "Frame") -> None:
"""Add an edge to the frame graph.
        The edge is directional and points from this frame to another (possibly
identical) frame.
Parameters
----------
edge : Link
The transformation to add to the graph.
child : Frame
The frame that following this link leads to.
"""
self._children.append((child, edge))
def _enqueue_children(
self,
queue: PriorityQueue,
queue_item: QueueItem,
*,
visited: List["Frame"],
metric: Callable[[Tuple["Frame"], Tuple[Link]], float],
max_depth: int,
) -> None:
"""Internal logic for :func:chain_between.
Appends all children that have not been visited previously to
the search queue.
"""
frames = queue_item.frames
sub_chain = queue_item.links
if len(sub_chain) == max_depth:
return
for child, link in self._children:
if child in visited:
continue
new_frames = (*frames, child)
new_chain = (*sub_chain, link)
priority = metric(new_frames, new_chain)
new_item = QueueItem(priority, new_frames, new_chain)
queue.put(new_item)
def chain_between(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
) -> Tuple[List["Frame"], List[Link]]:
"""Get the frames and links between this frame and ``to_frame``.
.. versionadded:: 0.9.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns both, the sequence
of frames and the sequence of links involved in this transformation.
The sequence of frames will have this frame as its first element,
        and ``to_frame`` as its last element.
The first element of the returned sequence of links takes as input the
vector expressed in the current frame; each following element takes as
input the vector expressed in its predecessor's output frame. The last
element outputs the vector expressed in ``to_frame``.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
Returns
-------
frames : List[Frame]
A list of frames between this frame and ``to_frame`` (inclusive).
links : List[Link]
A list of links between this frame and ``to_frame``.
"""
if max_depth is None:
max_depth = float("inf")
if ignore_frames is None:
ignore_frames = list()
if isinstance(to_frame, str):
to_frame = FramePath(to_frame)
frames = (self,)
sub_chain = tuple()
queue = PriorityQueue()
root_el = QueueItem(metric(frames, sub_chain), frames, sub_chain)
queue.put(root_el)
while not queue.empty():
item: QueueItem = queue.get()
frames = item.frames
sub_chain = item.links
active_frame = frames[-1]
if isinstance(to_frame, FramePath):
is_match = to_frame.matches(frames)
else:
is_match = active_frame == to_frame
if is_match:
break
ignore_frames.append(active_frame)
active_frame._enqueue_children(
queue, item, visited=ignore_frames, metric=metric, max_depth=max_depth
)
else:
raise RuntimeError(
"Did not find a transformation chain to the target frame."
)
return list(frames), sub_chain
def transform_chain(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
) -> List[Link]:
"""Get a transformation chain into ``to_frame`` (deprecated).
.. deprecated:: 0.9.0
This method is deprecated and will be removed in scikit-bot v1.0.
Use ``Frame.links_between`` instead.
.. versionadded:: 0.5.0
This method was added to Frame.
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
links involved in this transformation. The first element of the returned
sequence takes as input the vector expressed in the current frame; each
following element takes as input the vector expressed in its
predecessor's output frame. The last element outputs the vector
expressed in ``to_frame``.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
warnings.warn(
"`Frame.transform_chain` is deprecated."
" Use `Frame.links_between` instead.",
DeprecationWarning,
)
_, links = self.chain_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
return links
def links_between(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
):
"""Get the links between this frame and ``to_frame``.
.. versionadded:: 0.9.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
links involved in this transformation. The first element of the returned
sequence takes as input the vector expressed in the current frame; each
following element takes as input the vector expressed in its
predecessor's output frame. The last element outputs the vector
expressed in ``to_frame``.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
_, links = self.chain_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
return links
def frames_between(
self,
to_frame: Union["Frame", str],
*,
include_self: bool = True,
include_to_frame: bool = True,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
):
"""Get the frames between this frame and ``to_frame``.
.. versionadded:: 0.9.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
frames between this frame and ``to_frame``.
Parameters
----------
include_self : bool
If ``True`` (default) this frame will be added as the first
element of the returned sequence.
include_to_frame : bool
If ``True`` (default) ``to_frame`` will be added as the last
element of the returned sequence.
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
frames, _ = self.chain_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
if not include_self:
frames.pop(0)
if not include_to_frame:
frames.pop(-1)
return frames
def joints_between(
self,
to_frame: Union["Frame", str],
*,
ignore_frames: List["Frame"] = None,
metric: Callable[[Tuple["Frame"], Tuple[Link]], float] = DepthFirst,
max_depth: int = None,
):
"""Get the links between this frame and ``to_frame``.
.. versionadded:: 0.12.0
This function searches the frame graph for a chain of transformations
from the current frame into ``to_frame`` and returns the sequence of
        joints involved in this transformation. All joints will be "unwrapped",
i.e., if a Joint occurs inside a :class:`tf:InvertLink` or similar, then
only the Joint will be returned. The resulting order matches the order
that links are encountered in the transformation chain.
Parameters
----------
to_frame : Frame, str
The frame in which to express vectors. If ``str``, the graph is
searched for any node matching that name and the transformation
chain to the first node matching the name is returned.
ignore_frames : List[Frame]
A list of frames to exclude while searching for a transformation
chain.
metric : Callable[[Tuple[Frame], Tuple[Link]], float]
A function to compute the priority of a sub-chain. Sub-chains are
searched in order of priority starting with the lowest value. The
first chain that matches ``to_frame`` is returned. You can use a
custom function that takes a sequence of frames and links involved
in the sub-chain as input and returns a float (signature
            ``custom_fn(frames, links) -> float``), or you can use a pre-built
function:
skbot.transform.metric.DepthFirst
The frame graph is searched depth-first with no
preference among frames (default).
skbot.transform.metric.BreadthFirst
The frame graph is searched breadth-first with no
preference among frames.
max_depth : int
If not None, the maximum depth to search, i.e., the maximum length
of the transform chain.
"""
# avoid circular import
from .simplfy import simplify_links
from .joints import Joint
links = self.links_between(
to_frame, ignore_frames=ignore_frames, metric=metric, max_depth=max_depth
)
# unwrap and summarize what we can
links = simplify_links(links, keep_joints=True)
# unwrap inverse links
links = [
link._forward_link if isinstance(link, InvertLink) else link
for link in links
]
joints = [link for link in links if isinstance(link, Joint)]
return joints
def find_frame(self, path: str, *, ignore_frames: List["Frame"] = None) -> "Frame":
"""Find a frame matching a given path.
.. versionadded:: 0.3.0
        This method was added to ``Frame``.
This method allows you to find reachable frames using an xpath inspired
        syntax. Path elements are separated using the `/` character. Each
element of the path is the name of a frame. For example,
``world/link1/link2/gripper``, denotes a sequence of 4 frames with names
``["world", "link1", "link2", "gripper"]``. The final frame in the path
(gripper) is returned.
By default an element along the path is directly connected to its next
element. In the previous example this means that there must exist a
direct link from "world" to "link1". An exception to this rule is the
use of an ellipsis (...), in which case an element must be connected to
its next element by a transformation chain (a sequence of
links).
The following path elements have special meanings:
- Ellipsis (``...``)
Indicates that the previous frame and the next frame are
connected by a transformation chain instead of being connected
directly.
- None (``//``)
Omitting a name indicates that the name of this frame is None.
Parameters
----------
xpath : str
A xpath string describing the frame to search for.
ignore_frames : List[Frame]
Any frames that should be ignored when matching the path.
Returns
-------
matched_frame : Frame
A frame matching the given path.
Notes
-----
In directed graphs there is no clear notion of search order; hence it is
undefined which frame is found if multiple matches for the path exist.
In this case an arbitrary match is returned, and you should not count on
the result to be deterministic.
Because ``...`` and ``//`` have special meaning, frames with names
``"..."`` or ``""`` will be ignored by this method and can not be found.
Similarly, frames that use slashes, e.g. ``namespace/my_frame``, will be
        ignored and instead the sequence ``["namespace", "my_frame"]`` will be
matched.
Each element of the path is assumed to represent a unique frame. This
means that circular paths will not be matched.
"""
parts = path.split("/")
part = parts.pop(0)
indirect = False
while len(parts) > 0 and part == "...":
indirect = True
part = parts.pop(0)
if part == "...":
raise ValueError(f"Path ends with ellipsis: {path}")
part = None if part == "" else part
if len(parts) == 0 and self.name == part:
return self
elif not indirect and self.name != part:
raise RuntimeError(f"No match for {path}.")
elif len(parts) > 0 and self.name == part:
sub_path = "/".join(parts)
else:
parts = ["...", part] + parts
sub_path = "/".join(parts)
if ignore_frames is None:
ignore_frames = []
local_ignore = [self]
local_ignore.extend(ignore_frames)
child_frame: Frame
for child_frame, _ in self._children:
if child_frame in local_ignore:
continue
try:
return child_frame.find_frame(sub_path, ignore_frames=local_ignore)
except RuntimeError:
continue
else:
raise RuntimeError(f"No match for {path}.")
class CustomLink(Link):
"""A link representing a custom transformation.
Initialize a new link from the callable ``transformation`` that transforms a
vector from the parent frame to the child frame. This link can represent
arbitrary transformations between two frames.
Parameters
----------
    parent_dim : int
        The number of dimensions of vectors in the parent frame.
    child_dim : int
        The number of dimensions of vectors in the child frame.
    transformation : Callable[[ArrayLike], np.ndarray]
A callable that takes a vector - in the parent frame - as input and
returns the vector in the child frame.
Notes
-----
This function does not implement :func:`Link.__inverse_transform__`.
"""
def __init__(
self,
parent_dim: int,
child_dim: int,
transformation: Callable[[ArrayLike], np.ndarray],
) -> None:
"""Initialize a new custom link."""
super().__init__(parent_dim, child_dim)
self._transform = transformation
def transform(self, x: ArrayLike) -> np.ndarray:
return self._transform(x)
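# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: wrapping an arbitrary callable in a CustomLink. The
# callable below is illustrative; any vectorized function works.
def _example_custom_link():  # pragma: no cover - illustrative only
    import numpy as np

    double = CustomLink(3, 3, lambda x: 2 * np.asarray(x))
    return double.transform((1.0, 2.0, 3.0))  # -> array([2., 4., 6.])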
class CompundLink(Link):
"""A link representing a sequence of other links
.. versionadded:: 0.4.0
This link allows you to build a complex link from a sequence of simpler
links. Upon ``transform`` each link transforms the result of its predecessor
and the result of the last link is returned. Similarly, when
__inverse_transform__ is called, each link inverse transformes its
successor (inverse order).
Parameters
----------
wrapped_links : List[Link]
A sequence of link objects. The order is first-to-last, i.e.,
``wrapped_links[0]`` is applied first and ``wrapped_links[-1]`` is
applied last.
Notes
-----
This link will only be invertible if all wrapped links implement
__inverse_transform__.
"""
def __init__(self, wrapped_links: List[Link]):
super().__init__(wrapped_links[0].parent_dim, wrapped_links[-1].child_dim)
self._links = wrapped_links
def transform(self, x: ArrayLike) -> np.ndarray:
for link in self._links:
x = link.transform(x)
return x
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
for link in reversed(self._links):
x = link.__inverse_transform__(x)
        return x
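# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: chaining two custom links with CompundLink. The first
# link is applied first and its output is fed into the second link.
def _example_compound_link():  # pragma: no cover - illustrative only
    import numpy as np

    add_one = CustomLink(3, 3, lambda x: np.asarray(x) + 1)
    double = CustomLink(3, 3, lambda x: 2 * np.asarray(x))
    chain = CompundLink([add_one, double])
    return chain.transform((0.0, 1.0, 2.0))  # -> array([2., 4., 6.])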
from math import sqrt
from .base import Link
from .affine import Rotation
from numpy.typing import ArrayLike
import numpy as np
class Rotation2D(Rotation):
"""Rotation in 2D.
A convenient way of initializing a rotation in 2D.
Parameters
----------
angle : ArrayLike
The magnitude of the rotation.
degrees : bool
If True, angle is assumed to be in degrees. Default is False.
axis : int
The axis along which computation takes place. All other axes are
considered batch dimensions.
"""
def __init__(
self,
angle: ArrayLike,
*,
degrees: bool = False,
axis: int = -1,
) -> None:
angle = np.asarray(angle)
vector_shape = [*angle.shape]
vector_shape[axis] = 2
u_vector = np.zeros(vector_shape)
u_vector[..., 1] = 1
v_vector = np.zeros(vector_shape)
v_vector[..., 0] = 1
super().__init__(u_vector, v_vector, axis=axis)
if degrees: # make radians
angle = angle / 360 * 2 * np.pi
self.angle = angle
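# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: a 90 degree rotation in 2D. The angle is passed as a
# length-1 array because the constructor above derives the vector shape
# from ``angle.shape``.
def _example_rotation2d():  # pragma: no cover - illustrative only
    rot = Rotation2D([90.0], degrees=True)
    return rot.transform((1.0, 0.0))  # approximately (0., 1.)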
class AxialHexagonTransform(Link):
"""Conversion to Axial Hexagon Coordininates in 2D
This transform takes a 2D vector in euclidian (x, y) coordinates and
converts it into coordinates on a (r, q, s) hexagonal grid. For this, it
uses axial coordinates for which the value of s is implied, because r+q+s=0;
hence this transform returns a 2D vector in (r, q) coordinates.
See here for an overview of `Hexagon Coordinate Systems
<https://www.redblobgames.com/grids/hexagons/#coordinates>`_.
Parameters
----------
size : ArrayLike
The size of a single hexagon. It measures the distance from a hexagon
        center to one of its corners.
flat_top : bool
If True (default), hexagons are oriented with one side parallel to the x
axis (top is flat). Otherwise they are oriented with one side
parallel to the y-axis (top is pointy).
axis : int
The axis along which computation takes place. All other axes are
considered batch dimensions.
"""
def __init__(
self, *, size: ArrayLike = 1.0, flat_top: bool = True, axis: int = -1
) -> None:
super().__init__(2, 2)
self.size = size
self._axis = axis
if flat_top:
self.q_basis = np.array([2 / 3, 0])
self.r_basis = np.array([-1 / 3, sqrt(3) / 3])
# basis for inverse transform
self.x_basis = np.array([3 / 2, 0])
self.y_basis = np.array([sqrt(3) / 2, sqrt(3)])
else:
self.q_basis = np.array([sqrt(3) / 3, -1 / 3])
self.r_basis = np.array([0, 2 / 3])
# basis for inverse transform
self.x_basis = np.array([sqrt(3), sqrt(3) / 2])
self.y_basis = np.array([0, 3 / 2])
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.asfarray(x)
x = np.moveaxis(x, self._axis, -1)
result = np.empty_like(x)
result[..., 0] = np.sum(x * self.q_basis, axis=-1)
result[..., 1] = np.sum(x * self.r_basis, axis=-1)
result /= self.size
return result
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
x = np.asfarray(x)
x = np.moveaxis(x, self._axis, -1)
result = np.empty_like(x)
result[..., 0] = np.sum(x * self.x_basis, axis=-1)
result[..., 1] = np.sum(x * self.y_basis, axis=-1)
result *= self.size
return result
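# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: converting a Euclidean point to axial (q, r) hexagon
# coordinates and back. ``__inverse_transform__`` is called directly here
# only for illustration; it is normally used implicitly via frame chains.
def _example_axial_hexagon():  # pragma: no cover - illustrative only
    to_axial = AxialHexagonTransform(size=1.0, flat_top=True)
    axial = to_axial.transform((1.5, 0.0))  # (q, r) = (1.0, -0.5)
    return to_axial.__inverse_transform__(axial)  # back to roughly (1.5, 0.0)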
class HexagonAxisRound(Link):
"""Round Hexagon Axis Coordinates in 2D.
This link rounds hexagon coordinates given in axis coordinates (r, q) to
their closest hexagon.
Parameters
----------
axis : int
The axis along which rounding takes place.
Notes
-----
This link is _not_ invertible.
"""
def __init__(self, *, axis=-1) -> None:
super().__init__(2, 2)
self._axis = axis
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.moveaxis(x, self._axis, -1)
# convert to cube coordinates
cube_coordinates = np.empty((*x.shape[:-1], 3))
cube_coordinates[..., :-1] = x
cube_coordinates[..., -1] = -x[..., 0] - x[..., 1]
cube_coordinates = cube_coordinates.reshape(-1, 3)
# round and enforce q+r+s=0 constraint
cube_rounded = np.round(cube_coordinates).astype(int)
residual = np.abs(cube_coordinates - cube_rounded)
first_max = np.argmax(residual, axis=-1)
matching_range = np.arange(first_max.shape[0])
cube_rounded[matching_range, first_max] -= np.sum(cube_rounded, axis=-1)
rounded_coordinates = np.moveaxis(cube_rounded[..., :2], -1, self._axis)
        return rounded_coordinates | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/utils2d.py | utils2d.py
from .base import Link
from .affine import Rotation
from numpy.typing import ArrayLike
import numpy as np
class Rotation2D(Rotation):
"""Rotation in 2D.
A convenient way of initializing a rotation in 2D.
Parameters
----------
angle : ArrayLike
The magnitude of the rotation.
degrees : bool
If True, angle is assumed to be in degrees. Default is False.
axis : int
The axis along which computation takes place. All other axes are
considered batch dimensions.
"""
def __init__(
self,
angle: ArrayLike,
*,
degrees: bool = False,
axis: int = -1,
) -> None:
angle = np.asarray(angle)
vector_shape = [*angle.shape]
vector_shape[axis] = 2
u_vector = np.zeros(vector_shape)
u_vector[..., 1] = 1
v_vector = np.zeros(vector_shape)
v_vector[..., 0] = 1
super().__init__(u_vector, v_vector, axis=axis)
if degrees: # make radians
angle = angle / 360 * 2 * np.pi
self.angle = angle
class AxialHexagonTransform(Link):
"""Conversion to Axial Hexagon Coordininates in 2D
This transform takes a 2D vector in euclidian (x, y) coordinates and
converts it into coordinates on a (r, q, s) hexagonal grid. For this, it
uses axial coordinates for which the value of s is implied, because r+q+s=0;
hence this transform returns a 2D vector in (r, q) coordinates.
See here for an overview of `Hexagon Coordinate Systems
<https://www.redblobgames.com/grids/hexagons/#coordinates>`_.
Parameters
----------
size : ArrayLike
The size of a single hexagon. It measures the distance from a hexagon
center to one of it's corners.
flat_top : bool
If True (default), hexagons are oriented with one side parallel to the x
axis (top is flat). Otherwise they are oriented with one side
parallel to the y-axis (top is pointy).
axis : int
The axis along which computation takes place. All other axes are
considered batch dimensions.
"""
def __init__(
self, *, size: ArrayLike = 1.0, flat_top: bool = True, axis: int = -1
) -> None:
super().__init__(2, 2)
self.size = size
self._axis = axis
if flat_top:
self.q_basis = np.array([2 / 3, 0])
self.r_basis = np.array([-1 / 3, sqrt(3) / 3])
# basis for inverse transform
self.x_basis = np.array([3 / 2, 0])
self.y_basis = np.array([sqrt(3) / 2, sqrt(3)])
else:
self.q_basis = np.array([sqrt(3) / 3, -1 / 3])
self.r_basis = np.array([0, 2 / 3])
# basis for inverse transform
self.x_basis = np.array([sqrt(3), sqrt(3) / 2])
self.y_basis = np.array([0, 3 / 2])
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.asfarray(x)
x = np.moveaxis(x, self._axis, -1)
result = np.empty_like(x)
result[..., 0] = np.sum(x * self.q_basis, axis=-1)
result[..., 1] = np.sum(x * self.r_basis, axis=-1)
result /= self.size
return result
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
x = np.asfarray(x)
x = np.moveaxis(x, self._axis, -1)
result = np.empty_like(x)
result[..., 0] = np.sum(x * self.x_basis, axis=-1)
result[..., 1] = np.sum(x * self.y_basis, axis=-1)
result *= self.size
return result
class HexagonAxisRound(Link):
"""Round Hexagon Axis Coordinates in 2D.
This link rounds hexagon coordinates given in axis coordinates (r, q) to
their closest hexagon.
Parameters
----------
axis : int
The axis along which rounding takes place.
Notes
-----
This link is _not_ invertible.
"""
def __init__(self, *, axis=-1) -> None:
super().__init__(2, 2)
self._axis = axis
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.moveaxis(x, self._axis, -1)
# convert to cube coordinates
cube_coordinates = np.empty((*x.shape[:-1], 3))
cube_coordinates[..., :-1] = x
cube_coordinates[..., -1] = -x[..., 0] - x[..., 1]
cube_coordinates = cube_coordinates.reshape(-1, 3)
# round and enforce q+r+s=0 constraint
cube_rounded = np.round(cube_coordinates).astype(int)
residual = np.abs(cube_coordinates - cube_rounded)
first_max = np.argmax(residual, axis=-1)
matching_range = np.arange(first_max.shape[0])
cube_rounded[matching_range, first_max] -= np.sum(cube_rounded, axis=-1)
rounded_coordinates = np.moveaxis(cube_rounded[..., :2], -1, self._axis)
return rounded_coordinates | 0.941385 | 0.820433 |
from numpy.typing import ArrayLike
from typing import List
import numpy as np
from .base import Frame, Link, InvertLink
from ._utils import vector_project, angle_between
from .functions import translate, rotate, as_affine_matrix
class AffineLink(Link):
"""A link representing a affine transformation.
This is an abstract class useful when writing links for affine
transformations. An affine transformation is a transformation that can be
described using the equation ``y = Ax+b``.
The main utility of this class is that it computes the corresponding
transformation matrix once ``self.transform`` is known.
Parameters
----------
    parent_dim : int
        The number of dimensions of vectors in the parent frame.
    child_dim : int
        The number of dimensions of vectors in the child frame.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
"""
def __init__(self, parent_dim: int, child_dim: int, *, axis: int = -1) -> None:
"""Initialize a new affine link."""
super().__init__(parent_dim, child_dim)
self._tf_matrix = None
self._axis = axis
@property
def affine_matrix(self) -> np.ndarray:
"""The transformation matrix mapping the parent to the child frame."""
cartesian_parent = Frame(self.parent_dim)
cartesian_child = Frame(self.child_dim)
self(cartesian_parent, cartesian_child)
affine_parent = AffineSpace(self.parent_dim, axis=self._axis)(cartesian_parent)
affine_child = AffineSpace(self.child_dim, axis=self._axis)(cartesian_child)
affine_matrix = as_affine_matrix(affine_parent, affine_child, axis=self._axis)
return affine_matrix
@property
def _inverse_tf_matrix(self):
cartesian_parent = Frame(self.parent_dim)
cartesian_child = Frame(self.child_dim)
self(cartesian_parent, cartesian_child)
affine_parent = AffineSpace(self.parent_dim, axis=self._axis)(cartesian_parent)
affine_child = AffineSpace(self.child_dim, axis=self._axis)(cartesian_child)
affine_matrix = as_affine_matrix(affine_child, affine_parent, axis=self._axis)
return affine_matrix
def invert(self) -> Frame:
"""Return a new Link that is the inverse of this link."""
return Inverse(self)
class Inverse(InvertLink):
def __init__(self, link: AffineLink) -> None:
super().__init__(link)
self._forward_link: AffineLink
@property
def affine_matrix(self) -> np.ndarray:
"""The transformation matrix mapping the parent to the child frame."""
return self._forward_link._inverse_tf_matrix
class AffineCompound(AffineLink):
def __init__(self, wrapped_links: List[AffineLink]) -> None:
super().__init__(wrapped_links[0].parent_dim, wrapped_links[-1].child_dim)
self._links = wrapped_links
def transform(self, x: ArrayLike) -> np.ndarray:
for link in self._links:
x = link.transform(x)
return x
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
for link in reversed(self._links):
x = link.__inverse_transform__(x)
return x
@property
def affine_matrix(self) -> np.ndarray:
"""The transformation matrix mapping the parent to the child frame."""
matrix = self._links[0].affine_matrix
for link in self._links[1:]:
matrix = link.affine_matrix @ matrix
return matrix
def invert(self) -> Frame:
"""Return a new Link that is the inverse of this link."""
links = list()
for link in reversed(self._links):
links.append(Inverse(link))
return AffineCompound(links)
class Rotation(AffineLink):
"""Planar rotation in N-D.
The plane of rotation is described by two vectors (u, v). The initial
angle of rotation is twice the angle between u and v (measured from u to v)
and can be modified by setting the angle explicitly, e.g. ``link.angle =
np.pi``. The angle is measured in radians.
Parameters
----------
u : ArrayLike
        The first vector defining the plane of rotation. The angle is measured
from here.
v : ArrayLike
The second vector defining the plane of rotation. The angle is measured
to here, i.e., from u to v.
Notes
-----
Implements __inverse_transform__.
If you wish to initialize a rotation that rotates u onto v, and both are of
same length, you can use ``tf.Rotation(u+v, v)``.
"""
def __init__(self, u: ArrayLike, v: ArrayLike, *, axis: int = -1) -> None:
u = np.asarray(u)
v = np.asarray(v)
u = np.moveaxis(u, axis, -1)
v = np.moveaxis(v, axis, -1)
frame_dim = u.shape[axis]
super().__init__(frame_dim, frame_dim)
self._u = u / np.linalg.norm(u)
self._v = v / np.linalg.norm(v)
self._angle = 2 * angle_between(u, v)
u_orthogonal = v - vector_project(v, u)
self._u_ortho = u_orthogonal / np.linalg.norm(u_orthogonal)
def transform(self, x: ArrayLike) -> np.ndarray:
return rotate(x, self._u, self._v)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
return rotate(x, self._v, self._u)
@property
def angle(self) -> float:
"""The magnitude of the rotation (in radians)."""
return self._angle
@angle.setter
def angle(self, angle: ArrayLike) -> None:
self._angle = angle
self._v = np.cos(angle / 2) * self._u - np.sin(angle / 2) * self._u_ortho
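# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: the rotation angle is twice the angle between u and v, so
# u = x-axis and v = y-axis give a 180 degree rotation in the x-y plane.
def _example_rotation():  # pragma: no cover - illustrative only
    rot = Rotation((1.0, 0.0), (0.0, 1.0))
    return rot.transform((1.0, 0.0))  # approximately (-1., 0.)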
class Translation(AffineLink):
"""Translation in N-D.
.. versionchanged:: 0.6.0
``direction`` and ``scalar`` may now be broadcastable arrays
Parameters
----------
direction : ArrayLike
A vector, or batch of vectors, describing the translation.
amount : ArrayLike
A scalar indicating by how much to scale ``direction``. Default is 1.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
"""
def __init__(
self, direction: ArrayLike, *, amount: ArrayLike = 1, axis: int = -1
) -> None:
direction = np.asarray(direction)
frame_dim = direction.shape[axis]
super().__init__(frame_dim, frame_dim, axis=axis)
self._axis = axis
self._amount = np.asarray(amount)
self._direction = np.moveaxis(direction, axis, -1)
@property
def direction(self) -> np.ndarray:
"""The direction in which vectors are translated."""
return self._direction
@direction.setter
def direction(self, direction: ArrayLike) -> None:
self._direction = np.asarray(direction)
@property
def amount(self) -> float:
"""The amount by which to scale the direction vector."""
return self._amount
@amount.setter
def amount(self, amount: ArrayLike) -> None:
self._amount = np.asarray(amount)
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.asarray(x)
x = np.moveaxis(x, self._axis, -1)
result = translate(x, self._amount[..., None] * self._direction)
return np.moveaxis(result, -1, self._axis)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
x = np.asarray(x)
x = np.moveaxis(x, self._axis, -1)
result = translate(x, -self._amount[..., None] * self._direction)
return np.moveaxis(result, -1, self._axis)
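# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: a plain translation; ``amount`` defaults to 1 so the
# input is shifted by ``direction`` itself.
def _example_translation():  # pragma: no cover - illustrative only
    shift = Translation((1.0, 2.0, 3.0))
    return shift.transform((0.0, 0.0, 0.0))  # -> array([1., 2., 3.])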
class AffineSpace(Link):
"""Transform to affine space
Parameters
----------
ndim : int
The number of dimensions of the cartesian space.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
"""
def __init__(self, ndim: int, *, axis=-1) -> None:
super().__init__(ndim, ndim + 1)
self._axis = axis
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.moveaxis(x, self._axis, -1)
shape = list(x.shape)
shape[-1] += 1
affine_vector = np.ones(shape, dtype=x.dtype)
affine_vector[..., :-1] = x
return np.moveaxis(affine_vector, -1, self._axis)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
x = np.moveaxis(x, self._axis, -1)
values = x[..., :-1]
scaling = x[..., -1][..., None]
cartesian_vector = values / scaling
        return np.moveaxis(cartesian_vector, -1, self._axis) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/affine.py | affine.py
from typing import List
import numpy as np
from .base import Frame, Link, InvertLink
from ._utils import vector_project, angle_between
from .functions import translate, rotate, as_affine_matrix
class AffineLink(Link):
"""A link representing a affine transformation.
This is an abstract class useful when writing links for affine
transformations. An affine transformation is a transformation that can be
described using the equation ``y = Ax+b``.
The main utility of this class is that it computes the corresponding
transformation matrix once ``self.transform`` is known.
Parameters
----------
parent : int
The frame in which vectors are specified.
child : int
The frame into which this link transforms vectors.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
"""
def __init__(self, parent_dim: int, child_dim: int, *, axis: int = -1) -> None:
"""Initialize a new affine link."""
super().__init__(parent_dim, child_dim)
self._tf_matrix = None
self._axis = axis
@property
def affine_matrix(self) -> np.ndarray:
"""The transformation matrix mapping the parent to the child frame."""
cartesian_parent = Frame(self.parent_dim)
cartesian_child = Frame(self.child_dim)
self(cartesian_parent, cartesian_child)
affine_parent = AffineSpace(self.parent_dim, axis=self._axis)(cartesian_parent)
affine_child = AffineSpace(self.child_dim, axis=self._axis)(cartesian_child)
affine_matrix = as_affine_matrix(affine_parent, affine_child, axis=self._axis)
return affine_matrix
@property
def _inverse_tf_matrix(self):
cartesian_parent = Frame(self.parent_dim)
cartesian_child = Frame(self.child_dim)
self(cartesian_parent, cartesian_child)
affine_parent = AffineSpace(self.parent_dim, axis=self._axis)(cartesian_parent)
affine_child = AffineSpace(self.child_dim, axis=self._axis)(cartesian_child)
affine_matrix = as_affine_matrix(affine_child, affine_parent, axis=self._axis)
return affine_matrix
def invert(self) -> Frame:
"""Return a new Link that is the inverse of this link."""
return Inverse(self)
class Inverse(InvertLink):
def __init__(self, link: AffineLink) -> None:
super().__init__(link)
self._forward_link: AffineLink
@property
def affine_matrix(self) -> np.ndarray:
"""The transformation matrix mapping the parent to the child frame."""
return self._forward_link._inverse_tf_matrix
class AffineCompound(AffineLink):
def __init__(self, wrapped_links: List[AffineLink]) -> None:
super().__init__(wrapped_links[0].parent_dim, wrapped_links[-1].child_dim)
self._links = wrapped_links
def transform(self, x: ArrayLike) -> np.ndarray:
for link in self._links:
x = link.transform(x)
return x
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
for link in reversed(self._links):
x = link.__inverse_transform__(x)
return x
@property
def affine_matrix(self) -> np.ndarray:
"""The transformation matrix mapping the parent to the child frame."""
matrix = self._links[0].affine_matrix
for link in self._links[1:]:
matrix = link.affine_matrix @ matrix
return matrix
def invert(self) -> Frame:
"""Return a new Link that is the inverse of this link."""
links = list()
for link in reversed(self._links):
links.append(Inverse(link))
return AffineCompound(links)
class Rotation(AffineLink):
"""Planar rotation in N-D.
The plane of rotation is described by two vectors (u, v). The initial
angle of rotation is twice the angle between u and v (measured from u to v)
and can be modified by setting the angle explicitly, e.g. ``link.angle =
np.pi``. The angle is measured in radians.
Parameters
----------
u : ArrayLike
The first vector defining the plane of rotation. The angle is mesured
from here.
v : ArrayLike
The second vector defining the plane of rotation. The angle is measured
to here, i.e., from u to v.
Notes
-----
Implements __inverse_transform__.
If you wish to initialize a rotation that rotates u onto v, and both are of
same length, you can use ``tf.Rotation(u+v, v)``.
"""
def __init__(self, u: ArrayLike, v: ArrayLike, *, axis: int = -1) -> None:
u = np.asarray(u)
v = np.asarray(v)
u = np.moveaxis(u, axis, -1)
v = np.moveaxis(v, axis, -1)
frame_dim = u.shape[axis]
super().__init__(frame_dim, frame_dim)
self._u = u / np.linalg.norm(u)
self._v = v / np.linalg.norm(v)
self._angle = 2 * angle_between(u, v)
u_orthogonal = v - vector_project(v, u)
self._u_ortho = u_orthogonal / np.linalg.norm(u_orthogonal)
def transform(self, x: ArrayLike) -> np.ndarray:
return rotate(x, self._u, self._v)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
return rotate(x, self._v, self._u)
@property
def angle(self) -> float:
"""The magnitude of the rotation (in radians)."""
return self._angle
@angle.setter
def angle(self, angle: ArrayLike) -> None:
self._angle = angle
self._v = np.cos(angle / 2) * self._u - np.sin(angle / 2) * self._u_ortho
class Translation(AffineLink):
"""Translation in N-D.
.. versionchanged:: 0.6.0
``direction`` and ``scalar`` may now be broadcastable arrays
Parameters
----------
direction : ArrayLike
A vector, or batch of vectors, describing the translation.
amount : ArrayLike
A scalar indicating by how much to scale ``direction``. Default is 1.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
"""
def __init__(
self, direction: ArrayLike, *, amount: ArrayLike = 1, axis: int = -1
) -> None:
direction = np.asarray(direction)
frame_dim = direction.shape[axis]
super().__init__(frame_dim, frame_dim, axis=axis)
self._axis = axis
self._amount = np.asarray(amount)
self._direction = np.moveaxis(direction, axis, -1)
@property
def direction(self) -> np.ndarray:
"""The direction in which vectors are translated."""
return self._direction
@direction.setter
def direction(self, direction: ArrayLike) -> None:
self._direction = np.asarray(direction)
@property
def amount(self) -> float:
"""The amount by which to scale the direction vector."""
return self._amount
@amount.setter
def amount(self, amount: ArrayLike) -> None:
self._amount = np.asarray(amount)
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.asarray(x)
x = np.moveaxis(x, self._axis, -1)
result = translate(x, self._amount[..., None] * self._direction)
return np.moveaxis(result, -1, self._axis)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
x = np.asarray(x)
x = np.moveaxis(x, self._axis, -1)
result = translate(x, -self._amount[..., None] * self._direction)
return np.moveaxis(result, -1, self._axis)
class AffineSpace(Link):
"""Transform to affine space
Parameters
----------
ndim : int
The number of dimensions of the cartesian space.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
"""
def __init__(self, ndim: int, *, axis=-1) -> None:
super().__init__(ndim, ndim + 1)
self._axis = axis
def transform(self, x: ArrayLike) -> np.ndarray:
x = np.moveaxis(x, self._axis, -1)
shape = list(x.shape)
shape[-1] += 1
affine_vector = np.ones(shape, dtype=x.dtype)
affine_vector[..., :-1] = x
return np.moveaxis(affine_vector, -1, self._axis)
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
x = np.moveaxis(x, self._axis, -1)
values = x[..., :-1]
scaling = x[..., -1][..., None]
cartesian_vector = values / scaling
return np.moveaxis(cartesian_vector, -1, self._axis) | 0.966976 | 0.682031 |
from typing import List
from .base import CompundLink, Link, InvertLink
from .affine import AffineCompound, Translation, Rotation
from .joints import Joint
import numpy as np
def simplify_links(
links: List[Link],
*,
keep_links: List[Link] = None,
keep_joints: bool = False,
eps: float = 1e-16
) -> List[Link]:
"""Simplify a transformation sequence.
.. currentmodule:: skbot.transform
This function attempts to optimize the given transformation sequence by
reducing the number of transformations involved. For this it may replace or
modify any link in the sequence with the exception of those listed in
``keep_links``. Concretely it does the following modifications:
- It (recursively) flattens :class:`CompoundLinks <CompundLink>`.
- It replaces double inversions with the original link.
- It drops 0 degree :class:`Rotations <Rotation>` (identities).
- It drops 0 amount :class:`Translations <Translation>` (identities).
- It combines series of translations into a single translation.
- It sorts translations before rotations.
.. versionadded:: 0.10.0
Parameters
----------
links : List[Link]
The list of links to simplify.
keep_links : List[Link]
        A list of links that - if present - should not be simplified.
    keep_joints : bool
        If True, treat tf.Joint instances as if they were in keep_links.
eps : float
The number below which angles and translations are interpreted as 0.
Defaults to ``1e-16``.
Returns
-------
improved_links : List[Link]
A new list of links that is a simplified version of the initial list.
"""
if keep_links is None:
keep_links = list()
def simplify(links: List[Link]) -> List[Link]:
improved_links: List[Link] = list()
for idx in range(len(links)):
link = links[idx]
# skip if link should not be modified
if link in keep_links or (isinstance(link, Joint) and keep_joints):
improved_links.append(link)
continue
# resolve inversions
if isinstance(link, InvertLink):
inverted_link = link._forward_link
# still don't touch keep links
if inverted_link in keep_links or (
isinstance(inverted_link, Joint) and keep_joints
):
improved_links.append(link)
continue
# double inverse
if isinstance(inverted_link, InvertLink):
improved_links.append(inverted_link._forward_link)
continue
# inverted compound link
if isinstance(inverted_link, (CompundLink, AffineCompound)):
for sub_link in reversed(inverted_link._links):
improved_links.append(InvertLink(sub_link))
continue
# inverted translation
if isinstance(inverted_link, Translation):
resolved = Translation(
inverted_link.direction,
amount=-inverted_link.amount,
axis=inverted_link._axis,
)
improved_links.append(resolved)
continue
# inverted rotation
if isinstance(inverted_link, Rotation):
angle = inverted_link.angle
resolved = Rotation(
inverted_link._u,
inverted_link._u_ortho,
axis=inverted_link._axis,
)
resolved.angle = -angle
improved_links.append(resolved)
continue
# unpack compound links
if isinstance(link, (CompundLink, AffineCompound)):
for sub_link in link._links:
improved_links.append(sub_link)
continue
# drop identity translations
if isinstance(link, Translation) and abs(link.amount) < eps:
continue
# drop identity rotations
if isinstance(link, Rotation) and abs(link.angle) < eps:
continue
# no improvements for this link
improved_links.append(link)
if len(improved_links) != len(links):
improved_links = simplify(improved_links)
elif any([a != b for a, b in zip(links, improved_links)]):
improved_links = simplify(improved_links)
return improved_links
def combine_translations(links: List[Link]) -> List[Link]:
improved_links: List[Link] = list()
idx = 0
while idx < len(links):
link = links[idx]
if not isinstance(link, Translation):
improved_links.append(link)
idx += 1
continue
translations: List[Translation] = list()
for sub_link in links[idx:]:
if not isinstance(sub_link, Translation):
break
translations.append(sub_link)
new_direction = np.zeros(link.parent_dim)
for sub_link in translations:
new_direction += sub_link.amount * sub_link.direction
improved_links.append(Translation(new_direction))
idx += len(translations)
return improved_links
def sort_links(links: List[Link]) -> List[Link]:
improved_links: List[Link] = [x for x in links]
repeat = True
while repeat:
repeat = False
for idx in range(len(improved_links) - 1):
link = improved_links[idx]
next_link = improved_links[idx + 1]
if isinstance(link, Rotation) and isinstance(next_link, Translation):
vector = next_link.amount * next_link.direction
vector = link.__inverse_transform__(vector)
improved_links[idx + 1] = improved_links[idx]
improved_links[idx] = Translation(vector)
repeat = True
continue
return improved_links
improved_links = simplify(links)
subchains: List[List[Link]] = list()
keepsies: List[Link] = list()
current_subchain: List[Link] = list()
for link in improved_links:
if link in keep_links or (isinstance(link, Joint) and keep_joints):
keepsies.append(link)
subchains.append(current_subchain)
current_subchain = list()
else:
current_subchain.append(link)
subchains.append(current_subchain)
improved_chains: List[List[Link]] = list()
for subchain in subchains:
improved_links = sort_links(subchain)
improved_links = combine_translations(improved_links)
improved_chains.append(improved_links)
improved_chain: List[Link] = list()
for chain, keepsie in zip(improved_chains, keepsies):
improved_chain += chain
improved_chain += [keepsie]
improved_chain += improved_chains[-1]
    return improved_chain | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/simplfy.py | simplfy.py
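# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example for ``simplify_links`` defined above: a double inversion is
# unwrapped and two consecutive translations are merged into one.
def _example_simplify_links():  # pragma: no cover - illustrative only
    links = [
        InvertLink(InvertLink(Translation((1.0, 0.0, 0.0)))),
        Translation((0.0, 2.0, 0.0)),
    ]
    return simplify_links(links)  # -> [Translation by (1., 2., 0.)]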
from .base import CompundLink, Link, InvertLink
from .affine import AffineCompound, Translation, Rotation
from .joints import Joint
import numpy as np
def simplify_links(
links: List[Link],
*,
keep_links: List[Link] = None,
keep_joints: bool = False,
eps: float = 1e-16
) -> List[Link]:
"""Simplify a transformation sequence.
.. currentmodule:: skbot.transform
This function attempts to optimize the given transformation sequence by
reducing the number of transformations involved. For this it may replace or
modify any link in the sequence with the exception of those listed in
``keep_links``. Concretely it does the following modifications:
- It (recursively) flattens :class:`CompoundLinks <CompundLink>`.
- It replaces double inversions with the original link.
- It drops 0 degree :class:`Rotations <Rotation>` (identities).
- It drops 0 amount :class:`Translations <Translation>` (identities).
- It combines series of translations into a single translation.
- It sorts translations before rotations.
.. versionadded:: 0.10.0
Parameters
----------
links : List[Link]
The list of links to simplify.
keep_links : List[Link]
A list list of links that - if present - should not be simplified.
keep_joints : bool
If True treat tf.Joint instances as if they were in keep_links.
eps : float
The number below which angles and translations are interpreted as 0.
Defaults to ``1e-16``.
Returns
-------
improved_links : List[Link]
A new list of links that is a simplified version of the initial list.
"""
if keep_links is None:
keep_links = list()
def simplify(links: List[Link]) -> List[Link]:
improved_links: List[Link] = list()
for idx in range(len(links)):
link = links[idx]
# skip if link should not be modified
if link in keep_links or (isinstance(link, Joint) and keep_joints):
improved_links.append(link)
continue
# resolve inversions
if isinstance(link, InvertLink):
inverted_link = link._forward_link
# still don't touch keep links
if inverted_link in keep_links or (
isinstance(inverted_link, Joint) and keep_joints
):
improved_links.append(link)
continue
# double inverse
if isinstance(inverted_link, InvertLink):
improved_links.append(inverted_link._forward_link)
continue
# inverted compound link
if isinstance(inverted_link, (CompundLink, AffineCompound)):
for sub_link in reversed(inverted_link._links):
improved_links.append(InvertLink(sub_link))
continue
# inverted translation
if isinstance(inverted_link, Translation):
resolved = Translation(
inverted_link.direction,
amount=-inverted_link.amount,
axis=inverted_link._axis,
)
improved_links.append(resolved)
continue
# inverted rotation
if isinstance(inverted_link, Rotation):
angle = inverted_link.angle
resolved = Rotation(
inverted_link._u,
inverted_link._u_ortho,
axis=inverted_link._axis,
)
resolved.angle = -angle
improved_links.append(resolved)
continue
# unpack compound links
if isinstance(link, (CompundLink, AffineCompound)):
for sub_link in link._links:
improved_links.append(sub_link)
continue
# drop identity translations
if isinstance(link, Translation) and abs(link.amount) < eps:
continue
# drop identity rotations
if isinstance(link, Rotation) and abs(link.angle) < eps:
continue
# no improvements for this link
improved_links.append(link)
if len(improved_links) != len(links):
improved_links = simplify(improved_links)
elif any([a != b for a, b in zip(links, improved_links)]):
improved_links = simplify(improved_links)
return improved_links
def combine_translations(links: List[Link]) -> List[Link]:
improved_links: List[Link] = list()
idx = 0
while idx < len(links):
link = links[idx]
if not isinstance(link, Translation):
improved_links.append(link)
idx += 1
continue
translations: List[Translation] = list()
for sub_link in links[idx:]:
if not isinstance(sub_link, Translation):
break
translations.append(sub_link)
new_direction = np.zeros(link.parent_dim)
for sub_link in translations:
new_direction += sub_link.amount * sub_link.direction
improved_links.append(Translation(new_direction))
idx += len(translations)
return improved_links
def sort_links(links: List[Link]) -> List[Link]:
improved_links: List[Link] = [x for x in links]
repeat = True
while repeat:
repeat = False
for idx in range(len(improved_links) - 1):
link = improved_links[idx]
next_link = improved_links[idx + 1]
if isinstance(link, Rotation) and isinstance(next_link, Translation):
vector = next_link.amount * next_link.direction
vector = link.__inverse_transform__(vector)
improved_links[idx + 1] = improved_links[idx]
improved_links[idx] = Translation(vector)
repeat = True
continue
return improved_links
improved_links = simplify(links)
subchains: List[List[Link]] = list()
keepsies: List[Link] = list()
current_subchain: List[Link] = list()
for link in improved_links:
if link in keep_links or (isinstance(link, Joint) and keep_joints):
keepsies.append(link)
subchains.append(current_subchain)
current_subchain = list()
else:
current_subchain.append(link)
subchains.append(current_subchain)
improved_chains: List[List[Link]] = list()
for subchain in subchains:
improved_links = sort_links(subchain)
improved_links = combine_translations(improved_links)
improved_chains.append(improved_links)
improved_chain: List[Link] = list()
for chain, keepsie in zip(improved_chains, keepsies):
improved_chain += chain
improved_chain += [keepsie]
improved_chain += improved_chains[-1]
return improved_chain | 0.884489 | 0.426083 |
import numpy as np
from numpy.typing import ArrayLike
from .base import Frame
from ._utils import vector_project
def scale(vector: ArrayLike, scalar: ArrayLike) -> np.ndarray:
"""Scale each dimension of a vector.
Multiplies each dimension of ``vector`` with the matching dimension of
``scalar``. If necessary, ``scalar`` will be broadcasted.
Parameters
----------
vector : ArrayLike
A vector to be scaled.
scalar : ArrayLike
A vector representing the amount by which to scale each dimension.
Returns
-------
scaled : ArrayLike
A vector where each dimension is scaled by scalar.
Notes
-----
Exists for completeness. It may be cleaner to simply write
``scalar * vector`` instead.
"""
vector = np.asarray(vector)
scalar = np.asarray(scalar)
return scalar * vector
def translate(vector: ArrayLike, direction: ArrayLike) -> np.ndarray:
"""Translate a vector along direction.
Parameters
----------
vector : ArrayLike
The vector to be translated.
direction : ArrayLike
A vector describing the translation.
Returns
-------
translated_vector : ArrayLike
The translated vector.
Notes
-----
Exists for completeness. It may be cleaner to simply write
``vector + direction`` instead.
"""
return vector + direction
def rotate(vector: ArrayLike, u: ArrayLike, v: ArrayLike, *, axis=-1) -> np.ndarray:
"""Rotate a vector in the u,v plane.
Rotates a vector by reflecting it twice. The plane of rotation
is given by the u-v-plane and the angle of rotation is two times
the angle from u to v.
Parameters
----------
vector : ArrayLike
The vector to be rotated.
u : ArrayLike
The first of the two axes defining the plane of rotation
v : ArrayLike
The second of the two axes defining the plane of rotation
axis : int
The axis along which to compute the reflection. Default: -1.
Returns
-------
rotated_vector : np.ndarray
The vector rotated in the u-v-plane by two times the angle
from u to v.
Notes
-----
The angle of rotation is given by the angle between the two vectors that
define the plane of rotation. The orientation of the rotation is from u
towards v, and the amount of rotation is twice the angle.
The scale of u and/or v does not influence the rotation.
"""
vector = np.asarray(vector)
u = np.asarray(u)
v = np.asarray(v)
# implemented as rotation by two reflections
return reflect(reflect(vector, u, axis=axis), v, axis=axis)
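# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: the angle from u=(1, 0) to v=(1, 1) is 45 degrees, so the
# resulting rotation is 90 degrees in the u-v plane.
def _example_rotate():  # pragma: no cover - illustrative only
    return rotate((1.0, 0.0), (1.0, 0.0), (1.0, 1.0))  # approximately (0., 1.)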
def reflect(vector: ArrayLike, direction: ArrayLike, *, axis=-1) -> np.ndarray:
"""Reflect a vector along a line defined by direction.
Parameters
----------
vector : ArrayLike
The vector to be reflected.
direction : ArrayLike
The vector describing the direction along which the reflection takes place.
axis : int
The axis along which to compute the reflection. Default: -1.
Returns
-------
reflected_vector : ArrayLike
The reflected vector.
Notes
-----
The length of direction does not influence the result of the reflection.
"""
# from: https://en.wikipedia.org/wiki/Reflection_(mathematics)#Reflection_through_a_hyperplane_in_n_dimensions
vector = np.asarray(vector)
direction = np.asarray(direction)
return vector - 2 * vector_project(vector, direction, axis=axis)
def shear(
vector: ArrayLike, direction: ArrayLike, amount: ArrayLike, *, axis=-1
) -> np.ndarray:
"""Displaces a vector along direction by the scalar product of vector and amount.
A shear displaces a vector in a fixed direction by the vector's scalar
projection onto a second vector (amount) scaled by the length of that second
vector. If amount and direction are orthogonal, the result is a shear. If
amount and direction are parallel, the result is a stretch.
Parameters
----------
vector : ArrayLike
The vector to be sheared.
direction : ArrayLike
The direction along which to apply the shear.
amount : ArrayLike
        The vector that determines the amount to shear by.
    axis : int
        The axis along which to compute the shear.
Returns
-------
sheared : np.ndarray
The sheared vector.
Notes
-----
If direction is not normalized the resulting shear factor will be scaled by
    the length (Euclidean norm) of direction.
"""
vector = np.asarray(vector)
direction = np.asarray(direction)
amount = np.asarray(amount)
tmp1 = np.sum(vector * amount, axis=axis)
return vector + tmp1 * direction
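# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: shear along the x-axis by the y-component of the input,
# i.e. direction=(1, 0) and amount=(0, 1).
def _example_shear():  # pragma: no cover - illustrative only
    return shear((2.0, 3.0), (1.0, 0.0), (0.0, 1.0))  # -> array([5., 3.])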
def as_affine_matrix(from_frame: Frame, to_frame: Frame, *, axis: int = -1):
"""Transformation Matrix between two frames at a given point.
Given two frames ``from_frame`` and ``to_frame`` that represent affine space
and are connected by a sequence of linear transformations, compute a matrix
representation of the transformation.
Parameters
----------
from_frame : tf.Frame
The parent frame.
to_frame : tf.Frame
The child frame.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
Returns
-------
matrix : np.ndarray
The matrix representation of the transformation. It will have the shape
``(batch_shape, to_frame.ndim, from_frame.ndim)``.
Notes
-----
The matrix representation will only be accurate if the transformation chain
between the two given frames is linear.
"""
if axis != -1:
raise NotImplementedError("Axis is not implemented yet.")
basis_set = np.eye(from_frame.ndim)
basis_set[:, -1] = 1
mapped_basis = from_frame.transform(basis_set, to_frame).T
# normalize affine matrix
scaling = mapped_basis[-1, :]
mapped_basis /= scaling[None, :]
mapped_basis[..., :-1] -= mapped_basis[..., -1][..., None]
    return mapped_basis | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/functions.py | functions.py
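# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example for ``as_affine_matrix`` defined above, mirroring the
# pattern used internally by ``AffineLink.affine_matrix``. It assumes that
# ``Frame``, ``Translation``, ``AffineSpace`` and ``as_affine_matrix`` are
# exposed at the package level of ``skbot.transform``.
def _example_as_affine_matrix():  # pragma: no cover - illustrative only
    import skbot.transform as tf

    parent = tf.Frame(3)
    child = tf.Frame(3)
    tf.Translation((1.0, 2.0, 3.0))(parent, child)
    parent_affine = tf.AffineSpace(3)(parent)
    child_affine = tf.AffineSpace(3)(child)
    # 4x4 homogeneous matrix with (1, 2, 3) in its last column
    return tf.as_affine_matrix(parent_affine, child_affine)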
from numpy.typing import ArrayLike
from .base import Frame
from ._utils import vector_project
def scale(vector: ArrayLike, scalar: ArrayLike) -> np.ndarray:
"""Scale each dimension of a vector.
Multiplies each dimension of ``vector`` with the matching dimension of
``scalar``. If necessary, ``scalar`` will be broadcasted.
Parameters
----------
vector : ArrayLike
A vector to be scaled.
scalar : ArrayLike
A vector representing the amount by which to scale each dimension.
Returns
-------
scaled : ArrayLike
A vector where each dimension is scaled by scalar.
Notes
-----
Exists for completeness. It may be cleaner to simply write
``scalar * vector`` instead.
"""
vector = np.asarray(vector)
scalar = np.asarray(scalar)
return scalar * vector
def translate(vector: ArrayLike, direction: ArrayLike) -> np.ndarray:
"""Translate a vector along direction.
Parameters
----------
vector : ArrayLike
The vector to be translated.
direction : ArrayLike
A vector describing the translation.
Returns
-------
translated_vector : ArrayLike
The translated vector.
Notes
-----
Exists for completeness. It may be cleaner to simply write
``vector + direction`` instead.
"""
return vector + direction
def rotate(vector: ArrayLike, u: ArrayLike, v: ArrayLike, *, axis=-1) -> np.ndarray:
"""Rotate a vector in the u,v plane.
Rotates a vector by reflecting it twice. The plane of rotation
is given by the u-v-plane and the angle of rotation is two times
the angle from u to v.
Parameters
----------
vector : ArrayLike
The vector to be rotated.
u : ArrayLike
The first of the two axes defining the plane of rotation
v : ArrayLike
The second of the two axes defining the plane of rotation
axis : int
The axis along which to compute the reflection. Default: -1.
Returns
-------
rotated_vector : np.ndarray
The vector rotated in the u-v-plane by two times the angle
from u to v.
Notes
-----
The angle of rotation is given by the angle between the two vectors that
define the plane of rotation. The orientation of the rotation is from u
towards v, and the amount of rotation is twice the angle.
The scale of u and/or v does not influence the rotation.
"""
vector = np.asarray(vector)
u = np.asarray(u)
v = np.asarray(v)
# implemented as rotation by two reflections
return reflect(reflect(vector, u, axis=axis), v, axis=axis)
def reflect(vector: ArrayLike, direction: ArrayLike, *, axis=-1) -> np.ndarray:
"""Reflect a vector along a line defined by direction.
Parameters
----------
vector : ArrayLike
The vector to be reflected.
direction : ArrayLike
The vector describing the direction along which the reflection takes place.
axis : int
The axis along which to compute the reflection. Default: -1.
Returns
-------
reflected_vector : ArrayLike
The reflected vector.
Notes
-----
The length of direction does not influence the result of the reflection.
"""
# from: https://en.wikipedia.org/wiki/Reflection_(mathematics)#Reflection_through_a_hyperplane_in_n_dimensions
vector = np.asarray(vector)
direction = np.asarray(direction)
return vector - 2 * vector_project(vector, direction, axis=axis)
def shear(
vector: ArrayLike, direction: ArrayLike, amount: ArrayLike, *, axis=-1
) -> np.ndarray:
"""Displaces a vector along direction by the scalar product of vector and amount.
A shear displaces a vector in a fixed direction by the vector's scalar
projection onto a second vector (amount) scaled by the length of that second
vector. If amount and direction are orthogonal, the result is a shear. If
amount and direction are parallel, the result is a stretch.
Parameters
----------
vector : ArrayLike
The vector to be sheared.
direction : ArrayLike
The direction along which to apply the shear.
amount : ArrayLike
The axis that determines the amount to shear by.
axis : int
The axis along with to compute the shear.
Returns
-------
sheared : np.ndarray
The sheared vector.
Notes
-----
If direction is not normalized the resulting shear factor will be scaled by
the length (euclidian norm) of direction.
"""
vector = np.asarray(vector)
direction = np.asarray(direction)
amount = np.asarray(amount)
tmp1 = np.sum(vector * amount, axis=axis)
return vector + tmp1 * direction
def as_affine_matrix(from_frame: Frame, to_frame: Frame, *, axis: int = -1):
"""Transformation Matrix between two frames at a given point.
Given two frames ``from_frame`` and ``to_frame`` that represent affine space
and are connected by a sequence of linear transformations, compute a matrix
representation of the transformation.
Parameters
----------
from_frame : tf.Frame
The parent frame.
to_frame : tf.Frame
The child frame.
axis : int
The axis along which computation takes place. All other axes are considered
batch dimensions.
Returns
-------
matrix : np.ndarray
The matrix representation of the transformation. It will have the shape
``(batch_shape, to_frame.ndim, from_frame.ndim)``.
Notes
-----
The matrix representation will only be accurate if the transformation chain
between the two given frames is linear.
The
"""
if axis != -1:
raise NotImplementedError("Axis is not implemented yet.")
basis_set = np.eye(from_frame.ndim)
basis_set[:, -1] = 1
mapped_basis = from_frame.transform(basis_set, to_frame).T
# normalize affine matrix
scaling = mapped_basis[-1, :]
mapped_basis /= scaling[None, :]
mapped_basis[..., :-1] -= mapped_basis[..., -1][..., None]
return mapped_basis | 0.957288 | 0.925027 |
from math import tan
import numpy as np
from numpy.typing import ArrayLike
from scipy.spatial.transform import Rotation as scipy_rotation
from .base import Link
from .projections import PerspectiveProjection
from .affine import AffineCompound, Translation, Rotation
from ._utils import angle_between, vector_project
class RotvecRotation(Rotation):
"""Rotation based on rotation vector in 3D.
Parameters
----------
rotvec : ArrayLike
The vector around which points are rotated.
angle : ArrayLike
The magnitude of the rotation. If None, the length of ``vector`` will be
used.
degrees : bool
If True, angle is assumed to be in degrees. Default is False.
axis : int
        The axis along which to compute. Default: -1.
Notes
-----
Batch dimensions of ``rotvec`` and ``angle`` must be broadcastable.
"""
def __init__(
self,
rotvec: ArrayLike,
*,
angle: ArrayLike = None,
degrees: bool = False,
axis: int = -1
) -> None:
rotvec = np.asarray(rotvec)
rotvec = np.moveaxis(rotvec, axis, -1)
if angle is None:
angle = np.linalg.norm(rotvec, axis=axis, keepdims=True)
angle = np.moveaxis(angle, axis, -1)
else:
angle = np.asarray(angle)
if angle.ndim > 0:
angle = np.moveaxis(angle, axis, -1)[..., None]
if degrees: # make radians
angle = angle / 360 * 2 * np.pi
# arbitrary vector that isn't parallel to rotvec
alternativeA = np.zeros_like(rotvec)
alternativeA[..., :] = (1, 0, 0)
alternativeB = np.zeros_like(rotvec)
alternativeB[..., :] = (0, 1, 0)
enclosing_angle = np.abs(angle_between(alternativeA, rotvec))[..., None]
switch_vectors = (enclosing_angle < (np.pi / 4)) | (
abs(enclosing_angle - np.pi) < (np.pi / 4)
)
arbitrary_vector = np.where(switch_vectors, alternativeB, alternativeA)
vec_u = arbitrary_vector - vector_project(arbitrary_vector, rotvec)
vec_u /= np.linalg.norm(vec_u, axis=-1, keepdims=True)
basis2 = np.cross(vec_u, rotvec, axisa=-1, axisb=-1, axisc=-1)
basis2 /= np.linalg.norm(basis2, axis=-1, keepdims=True)
super().__init__(vec_u, basis2)
self.angle = angle
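# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: a 90 degree rotation around the z-axis maps the x-axis
# onto the y-axis.
def _example_rotvec_rotation():  # pragma: no cover - illustrative only
    rot = RotvecRotation((0.0, 0.0, 1.0), angle=np.pi / 2)
    return rot.transform((1.0, 0.0, 0.0))  # approximately (0., 1., 0.)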
class EulerRotation(AffineCompound):
"""Rotation based on Euler angles in 3D.
Parameters
----------
sequence : str
Specifies sequence of axes for rotations. Up to 3 characters belonging
to the set {‘X’, ‘Y’, ‘Z’} for intrinsic rotations, or {‘x’, ‘y’, ‘z’}
for extrinsic rotations. Extrinsic and intrinsic rotations cannot be
mixed in one function call.
angles : ArrayLike
Euler angles specified in radians (degrees is False) or degrees (degrees
is True). Each value of angles corresponds to the respective angle
listed in ``sequence``.
degrees : bool
If True, angles are assumed to be in degrees. Default is False.
axis : int
        The axis along which to compute. Default: -1.
"""
def __init__(
self, sequence: str, angles: ArrayLike, *, degrees: bool = False, axis: int = -1
) -> None:
angles = np.asarray(angles)
if angles.ndim == 0:
angles = angles[None, ...]
angles = np.moveaxis(angles, axis, 0)
rotations = list()
for idx, char in enumerate(sequence):
angle: np.ndarray = angles[idx, ...]
if char in ["x", "X"]:
rotvec = np.array((1, 0, 0), dtype=np.float_)
elif char in ["y", "Y"]:
rotvec = np.array((0, 1, 0), dtype=np.float_)
elif char in ["z", "Z"]:
rotvec = np.array((0, 0, 1), dtype=np.float_)
else:
raise ValueError("Unknown axis '{char}' in rotation sequence.")
rotvec = np.broadcast_to(rotvec, (*angle.shape, 3))
rotvec = np.moveaxis(rotvec, -1, axis)
rot = RotvecRotation(rotvec, angle=angle, degrees=degrees, axis=axis)
rotations.append(rot)
if sequence.islower():
super().__init__(rotations)
elif sequence.isupper():
rotations = [x for x in reversed(rotations)]
super().__init__(rotations)
else:
raise ValueError("Can not mix intrinsic and extrinsic rotations.")
class QuaternionRotation(RotvecRotation):
"""Rotation based on Quaternions in 3D.
Parameters
----------
quaternion : ArrayLike
A (possibly non-unit norm) quaternion in ``sequence`` format. It will be
normalized to unit norm.
sequence : str
Specifies the order of parameters in the quaternion. Possible values are
``"xyzw"`` (default), i.e., scalar-last, or "wxyz", i.e., scalar-first.
axis : int
        The axis along which to compute. Default: -1.
Notes
-----
The current implementation uses scipy's rotation class. As such you are
    limited to a single batch dimension. If this is too little, please open an
issue.
"""
def __init__(
self, quaternion: ArrayLike, *, sequence: str = "xyzw", axis: int = -1
) -> None:
quaternion = np.asarray(quaternion)
if sequence == "xyzw":
pass
elif sequence == "wxyz":
quaternion = quaternion[[1, 2, 3, 0]]
else:
raise ValueError(
"Invalid value for sequence. Possible values are 'xyzw' or 'wxyz'."
)
rot = scipy_rotation.from_quat(quaternion)
rotvec = rot.as_rotvec()
angle = rot.magnitude()
super().__init__(rotvec, angle=angle, axis=axis)
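# --- Editor-added usage sketch (not part of the original source) ---
# Hedged example: the quaternion for a 90 degree rotation around the z-axis
# in scalar-last (x, y, z, w) format maps the x-axis onto the y-axis.
def _example_quaternion_rotation():  # pragma: no cover - illustrative only
    quat = (0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4))
    rot = QuaternionRotation(quat)
    return rot.transform((1.0, 0.0, 0.0))  # approximately (0., 1., 0.)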
class FrustumProjection(Link):
"""Frustum based intrinsic camera transformation.
This link computes the 2D camera/pixel position of a point in 3D (world)
space. The projection's center point is located in the origin and the camera
is pointing along the positive z-axis. The origin of the pixel frame is
located at the top left corner of the image with the y-axis pointing down
and the x-axis pointing right. Points along the z-axis are projected into
the center of the image (``image_shape/2``).
Parameters
----------
hfov : float
The angle of the viewing frustum in radians. It is assumed to be less than
pi (180°).
image_shape : ArrayLike
The shape (height, width) of the image plane in pixels.
See Also
--------
:class:`skbot.ignition.FrustumProjection`
Notes
-----
This function assumes that ``hfov`` is less than pi (180°).
Points outside the viewing frustum will still be projected. While most will
be mapped into points outside of ``image_shape``, points on the backside of
the camera may alias with points inside the image. In this case special care
must be taken.
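    Examples
    --------
    A small sketch of the mapping described above (90° horizontal field of
    view, a 480x640 image; a point on the optical axis lands at the image
    center):
    >>> import numpy as np
    >>> from skbot.transform import FrustumProjection
    >>> cam = FrustumProjection(np.pi / 2, (480, 640))
    >>> np.allclose(cam.transform([0.0, 0.0, 1.0]), [240.0, 320.0])
    True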
"""
def __init__(self, hfov: float, image_shape: ArrayLike) -> None:
super().__init__(3, 2)
image_shape = np.asarray(image_shape)
aspect_ratio = image_shape[1] / image_shape[0]
f_x = 1 / (tan(hfov / 2))
f_y = aspect_ratio * f_x
amounts = np.array([[0, 0, f_y], [0, 0, f_x]])
directions = np.array([[0, 2 / image_shape[0], 0], [2 / image_shape[1], 0, 0]])
self.proj = PerspectiveProjection(directions, amounts, axis=-1)
self.tf = Translation(image_shape / 2)
def transform(self, x: ArrayLike) -> np.ndarray:
x_projected = self.proj.transform(x)
x_transformed = self.tf.transform(x_projected)
        return x_transformed | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/utils3d.py | utils3d.py | 0.950583 | 0.617282 |
from math import tan
from typing import Tuple
import numpy as np
from numpy.typing import ArrayLike
from .affine import AffineLink
from ._utils import scalar_project
class PerspectiveProjection(AffineLink):
"""Perspective projection in N-D.
    This link projects an N dimensional frame onto an M dimensional frame,
    using the parent's origin as the center of projection. In its most common
    use this corresponds to a central projection, e.g., the projection of
    coordinates in a 3D world frame down to a 2D camera frame.
This link computes the projection using pairs of ``directions`` and
``amounts`` (both batches of vectors). To compute each coordinate of a
vector in the projected space the vector is first scalar projected onto the
amount (vector). This determines distance from the projection's center. Then
the vector is scalar projected onto the direction (vector) and the
result is scaled (anti-)proportional to the distance from the projection's
center.
Parameters
----------
directions : ArrayLike
A batch of (subspace-)vectors onto which points will be projected. The
vectors run along ``axis`` and the subspace runs along
``subspace_axis``. All other dimensions are considered batch dimensions.
        Often this is a normal basis of the projection's subspace, e.g., the x
        and y axes of a camera's image plane expressed in the parent frame.
amounts : ArrayLike
A batch of vectors indicating the direction along which to measure
distance from the projection center. Its shape must match
``directions.shape``. Often all amount vectors are pairwise linearly
dependent, e.g., they all point in the direction a camera is facing.
    axis : int
        The axis along which the projection is computed. Its length is equal to
        the number of dimensions in the parent frame.
    subspace_axis : int
        The axis along which different directions and amounts are stacked. Its
        length is equal to the number of dimensions in the child frame. Note
        that this axis _must_ be present, even if vectors are projected down to
        1D; in this case, this axis has length 1.
Methods
-------
transform(x)
Expresses the vector x (assumed to be given in the parent's frame) in
the child's frame.
See Also
--------
:class:`skbot.transform.FrustumProjection`, :class:`skbot.ignition.FrustumProjection`
Notes
-----
The length of a single direction vector rescales this axis. For example, if you have
a camera with a certain number of pixels then the length of the direction vector would
reflect this.
The length of a single amount vector determines the scaling of distance. For example, if
you have a camera with a certain focal lengths (fx, fy) then the length of the amount vector
would reflect this.
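    Examples
    --------
    A small sketch of a canonical pinhole projection (image axes x and y,
    optical axis z, unit focal length); it assumes ``PerspectiveProjection`` is
    re-exported by ``skbot.transform`` and uses the standard scalar-projection
    convention described above:
    >>> import numpy as np
    >>> from skbot.transform import PerspectiveProjection
    >>> directions = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    >>> amounts = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]])
    >>> proj = PerspectiveProjection(directions, amounts)
    >>> np.allclose(proj.transform([1.0, 2.0, 2.0]), [0.5, 1.0])
    True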
"""
def __init__(
self,
directions: ArrayLike,
amounts: ArrayLike,
*,
axis: int = -1,
subspace_axis: int = -2
) -> None:
self.directions = np.asarray(directions)
self.amounts = np.asarray(amounts)
# make data axis the last axis (more efficient and easier to handle)
# also make subspace axis the second last axis
self.directions = np.moveaxis(self.directions, [subspace_axis, axis], [-2, -1])
self.amounts = np.moveaxis(self.amounts, [subspace_axis, axis], [-2, -1])
super().__init__(self.directions.shape[axis], self.directions.ndim, axis=axis)
def transform(self, x: ArrayLike) -> np.ndarray:
"""Transform x (given in parent frame) into the child frame.
Parameters
----------
x : ArrayLike
A batch of vectors expressed in the parent's frame. The parent frame runs
along ``axis`` specified in the constructor.
Returns
-------
y : ArrayLike
A batch of vectors expressed in the child's frame. The child frame runs
along ``axis`` specified in the constructor.
Notes
-----
This function requires the batch dimensions of ``x``, ``amounts``, and
``directions`` to be broadcastable. To make an example assume a
projection from N dimensions to M dimensions. In the trivial case
(single vector, single projection) there are no batch dimensions; shapes
are what you'd expect: ``x.shape=(N,)``, ``amounts.shape = (M, N)``,
``directions.shape=(M, N)``. In the case of a batch of vectors and a
single projection, batch dimensions must be broadcastable:
``x.shape=(batch, N)``, ``amounts.shape = (1, M, N)``,
        ``directions.shape=(1, M, N)``. In the case of a single vector
and multiple projections the same rule applies: ``x.shape=(1, N)``,
``amounts.shape = (batch, M, N)``, ``directions.shape=(batch, M, N)``.
Other combinations are - of course - possible, too.
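        Examples
        --------
        A shape-only sketch of the broadcasting rules above (one projection
        from 3D to 2D, a batch of five vectors; ``PerspectiveProjection`` is
        assumed to be re-exported by ``skbot.transform``):
        >>> import numpy as np
        >>> from skbot.transform import PerspectiveProjection
        >>> proj = PerspectiveProjection(np.eye(3)[:2], np.tile([0.0, 0.0, 1.0], (2, 1)))
        >>> proj.transform(np.ones((5, 3))).shape
        (5, 2)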
"""
x = np.asarray(x, dtype=np.float64)
x = np.moveaxis(x, self._axis, -1)
# make x broadcastable with amounts/directions
x = np.expand_dims(x, -2)
scaling = scalar_project(x, self.amounts, axis=-1)
scaling /= np.linalg.norm(self.amounts, axis=-1)
projected = scalar_project(x, self.directions, axis=-1)
projected /= scaling * np.linalg.norm(self.directions, axis=-1)
return projected
def __inverse_transform__(self, x: ArrayLike) -> np.ndarray:
"""Transform x (given in the child frame) into the parent frame.
Parameters
----------
x : ArrayLike
            The vector expressed in the child's frame
Returns
-------
y : ArrayLike
            The vector expressed in the parent's frame
"""
raise NotImplementedError("A projection isn't invertable in general.") | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/transform/projections.py | projections.py | from math import tan
from .. import transform as tf
from ..transform._utils import scalar_project, angle_between
from numpy.typing import ArrayLike
from typing import List, Callable
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.optimize import OptimizeResult
from .targets import Target, PositionTarget, RotationTarget
import warnings
def step_generic_joint(
joint: tf.Joint, target: Target, maxiter: int
) -> Callable[[], None]:
"""Find the optimal value for the current joint."""
def generic_objective(x: float, current_joint: tf.Joint) -> float:
current_joint.param = x
return target.score()
def inner() -> None:
if target.score() < target.atol:
return # nothing to do
result: OptimizeResult = minimize_scalar(
lambda x: generic_objective(x, joint),
bounds=(joint.lower_limit, joint.upper_limit),
method="bounded",
options={"maxiter": maxiter},
)
if not result.success:
raise RuntimeError(f"IK failed. Reason: {result.message}")
joint.param = result.x
return inner
def analytic_rotation(
joint: tf.RotationalJoint, target: PositionTarget
) -> Callable[[], None]:
"""Fast-path for rotation joints and position targets.
This computes the optimal joint value analytically instead of solving
a sub-optimization problem.
.. versionadded:: 0.10.0
"""
joint_idx = target._chain.index(joint)
basis1 = np.array((1, 0), dtype=float)
basis2 = np.array((0, 1), dtype=float)
eps = 1e-10
def inner() -> None:
if target.score() < target.atol:
return # nothing to do
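        # Pull the target point back into the joint's local frame and project
        # both it and the (forward transformed) current position into the
        # joint's plane of rotation; the joint update is then the difference of
        # the two planar angles.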
target_point = target.dynamic_position
for link in reversed(target._chain[joint_idx:]):
target_point = link.__inverse_transform__(target_point)
target_projected = np.array(
[
scalar_project(target_point, joint._u),
scalar_project(target_point, joint._u_ortho),
]
)
current_position = target.static_position
for link in target._chain[:joint_idx]:
current_position = link.transform(current_position)
current_projected = np.array(
[
scalar_project(current_position, joint._u),
scalar_project(current_position, joint._u_ortho),
]
)
# skip adjustment if the desired position is in the joints null space
if np.linalg.norm(target_projected) < eps:
return
target_angle = angle_between(target_projected, basis1)
if angle_between(target_projected, basis2) > np.pi / 2:
target_angle = -target_angle
current_angle = angle_between(current_projected, basis1)
if angle_between(current_projected, basis2) > np.pi / 2:
current_angle = -current_angle
angle = target_angle - current_angle
# it is a bit odd that I have to use - angle here instead of using
# + angle. There may be a bug regarding left/right handedness somewhere
joint.param = np.clip(joint.param - angle, joint.lower_limit, joint.upper_limit)
return inner
def ccd(
targets: List[Target],
joints: List[tf.Joint] = None,
*args,
rtol: float = 1e-6,
maxiter: int = 500,
line_search_maxiter: int = 500,
weights: List[float] = None,
tol: float = None,
cycle_links: List[tf.Joint] = None,
pointA: ArrayLike = None,
pointB: ArrayLike = None,
frameA: tf.Frame = None,
frameB: tf.Frame = None,
metric: Callable[[np.ndarray, np.ndarray], float] = None,
) -> List[np.ndarray]:
"""Cyclic Coordinate Descent.
.. note::
This function will modify the objects in ``joints`` as a side effect.
This function cycles through ``targets`` and ``joints``. For each pair it -
one joint at a time - chooses a value for the joint that minimizes the score
    of the target. If all targets are reached, this function returns the
corresponding joint parameters; otherwise an exception is raised.
.. versionchanged:: 0.10.0
CCD has a new signature and now makes use of Targets.
.. versionchanged:: 0.10.0
CCD can now jointly optimize for multiple targets.
.. versionadded:: 0.7.0
Parameters
----------
targets : List[Target]
A list of quality measures that a successful pose minimizes.
joints : List[joint]
A list of 1DoF joints which should be adjusted to minimize ``targets``.
rtol : float
Relative tolerance for termination. If, after one full cycle, none
of the targets have improved by more than rtol the algorithm terminates
and assumes that a local optimum has been found.
maxiter : int
The maximum number of times to cycle over target+joint pairs.
line_search_maxiter : int
If no fast-path is implemented for a joint+target pair then CCD solves a
1D sub-optimization problem for the pair instead. This parameter limits
the total number of iterations for this sub-optimization.
weights : List[float]
.. deprecated:: 0.10.0
            Targets are optimized cyclically instead of optimizing a weighted sum.
This parameter has no effect.
cycle_links : List[tf.Joint]
.. deprecated:: 0.10.0
Use ``joints`` instead.
A list of 1DoF joints which should be adjusted to minimize targets.
tol : float
.. deprecated:: 0.10.0
Specify ``atol`` on the desired target instead.
Absolute tolerance for termination.
pointA : ArrayLike
.. deprecated:: 0.10.0
Use ``targets`` and a :class:`ik.PositionTarget
<skbot.inverse_kinematics.PositionTarget>` instead.
A list of points. The i-th pointA is represented in the i-th frame of
frameA. If only one point is given, the list can be omitted and the point
can be directly used as input.
pointB : ArrayLike
.. deprecated:: 0.10.0
Use ``targets`` and a :class:`ik.PositionTarget
<skbot.inverse_kinematics.PositionTarget>` instead.
The desired positions of each point given in pointA. The i-th pointB is
represented in the i-th frame of frameB. If only one point is given, the
list can be omitted and the point can be directly used as input.
frameA : tf.Frame
.. deprecated:: 0.10.0
Use ``targets`` and a :class:`ik.PositionTarget
<skbot.inverse_kinematics.PositionTarget>` instead.
The frame in which the points in pointA are represented. The i-th
element corresponds to the i-th pointA. If only one point is given, the
list can be omitted and the frame can be directly used as input.
frameB : tf.Frame
.. deprecated:: 0.10.0
Use ``targets`` and a :class:`ik.PositionTarget
<skbot.inverse_kinematics.PositionTarget>` instead.
The frame in which the points in pointB are represented. The i-th
element corresponds to the i-th pointB. If only one point is given, the
list can be omitted and the frame can be directly used as input.
metric : Callable
.. deprecated:: 0.10.0
Specify ``norm`` in a :class:`PositionTarget
<skbot.inverse_kinematics.PositionTarget>` instead.
A function that takes two points (expressed in the corresponding frameB)
        and that computes the distance between them. Its signature is
        ``metric(transformed_point, pointB) -> distance``. If None, the
        euclidean distance will be used.
Returns
-------
joint_values : List[float]
The final parameters of each joint.
Notes
-----
Joint limits (min/max) are enforced as hard constraints.
The current implementation is a naive python implementation and not very
optimized. PRs improving performance are welcome :)
References
----------
.. [kenwright2012] Kenwright, Ben. "Inverse kinematics-cyclic coordinate descent (CCD)."
Journal of Graphics Tools 16.4 (2012): 177-217.
"""
if len(args) > 0:
if len(args) != 3:
raise TypeError(
f"ccd() takes 2 positional arguments, but {2+len(args)} were given."
)
warnings.warn(
"The signature `ccd(pointA, pointB, frameA, frameB, cycle_links)`"
" is depreciated and will be removed in scikit-bot v1.0."
" Use `targets` combined with a `ik.PositionTarget` instead.",
DeprecationWarning,
)
target = PositionTarget(targets, joints, args[0], args[1])
targets = [target]
joints = args[2]
elif frameA is not None:
warnings.warn(
"The use of `pointA`, `pointB`, `frameA`, and `frameB` is deprecated"
" and will be removed in scikit-bot v1.0."
" Use `targets` combined with a `ik.PositionTarget` instead.",
DeprecationWarning,
)
target = PositionTarget(
static_position=np.asarray(pointA),
dynamic_position=np.asarray(pointB),
static_frame=frameA,
dynamic_frame=frameB,
)
targets.append(target)
if cycle_links is not None:
warnings.warn(
"The use of `cycle_links` is depreciated"
" and will be removed in scikit-bot v1.0."
" Use `joints` instead.",
DeprecationWarning,
)
joints = cycle_links
for target in targets:
target._chain = tf.simplify_links(target._chain, keep_links=joints)
joint_values = [l.param for l in joints]
if tol is not None:
warnings.warn(
"The use of `tol` is depreciated"
" and will be removed in scikit-bot v1.0."
" Specify `atol` on the respective target instead.",
DeprecationWarning,
)
for target in targets:
target.atol = tol
if weights is None:
weights = [1 / len(targets)] * len(targets)
weights = np.asarray(weights)
step_fn = list()
for target in targets:
for joint in joints:
stepper = None
if (
isinstance(target, PositionTarget)
and isinstance(joint, tf.RotationalJoint)
and target.static_frame.ndim == target.dynamic_frame.ndim
and target.static_frame.ndim == 3
and target.usage_count(joint) == 1
):
stepper = analytic_rotation(joint, target)
if stepper is None:
stepper = step_generic_joint(joint, target, line_search_maxiter)
step_fn.append(stepper)
old_scores = np.array([float("inf")] * len(targets))
atols = np.array([x.atol for x in targets])
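    # One step updates a single joint for a single target. Joints are cycled
    # fastest, then targets; a full pass over all (target, joint) pairs makes
    # up one outer iteration of the maxiter budget.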
for step in range(maxiter * len(targets) * len(joints)):
joint_idx = step % len(joints)
residual = step % (len(joints) * len(targets))
target_idx = residual // len(joints)
iteration = step // (len(joints) * len(targets))
if target_idx == 0 and joint_idx == 0:
scores = np.array([x.score() for x in targets])
if np.all(scores < atols):
break
if not any(old_scores - scores > rtol):
raise RuntimeError(
"IK failed. Reason:"
" Loss in the local minimum is greater than `atol`."
)
old_scores = scores
step_fn[len(joints) * target_idx + joint_idx]()
else:
raise RuntimeError(f"IK failed: maxiter exceeded.")
for idx in range(len(joints)):
joint_values[idx] = joints[idx].param
    return joint_values | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/inverse_kinematics/cyclic_coordinate_descent.py | cyclic_coordinate_descent.py | 0.953891 | 0.581184 |
from numpy.typing import ArrayLike
from typing import Callable, List, Union
import numpy as np
from .. import transform as tf
class Target:
"""Abstract IK target.
.. versionadded:: 0.10.0
Parameters
----------
static_frame : tf.Frame
The frame in which the objective is constant.
dynamic_frame : tf.Frame
The frame in which the score is computed.
atol : float
The absolute tolerance for the score. If score is below this value
the target is considered to be reached.
"""
def __init__(
self, static_frame: tf.Frame, dynamic_frame: tf.Frame, *, atol: float = 1e-3
) -> None:
self.static_frame = static_frame
self.dynamic_frame = dynamic_frame
self._chain = self.static_frame.links_between(self.dynamic_frame)
self.atol = atol
def score(self):
"""The score of this target."""
raise NotImplementedError
def usage_count(self, joint: tf.Link) -> int:
"""Frequency of joint use in this target.
This function counts the number of times that ``joint`` is used when
computing the score for this target.
Parameters
----------
joint : tf.Link
The link that has its frequency evaluated.
Returns
-------
frequency : int
            The number of occurrences of the link.
        """
        occurrences = 0
        for link in self._chain:
            if link is joint:
                occurrences += 1
            elif isinstance(link, tf.InvertLink) and link._forward_link is joint:
                occurrences += 1
        return occurrences
def uses(self, joint: tf.Link) -> bool:
"""Check if target uses a joint.
Parameters
----------
joint : tf.Link
The link to check.
Returns
-------
is_used : bool
True if joint is used when evaluating this target's score. False
otherwise.
"""
for link in self._chain:
if link is joint:
return True
elif isinstance(link, tf.InvertLink) and link._forward_link is joint:
return True
return False
class PositionTarget(Target):
"""IK position target (nD).
This target can be used to find an IK solution that positions a point
(``static_position``) expressed in ``static_frame`` at a desired target
position (``dynamic_position``) expressed in ``dynamic_frame``. To compute
the current score, this target transforms ``static_position`` from
``static_frame`` into ``dynamic_frame`` and then measures the distance
    between the transformed point and ``dynamic_position`` under the desired norm
(default: L2).
.. versionadded:: 0.10.0
Parameters
----------
static_position : ArrayLike
The value of a position that moves in ``dynamic_frame`` expressed in
``static_frame``.
    dynamic_position : ArrayLike
The value of the target position expressed in ``dynamic_frame``.
static_frame : tf.Frame
The frame in which the moving position is expressed.
dynamic_frame : tf.Frame
The frame in which the target position is expressed.
norm : Callable
A function of the form ``norm(ArrayLike) -> float`` that computes the
norm of the distance between ``target_position`` and the transformed
``static_position`` in ``dynamic_frame``. If None defaults to L2.
"""
def __init__(
self,
static_position: ArrayLike,
dynamic_position: ArrayLike,
static_frame: tf.Frame,
dynamic_frame: tf.Frame,
norm: Callable[[np.ndarray], float] = None,
*,
atol: float = 1e-3,
) -> None:
super().__init__(static_frame, dynamic_frame, atol=atol)
self.static_position = np.asarray(static_position)
self.dynamic_position = np.asarray(dynamic_position)
if norm is None:
self.norm = np.linalg.norm
else:
self.norm = norm
def score(self):
current_pos = self.static_position
for link in self._chain:
current_pos = link.transform(current_pos)
return self.norm(self.dynamic_position - current_pos)
class RotationTarget(Target):
"""IK rotation target (2D/3D).
This target can be used to find an IK solution such that
``dynamic_frame`` has rotation ``desired_rotation`` when the
rotation is expressed relative to ``static_frame``. The score
function computes the distance in radians between the current
rotation and desired rotation.
.. versionadded:: 0.10.0
Parameters
----------
desired_rotation : Union[tf.Link, List[tf.Link]]
A link or list of links that expresses the rotation of
``dynamic_frame`` relative to ``static_frame``.
static_frame : tf.Frame
The frame in which the rotation is expressed.
dynamic_frame : tf.Frame
The frame that should be rotated by
``desired_rotation`` relative to ``static_frame``.
"""
def __init__(
self,
desired_rotation: Union[tf.Link, List[tf.Link]],
static_frame: tf.Frame,
dynamic_frame: tf.Frame,
*,
atol: float = 1e-3,
) -> None:
parent_dim = static_frame.ndim
child_dim = dynamic_frame.ndim
if parent_dim != child_dim:
raise NotImplementedError("Projected Targets are not supported yet.")
if parent_dim not in [2, 3]:
raise NotImplementedError("Only 2D and 3D is currently supported.")
super().__init__(static_frame, dynamic_frame, atol=atol)
if isinstance(desired_rotation, tf.Link):
self.desired_rotation = [desired_rotation]
else:
self.desired_rotation = desired_rotation
self.desired_rotation = tf.simplify_links(self.desired_rotation)
self.desired_rotation = [
x for x in self.desired_rotation if not isinstance(x, tf.Translation)
]
def score(self):
basis = np.eye(self.static_frame.ndim)
desired_basis = basis
for link in self.desired_rotation:
desired_basis = link.transform(desired_basis)
reduced = tf.simplify_links(self._chain)
reduced = [x for x in reduced if not isinstance(x, tf.Translation)]
actual_basis = basis
for link in reduced:
actual_basis = link.transform(actual_basis)
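        # The angle of the relative rotation (desired vs. actual) follows from
        # its trace: tr(R) = 1 + 2*cos(theta) in 3D and tr(R) = 2*cos(theta) in 2D.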
trace = np.trace(desired_basis @ actual_basis.T)
if self.static_frame.ndim == 3:
value = np.clip((trace - 1) / 2, -1, 1)
theta = np.arccos(value)
elif self.static_frame.ndim == 2:
value = np.clip(trace / 2, -1, 1)
theta = np.arccos(value)
else:
raise NotImplementedError("Only 2D and 3D is currently supported.")
        return theta | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/inverse_kinematics/targets.py | targets.py | 0.96893 | 0.65846 |
import numpy as np
from scipy.interpolate import splprep, splev
from numpy.typing import ArrayLike
from typing import Optional
def spline_trajectory(
t: ArrayLike,
control_points: ArrayLike,
*,
t_control: Optional[ArrayLike] = None,
degree: int = 3,
t_min: float = 0,
t_max: float = 1,
derivative: int = 0,
) -> np.ndarray:
"""Evaluate the trajectory given by control_points at t using B-spline
interpolation.
    ``spline_trajectory`` constructs a ``degree - 1`` times continuously
    differentiable trajectory using the given control points and then evaluates the resulting
trajectory at ``t``. It does so using B-splines. By default, control points
are spaced out evenly in the interval ``[t_min, t_max]`` where ``t=t_min``
results in ``control_points[0]`` and ``t=t_max`` results in
``control_poins[-1]``. Alternatively, the spacing of control points can be
set manually by specifying ``t_control``, which implicitly specifies
``t_min`` and ``t_max``.
Parameters
----------
t : np.ndarray
An array containing positions at which to evaluate the trajectory.
Elements of ``t`` must be within ``[t_min, t_max]``.
control_points : np.ndarray
A batch of control points used to construct the trajectory. The first
dimension of the array is interpreted as batch dimension and the
remaining dimensions are used to interpolate between. By default,
control points are equally spaced within ``[t_min, t_max]`` unless
``t_control`` is given explicitly.
t_control : np.ndarray, None
A sequence of strictly increasing floats determining the position of the
control points along the trajectory. None by default, which results in
an equidistant spacing of points.
degree : int
        The degree of the spline; odd numbers are preferred. The resulting
        spline is ``degree - 1`` times continuously differentiable.
t_min : float
        Minimum value of the trajectory's parametrization. Must be smaller than
``t_max``. If ``t_control`` is set, this value is ignored in favor of
``t_min=t_control[0]``
t_max : float
        Maximum value of the trajectory's parametrization. Must be larger than
``t_min``. If ``t_control`` is set, this value is ignored in favor of
``t_max=t_control[-1]``.
derivative : int
The derivative of the interpolated trajectory to compute. For example,
``derivative=2`` differentiates the trajectory twice with respect to
``t`` and then evaluates the derivative at the given ``t``.
Returns
-------
position : np.ndarray
The value of the trajectory at ``t``.
Notes
-----
The dimension of the space embedding the trajectory must be less than 12,
i.e. ``<= 11``, due to limitations in scipy. If more dimensions are needed,
please open an issue; a workaround is to split the trajectory into chunks
of less than 11 dimensions each.
Repeated evaluation of single points on the trajectory, i.e. repeatedly
calling this function with scalar ``t``, is possible, but will repeatedly
reconstruct the trajectory, which can lead to unnecessary slowdown. For
better performance, it is preferred to use an array-like ``t``.
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from skbot.trajectory import spline_trajectory
>>> t1 = np.linspace(0, 2*np.pi, 10)
>>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)
>>> t2 = np.linspace(0, 2*np.pi, 100)
>>> trajectory = spline_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)
>>> fig, ax = plt.subplots()
>>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')
>>> fig.legend(('Trajectory', 'Control Points'))
>>> plt.show()
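    >>> # The same call also evaluates derivatives of the trajectory, e.g. the
    >>> # velocity along the circle above (a short sketch):
    >>> velocity = spline_trajectory(t2, control_points, t_min=0, t_max=2*np.pi, derivative=1)
    >>> velocity.shape
    (100, 2)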
"""
t = np.asarray(t)
control_points = np.asarray(control_points)
if control_points.ndim == 1:
control_points = control_points[:, None]
if t_control is None:
t_control = np.linspace(t_min, t_max, len(control_points), dtype=np.float_)
else:
t_control = np.asarray(t_control)
t_min = t_control[0]
t_max = t_control[-1]
tck, u = splprep(control_points.T, u=t_control, s=0, ub=t_min, ue=t_max, k=degree)
    return np.stack(splev(t, tck, der=derivative, ext=2), axis=-1) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/trajectory/spline.py | spline.py | 0.944408 | 0.757817 |
import numpy as np
from numpy.typing import ArrayLike
def integral(control_points: ArrayLike, t: ArrayLike, *, axis: int = 0) -> float:
"""Estimate the integral along a curve.
Estimates the integral of a time-parameterized curve along the chosen axis.
The curve is given by a sequence of control points and their respective
times. These times need not be spaced out uniformly.
Parameters
----------
control_points : ArrayLike
The values of the curve at time points t.
t : ArrayLike
The time points at which the curve was evaluated.
axis : int
The axis along which to integrate. All other axes are
treated as batch dimensions.
Returns
-------
estimate : float
The estimate of the integral under the curve.
Notes
-----
The shapes of control_points and t must match or be broadcastable.
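Examples
--------
A minimal numerical sketch; the values below are illustrative only. The
curve ``y = 2*t`` has an exact integral of 1 on ``[0, 1]``:
.. doctest::
    >>> import numpy as np
    >>> from skbot.trajectory.utils import integral
    >>> t = np.linspace(0, 1, 5)
    >>> float(integral(2 * t, t))
    1.0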
"""
# This implementation uses the trapezoidal rule
control_points = np.asarray(control_points)
t = np.asarray(t)
f_range = np.arange(control_points.shape[axis] - 1)
f_lower = np.take(control_points, f_range, axis=axis)
f_upper = np.take(control_points, f_range + 1, axis=axis)
f_k = (f_lower + f_upper) / 2
t_range = np.arange(t.shape[axis] - 1)
t_lower = np.take(t, t_range, axis=axis)
t_upper = np.take(t, t_range + 1, axis=axis)
delta_t = t_upper - t_lower
return np.sum(f_k * delta_t, axis=axis)
def cumulative_integral(
control_points: ArrayLike, t: ArrayLike, *, axis: int = 0
) -> np.ndarray:
"""Estimate the cumulative integral along a curve.
Estimates the cumulative integral of a time-parameterized curve along the
chosen axis. The curve is given by a sequence of control points and their
respective times. These times need not be spaced out uniformly.
Parameters
----------
control_points : ArrayLike
The values of the curve at time points t.
t : ArrayLike
The time points at which the curve was evaluated.
axis : int
The axis along which to integrate. All other axes are
treated as batch dimensions.
Returns
-------
estimate : np.ndarray
The estimates of the cumulative integral along the curve; the last element
along ``axis`` equals the full integral.
Notes
-----
The shapes of control_points and t must match or be broadcastable.
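Examples
--------
A minimal numerical sketch; the values below are illustrative only. The last
element of the result equals the full integral of ``y = 2*t`` on ``[0, 1]``:
.. doctest::
    >>> import numpy as np
    >>> from skbot.trajectory.utils import cumulative_integral
    >>> t = np.linspace(0, 1, 5)
    >>> float(cumulative_integral(2 * t, t)[-1])
    1.0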
"""
# This implementation uses the trapezoidal rule
control_points = np.asarray(control_points)
t = np.asarray(t)
f_range = np.arange(control_points.shape[axis] - 1)
f_lower = np.take(control_points, f_range, axis=axis)
f_upper = np.take(control_points, f_range + 1, axis=axis)
f_k = (f_lower + f_upper) / 2
f_k = np.insert(f_k, 0, 0, axis=axis)
t_range = np.arange(t.shape[axis] - 1)
t_lower = np.take(t, t_range, axis=axis)
t_upper = np.take(t, t_range + 1, axis=axis)
delta_t = t_upper - t_lower
delta_t = np.insert(delta_t, 0, 0, axis=axis)
return np.cumsum(f_k * delta_t, axis=axis)
__all__ = ["integral", "cumulative_integral"] | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/trajectory/utils.py | utils.py | import numpy as np
from numpy.typing import ArrayLike
def integral(control_points: ArrayLike, t: ArrayLike, *, axis: int = 0) -> float:
"""Estimate the integral along a curve.
Estimates the integral of a time-parameterized curve along the chosen axis.
The curve is given by a sequence of control points and their respective
times. These times need not be spaced out uniformly.
Parameters
----------
control_points : ArrayLike
The values of the curve at time points t.
t : ArrayLike
The time points at which the curve was evaluated.
axis : int
The axis along which to integrate. All other axes are
treated as batch dimensions.
Returns
-------
estimate : float
The estimate of the integral under the curve.
Notes
-----
The shapes of control_points and t must match or be broadcastable.
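Examples
--------
A minimal numerical sketch; the values below are illustrative only. The
curve ``y = 2*t`` has an exact integral of 1 on ``[0, 1]``:
.. doctest::
    >>> import numpy as np
    >>> from skbot.trajectory.utils import integral
    >>> t = np.linspace(0, 1, 5)
    >>> float(integral(2 * t, t))
    1.0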
"""
# This implementation uses the trapezoidal rule
control_points = np.asarray(control_points)
t = np.asarray(t)
f_range = np.arange(control_points.shape[axis] - 1)
f_lower = np.take(control_points, f_range, axis=axis)
f_upper = np.take(control_points, f_range + 1, axis=axis)
f_k = (f_lower + f_upper) / 2
t_range = np.arange(t.shape[axis] - 1)
t_lower = np.take(t, t_range, axis=axis)
t_upper = np.take(t, t_range + 1, axis=axis)
delta_t = t_upper - t_lower
return np.sum(f_k * delta_t, axis=axis)
def cumulative_integral(
control_points: ArrayLike, t: ArrayLike, *, axis: int = 0
) -> np.ndarray:
"""Estimate the cumulative integral along a curve.
Estimates the cumulative integral of a time-parameterized curve along the
chosen axis. The curve is given by a sequence of control points and their
respective times. These times need not be spaced out uniformly.
Parameters
----------
control_points : ArrayLike
The values of the curve at time points t.
t : ArrayLike
The time points at which the curve was evaluated.
axis : int
The axis along which to integrate. All other axes are
treated as batch dimensions.
Returns
-------
estimate : np.ndarray
The estimates of the cumulative integral along the curve; the last element
along ``axis`` equals the full integral.
Notes
-----
The shapes of control_points and t must match or be broadcastable.
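Examples
--------
A minimal numerical sketch; the values below are illustrative only. The last
element of the result equals the full integral of ``y = 2*t`` on ``[0, 1]``:
.. doctest::
    >>> import numpy as np
    >>> from skbot.trajectory.utils import cumulative_integral
    >>> t = np.linspace(0, 1, 5)
    >>> float(cumulative_integral(2 * t, t)[-1])
    1.0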
"""
# This implementation uses the trapezoidal rule
control_points = np.asarray(control_points)
t = np.asarray(t)
f_range = np.arange(control_points.shape[axis] - 1)
f_lower = np.take(control_points, f_range, axis=axis)
f_upper = np.take(control_points, f_range + 1, axis=axis)
f_k = (f_lower + f_upper) / 2
f_k = np.insert(f_k, 0, 0, axis=axis)
t_range = np.arange(t.shape[axis] - 1)
t_lower = np.take(t, t_range, axis=axis)
t_upper = np.take(t, t_range + 1, axis=axis)
delta_t = t_upper - t_lower
delta_t = np.insert(delta_t, 0, 0, axis=axis)
return np.cumsum(f_k * delta_t, axis=axis)
__all__ = ["integral", "cumulative_integral"] | 0.94049 | 0.888081 |
import numpy as np
from scipy.interpolate import interp1d
from numpy.typing import ArrayLike
from typing import Optional
def linear_trajectory(
t: ArrayLike,
control_points: ArrayLike,
*,
t_control: Optional[ArrayLike] = None,
t_min: float = 0,
t_max: float = 1
) -> np.ndarray:
"""Evaluate the trajectory given by control_points at t using linear
interpolation.
``linear_trajectory`` constructs a piece-wise linear trajectory using the
given control points and then evaluates the resulting trajectory at ``t``.
By default, control points are spaced out evenly in the interval ``[t_min,
t_max]`` where ``t=t_min`` results in ``control_points[0]`` and ``t=t_max``
results in ``control_points[-1]``. Alternatively, the spacing of control
points can be controlled manually by specifying ``t_control``, which
implicitly specifies ``t_min`` and ``t_max``.
Parameters
----------
t : ArrayLike
An array containing positions at which to evaluate the trajectory.
Elements of ``t`` must be within ``[t_min, t_max]``.
control_points : ArrayLike
A batch of control points used to construct the trajectory. The first
dimension of the array is interpreted as batch dimension and the
remaining dimensions are used to interpolate between. By default,
control points are equally spaced within ``[t_min, t_max]`` unless
``t_control`` is given explicitly.
t_control : ArrayLike
A sequence of strictly increasing floats determining the position of the
control points along the trajectory. None by default, which results in
an equidistant spacing of points.
t_min : float
Minimum value of the trajectory's parametrization. Must be smaller than
``t_max``. If ``t_control`` is set, this value is ignored in favor of
``t_min=t_control[0]``.
t_max : float
Maximum value of the trajectory's parametrization. Must be larger than
``t_min``. If ``t_control`` is set, this value is ignored in favor of
``t_max=t_control[-1]``.
Returns
-------
position : np.ndarray
The value of the trajectory at ``t``.
Notes
-----
Repeated evaluation of single points on the trajectory, i.e. repeatedly
calling this function with a scalar ``t``, is possible, but will repeatedly
reconstruct the trajectory, which can lead to unnecessary slowdown. For
better performance, it is preferred to use an array-like ``t``.
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from skbot.trajectory import linear_trajectory
>>> t1 = np.linspace(0, 2*np.pi, 10)
>>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)
>>> t2 = np.linspace(0, 2*np.pi, 100)
>>> trajectory = linear_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)
>>> fig, ax = plt.subplots()
>>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')
>>> fig.legend(('Trajectory', 'Control Points'))
>>> plt.show()
"""
t = np.asarray(t)
control_points = np.asarray(control_points)
if t_control is None:
t_control = np.linspace(t_min, t_max, len(control_points), dtype=np.float_)
else:
t_control = np.asarray(t_control)
t_min = t_control[0]
t_max = t_control[-1]
position = interp1d(t_control, control_points, axis=0)(t)
return position | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/trajectory/linear.py | linear.py | import numpy as np
from scipy.interpolate import interp1d
from numpy.typing import ArrayLike
from typing import Optional
def linear_trajectory(
t: ArrayLike,
control_points: ArrayLike,
*,
t_control: Optional[ArrayLike] = None,
t_min: float = 0,
t_max: float = 1
) -> np.ndarray:
"""Evaluate the trajectory given by control_points at t using linear
interpolation.
``linear_trajectory`` constructs a piece-wise linear trajectory using the
given control points and then evaluates the resulting trajectory at ``t``.
By default, control points are spaced out evenly in the interval ``[t_min,
t_max]`` where ``t=t_min`` results in ``control_points[0]`` and ``t=t_max``
results in ``control_points[-1]``. Alternatively, the spacing of control
points can be controlled manually by specifying ``t_control``, which
implicitly specifies ``t_min`` and ``t_max``.
Parameters
----------
t : ArrayLike
An array containing positions at which to evaluate the trajectory.
Elements of ``t`` must be within ``[t_min, t_max]``.
control_points : ArrayLike
A batch of control points used to construct the trajectory. The first
dimension of the array is interpreted as batch dimension and the
remaining dimensions are used to interpolate between. By default,
control points are equally spaced within ``[t_min, t_max]`` unless
``t_control`` is given explicitly.
t_control : ArrayLike
A sequence of strictly increasing floats determining the position of the
control points along the trajectory. None by default, which results in
an equidistant spacing of points.
t_min : float
Minimum value of the trajectory's parametrization. Must be smaller than
``t_max``. If ``t_control`` is set, this value is ignored in favor of
``t_min=t_control[0]``.
t_max : float
Maximum value of the trajectory's parametrization. Must be larger than
``t_min``. If ``t_control`` is set, this value is ignored in favor of
``t_max=t_control[-1]``.
Returns
-------
position : np.ndarray
The value of the trajectory at ``t``.
Notes
-----
Repeated evaluation of single points on the trajectory, i.e. repeatedly
calling this function with a scalar ``t``, is possible, but will repeatedly
reconstruct the trajectory, which can lead to unnecessary slowdown. For
better performance, it is preferred to use an array-like ``t``.
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from skbot.trajectory import linear_trajectory
>>> t1 = np.linspace(0, 2*np.pi, 10)
>>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)
>>> t2 = np.linspace(0, 2*np.pi, 100)
>>> trajectory = linear_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)
>>> fig, ax = plt.subplots()
>>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')
>>> fig.legend(('Trajectory', 'Control Points'))
>>> plt.show()
"""
t = np.asarray(t)
control_points = np.asarray(control_points)
if t_control is None:
t_control = np.linspace(t_min, t_max, len(control_points), dtype=np.float_)
else:
t_control = np.asarray(t_control)
t_min = t_control[0]
t_max = t_control[-1]
position = interp1d(t_control, control_points, axis=0)(t)
return position | 0.952695 | 0.76921 |
from xml.etree import ElementTree
from scipy.spatial.transform import Rotation
from typing import Dict, List, Tuple
import numpy as np
from ... import transform as rtf
def create_frame_graph(urdf: str) -> Tuple[Dict[str, rtf.Frame], Dict[str, rtf.Link]]:
"""Create a frame graph from a URDF string.
Parameters
----------
urdf : str
A string containing the URDF XML.
Returns
-------
frames : Dict[str, Frame]
A dict of frames (links and joints) contained in the file.
links : Dict[str, Link]
A dict of links in the graph. Names are chosen based on joint names.
See Also
--------
:mod:`skbot.transform`
Notes
-----
``frames[jointName]`` will return the joint's frame that is attached to the
parent.
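Examples
--------
A minimal sketch using a purely illustrative two-link robot (the URDF below
is a hypothetical example, not a shipped model)::
    urdf = (
        '<robot name="two_links">'
        '<link name="base"/>'
        '<link name="tool"/>'
        '<joint name="joint0" type="revolute">'
        '<parent link="base"/>'
        '<child link="tool"/>'
        '<axis xyz="0 0 1"/>'
        '</joint>'
        '</robot>'
    )
    frames, links = create_frame_graph(urdf)
    # frames now has the keys "base", "tool", and "joint0";
    # links["joint0"] is the transform link created for the revolute joint.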
"""
tree = ElementTree.fromstring(urdf)
frames = dict()
links = dict()
links_to_process: List[ElementTree.Element] = list()
for child in tree:
if child.tag == "link":
frames[child.attrib["name"]] = rtf.Frame(3, name=child.attrib["name"])
elif child.tag == "joint":
frames[child.attrib["name"]] = rtf.Frame(3, name=child.attrib["name"])
links_to_process.append(child)
for link in links_to_process:
frame_joint = frames[link.attrib["name"]]
joint_type = link.attrib["type"]
frame_offset = np.zeros(3, dtype=np.float_)
frame_rotation = np.zeros(3, dtype=np.float_)
for child in link:
if child.tag == "parent":
frame_parent = frames[child.attrib["link"]]
elif child.tag == "child":
frame_child = frames[child.attrib["link"]]
elif child.tag == "origin":
try:
frame_offset = child.attrib["xyz"]
frame_offset = -np.array(frame_offset.split(" "), dtype=np.float_)
except KeyError:
frame_offset = np.zeros(3, dtype=np.float_)
try:
frame_rotation = child.attrib["rpy"]
frame_rotation = np.array(
frame_rotation.split(" "), dtype=np.float_
)
except KeyError:
frame_rotation = np.zeros(3, dtype=np.float_)
elif child.tag == "axis":
axis = child.attrib["xyz"]
axis = np.array(axis.split(" "), dtype=np.float_)
# link parent -> joint
rotation = Rotation.from_euler("xyz", frame_rotation)
if rotation.magnitude() > 0:
intermediate_frame = rtf.EulerRotation("xyz", frame_rotation)(frame_parent)
rtf.affine.Translation(frame_offset)(intermediate_frame, frame_joint)
else:
rtf.affine.Translation(frame_offset)(frame_parent, frame_joint)
# link joint -> child
if joint_type == "fixed":
frame_link = rtf.affine.Translation((0, 0, 0))
elif joint_type == "revolute":
frame_link = rtf.RotvecRotation(axis, angle=0)
elif joint_type == "prismatic":
frame_link = rtf.affine.Translation(-axis, amount=0)
else:
raise ValueError(f"Unsupported Joint type {joint_type}")
frame_link(frame_joint, frame_child)
links[link.attrib["name"]] = frame_link
return frames, links | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ros/generic/urdf.py | urdf.py | from xml.etree import ElementTree
from scipy.spatial.transform import Rotation
from typing import Dict, List, Tuple
import numpy as np
from ... import transform as rtf
def create_frame_graph(urdf: str) -> Tuple[Dict[str, rtf.Frame], Dict[str, rtf.Link]]:
"""Create a frame graph from a URDF string.
Parameters
----------
urdf : str
A string containing the URDF XML.
Returns
-------
frames : Dict[str, Frame]
A dict of frames (links and joints) contained in the file.
links : Dict[str, Link]
A dict of links in the graph. Names are chosen based on joint names.
See Also
--------
:mod:`skbot.transform`
Notes
-----
``frames[jointName]`` will return the joint's frame that is attached to the
parent.
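Examples
--------
A minimal sketch using a purely illustrative two-link robot (the URDF below
is a hypothetical example, not a shipped model)::
    urdf = (
        '<robot name="two_links">'
        '<link name="base"/>'
        '<link name="tool"/>'
        '<joint name="joint0" type="revolute">'
        '<parent link="base"/>'
        '<child link="tool"/>'
        '<axis xyz="0 0 1"/>'
        '</joint>'
        '</robot>'
    )
    frames, links = create_frame_graph(urdf)
    # frames now has the keys "base", "tool", and "joint0";
    # links["joint0"] is the transform link created for the revolute joint.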
"""
tree = ElementTree.fromstring(urdf)
frames = dict()
links = dict()
links_to_process: List[ElementTree.Element] = list()
for child in tree:
if child.tag == "link":
frames[child.attrib["name"]] = rtf.Frame(3, name=child.attrib["name"])
elif child.tag == "joint":
frames[child.attrib["name"]] = rtf.Frame(3, name=child.attrib["name"])
links_to_process.append(child)
for link in links_to_process:
frame_joint = frames[link.attrib["name"]]
joint_type = link.attrib["type"]
frame_offset = np.zeros(3, dtype=np.float_)
frame_rotation = np.zeros(3, dtype=np.float_)
for child in link:
if child.tag == "parent":
frame_parent = frames[child.attrib["link"]]
elif child.tag == "child":
frame_child = frames[child.attrib["link"]]
elif child.tag == "origin":
try:
frame_offset = child.attrib["xyz"]
frame_offset = -np.array(frame_offset.split(" "), dtype=np.float_)
except KeyError:
frame_offset = np.zeros(3, dtype=np.float_)
try:
frame_rotation = child.attrib["rpy"]
frame_rotation = np.array(
frame_rotation.split(" "), dtype=np.float_
)
except KeyError:
frame_rotation = np.zeros(3, dtype=np.float_)
elif child.tag == "axis":
axis = child.attrib["xyz"]
axis = np.array(axis.split(" "), dtype=np.float_)
# link parent -> joint
rotation = Rotation.from_euler("xyz", frame_rotation)
if rotation.magnitude() > 0:
intermediate_frame = rtf.EulerRotation("xyz", frame_rotation)(frame_parent)
rtf.affine.Translation(frame_offset)(intermediate_frame, frame_joint)
else:
rtf.affine.Translation(frame_offset)(frame_parent, frame_joint)
# link joint -> child
if joint_type == "fixed":
frame_link = rtf.affine.Translation((0, 0, 0))
elif joint_type == "revolute":
frame_link = rtf.RotvecRotation(axis, angle=0)
elif joint_type == "prismatic":
frame_link = rtf.affine.Translation(-axis, amount=0)
else:
raise ValueError(f"Unsupported Joint type {joint_type}")
frame_link(frame_joint, frame_child)
links[link.attrib["name"]] = frame_link
return frames, links | 0.90241 | 0.547162 |
import zmq
import subprocess
import socket
import os
import getpass
from . import messages
class Subscriber:
"""Subscribe and listen to Ignition messages.
Ignition uses ZMQ_ to pass around protocol buffers as means of
communication. This subscriber enables Python to receive copies of these
buffers. For more information on the messages published by ignition, and how
it works, check out the `Ign-Msgs documentation`_ as well as the
`Ign-Transport documentation`_.
.. _ZMQ: https://zeromq.org/
.. _`Ign-Msgs documentation`: https://ignitionrobotics.org/api/msgs/6.4/index.html
.. _`Ign-Transport documentation`: https://ignitionrobotics.org/api/transport/9.1/index.html
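Examples
--------
A usage sketch; it assumes a running Ignition simulation and uses the
``/clock`` topic purely as an illustration::
    with Subscriber("/clock") as sub:
        msg = sub.recv()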
"""
def __init__(self, topic: str, *, parser=None):
"""Initialize a new subscriber for the given topic.
Creates an object that uses a context manager to subscribe to
ign-transport topics and receive messages from it.
Parameters
----------
topic : str
The name of the topic to subscribe to as shown by `ign topic -l`.
parser : function
A function that deserializes the message. The signature of the parser function
is ``fn(zmq_message) -> result`` where ``zmq_message`` is the raw multipart
message received from the socket; its third element is the serialized
protobuf and its fourth element is the message type. If None, the subscriber
will use a default parser that converts the protobuf payload into a
``skbot.ignition.messages.<message_type>`` data object.
Returns
-------
self : Subscriber
An object that can subscribe to ign-transport within a context
manager
"""
context = zmq.Context()
self.socket = context.socket(zmq.SUB)
self.topic = topic
# this could be streamlined by speaking the ign-transport discovery protocol
host_name = socket.gethostname()
user_name = getpass.getuser()
self.socket.subscribe(f"@/{host_name}:{user_name}@{topic}")
if parser is None:
self.parser = lambda msg: getattr(
messages, msg[3].decode("utf-8").split(".")[-1]
)().parse(msg[2])
else:
self.parser = parser
def recv(self, blocking=True, timeout=1000) -> tuple:
"""Receive a message from the topic
Parameters
----------
blocking : bool
If True (default) block until a message is received. If False, raise
an IOError if no message is available at the time of query.
timeout : int
Time (in ms) to wait for a message to arrive. If the time is
exceeded, an IOError will be raised. Will wait indefinitely if set
to `-1`. This only works if ``blocking=True``.
Returns
-------
msg : PyObject
If a parser was specified during instantiation, returns the result
of the parser. Otherwise it will use the default parser and return a
``skbot.ignition.messages.<message_type>`` data object.
"""
self.socket.setsockopt(zmq.RCVTIMEO, timeout)
try:
if blocking:
msg = self.socket.recv_multipart()
else:
msg = self.socket.recv_multipart(zmq.NOBLOCK)
except zmq.Again:
raise IOError(f"Topic {self.topic} did not send a message.")
result = self.parser(msg)
return result
def __enter__(self):
# Weird hack to encourage ign-transport to actually publish camera
# messages: start an echo subscriber and print the messages into the void
# to make ign realize that something is listening to the topic. Tracking
# issue: https://github.com/ignitionrobotics/ign-transport/issues/225
self.echo_subscriber = subprocess.Popen(
["ign", "topic", "-e", "-t", self.topic], stdout=open(os.devnull, "w")
)
# this is a bad hack and should be implemented by talking the
# ign-transport discovery protocol
result = subprocess.check_output(f"ign topic -i -t {self.topic}", shell=True)
self.address = (
result.decode("utf-8")
.split("\n")[1]
.split(",")[0]
.replace("\t", "")
.replace(" ", "")
)
if not self.address:
self.echo_subscriber.terminate()
raise IOError(f"Could not identify socket for {self.topic}.")
self.socket.connect(self.address)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.socket.disconnect(self.address)
self.echo_subscriber.terminate() | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/subscriber.py | subscriber.py | import zmq
import subprocess
import socket
import os
import getpass
from . import messages
class Subscriber:
"""Subscribe and listen to Ignition messages.
Ignition uses ZMQ_ to pass around protocol buffers as means of
communication. This subscriber enables Python to receive copies of these
buffers. For more information on the messages published by ignition, and how
it works, check out the `Ign-Msgs documentation`_ as well as the
`Ign-Transport documentation`_.
.. _ZMQ: https://zeromq.org/
.. _`Ign-Msgs documentation`: https://ignitionrobotics.org/api/msgs/6.4/index.html
.. _`Ign-Transport documentation`: https://ignitionrobotics.org/api/transport/9.1/index.html
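Examples
--------
A usage sketch; it assumes a running Ignition simulation and uses the
``/clock`` topic purely as an illustration::
    with Subscriber("/clock") as sub:
        msg = sub.recv()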
"""
def __init__(self, topic: str, *, parser=None):
"""Initialize a new subscriber for the given topic.
Creates an object that uses a context manager to subscribe to
ign-transport topics and receive messages from it.
Parameters
----------
topic : str
The name of the topic to subscribe to as shown by `ign topic -l`.
parser : function
A function that deserializes the message. The signature of the parser function
is ``fn(zmq_message) -> result`` where ``zmq_message`` is the raw multipart
message received from the socket; its third element is the serialized
protobuf and its fourth element is the message type. If None, the subscriber
will use a default parser that converts the protobuf payload into a
``skbot.ignition.messages.<message_type>`` data object.
Returns
-------
self : Subscriber
An object that can subscribe to ign-transport within a context
manager
"""
context = zmq.Context()
self.socket = context.socket(zmq.SUB)
self.topic = topic
# this could be streamlined by speaking the ign-transport discovery protocol
host_name = socket.gethostname()
user_name = getpass.getuser()
self.socket.subscribe(f"@/{host_name}:{user_name}@{topic}")
if parser is None:
self.parser = lambda msg: getattr(
messages, msg[3].decode("utf-8").split(".")[-1]
)().parse(msg[2])
else:
self.parser = parser
def recv(self, blocking=True, timeout=1000) -> tuple:
"""Receive a message from the topic
Parameters
----------
blocking : bool
If True (default) block until a message is received. If False, raise
an IOError if no message is available at the time of query.
timeout : int
Time (in ms) to wait for a message to arrive. If the time is
exceeded, an IOError will be raised. Will wait indefinitely if set
to `-1`. This only works if ``blocking=True``.
Returns
-------
msg : PyObject
If a parser was specified during instantiation, returns the result
of the parser. Otherwise it will use the default parser and return a
``skbot.ignition.messages.<message_type>`` data object.
"""
self.socket.setsockopt(zmq.RCVTIMEO, timeout)
try:
if blocking:
msg = self.socket.recv_multipart()
else:
msg = self.socket.recv_multipart(zmq.NOBLOCK)
except zmq.Again:
raise IOError(f"Topic {self.topic} did not send a message.")
result = self.parser(msg)
return result
def __enter__(self):
# Weird hack to encourage ign-transport to actually publish camera
# messages: start an echo subscriber and print the messages into the void
# to make ign realize that something is listening to the topic. Tracking
# issue: https://github.com/ignitionrobotics/ign-transport/issues/225
self.echo_subscriber = subprocess.Popen(
["ign", "topic", "-e", "-t", self.topic], stdout=open(os.devnull, "w")
)
# this is a bad hack and should be implemented by talking the
# ign-transport discovery protocol
result = subprocess.check_output(f"ign topic -i -t {self.topic}", shell=True)
self.address = (
result.decode("utf-8")
.split("\n")[1]
.split(",")[0]
.replace("\t", "")
.replace(" ", "")
)
if not self.address:
self.echo_subscriber.terminate()
raise IOError(f"Could not identify socket for {self.topic}.")
self.socket.connect(self.address)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.socket.disconnect(self.address)
self.echo_subscriber.terminate() | 0.695545 | 0.321487 |
from urllib.parse import quote
import cachetools
import requests
from cachetools import TTLCache
from zipfile import ZipFile
from io import BytesIO
from pathlib import Path
from dataclasses import dataclass, field
from typing import List, Optional, Callable, Union
class InternalCache:
"""Simple Caching structure"""
def __init__(self, maxsize=float("inf"), time_to_live=24 * 60 * 60) -> None:
self._cache = TTLCache(maxsize=maxsize, ttl=time_to_live)
def update(self, url: str, file_path: str, value: str) -> None:
key = hash((url, file_path))
self._cache[key] = value
def get(self, url: str, file_path: str) -> Union[str, None]:
key = hash((url, file_path))
return self._cache.get(key, None)
def clear(self) -> None:
self._cache.clear()
model_cache = InternalCache()
world_cache = InternalCache()
metadata_cache = cachetools.LRUCache(maxsize=100)
download_cache = cachetools.LRUCache(maxsize=5)
class FileCache:
"""A Fuel Model cache on the local filesystem"""
def __init__(self, location: str):
self._base = Path(location).expanduser()
self._base = self._base / "fuel.ignitionrobotics.org"
self._base.mkdir(exist_ok=True, parents=True)
def _model_loc(self, url: str) -> Path:
cache_loc = self._base
metadata = get_fuel_model_info(url)
username = metadata.owner.lower()
model_name = quote(metadata.name)
version = metadata.version
model_loc = cache_loc / username / "models"
model_loc = model_loc / model_name / str(version)
return model_loc.expanduser()
def get(self, url: str, file_path: str) -> Union[str, None]:
"""Load the SDF from the file cache"""
file_loc = self._model_loc(url) / file_path
if file_loc.exists():
return file_loc.read_text()
else:
return None
def update(self, url: str, file_path: str, sdf_string: str) -> None:
"""Update the file cache after a miss"""
model_loc = self._model_loc(url)
blob = download_fuel_model(url)
with ZipFile(BytesIO(blob)) as model_file:
model_file.extractall(model_loc)
@dataclass
class ModelMetadata:
"""Response object of the Fuel REST API"""
createdAt: str
updatedAt: str
name: str
owner: str
description: str
likes: int
downloads: int
filesize: int
upload_date: str
modify_date: str
license_id: int
license_name: str
license_url: str
license_image: str
permission: int
url_name: int
thumbnail_url: int
version: int
private: bool
tags: List[str] = field(default_factory=list)
categories: List[str] = field(default_factory=list)
@cachetools.cached(metadata_cache)
def get_fuel_model_info(url: str) -> ModelMetadata:
"""Fetch a Fuel model's metadata.
Parameters
----------
url : str
The URL of the fuel model. This matches the URL
used in SDFormat's include tags.
Returns
-------
info : ModelMetadata
A python dataclass of metadata.
Notes
-----
The function caches the most recent 100 calls in an effort to ease the
burden on the Fuel servers and to improve performance. To manually reset
this cache call ``skbot.ignition.fuel.metadata_cache.clear()``. You can
further customize this behavior by changing ``skbot.ignition.fuel.metadata_cache``
to a different cache instance. Check the `cachetools docs
<https://cachetools.readthedocs.io/en/stable/#>`_ for more information.
Examples
--------
.. doctest::
>>> import skbot.ignition as ign
>>> foo = ign.get_fuel_model_info(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone"
... )
>>> # notice that the second call is almost instantaneous due to in-memory caching
>>> foo = ign.get_fuel_model_info(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone"
... )
>>> foo.owner
'OpenRobotics'
>>> foo.version
2
>>> foo.filesize
622427
"""
result = requests.get(url, headers={"accept": "application/json"})
result.raise_for_status()
return ModelMetadata(**result.json())
@cachetools.cached(download_cache)
def download_fuel_model(url: str) -> bytes:
"""Download a model from the Fuel server.
Parameters
----------
url : str
The URL of the model. This is the same as the URL used for
include elements in SDF files.
Returns
-------
blob : bytes
A zip archive (as raw bytes) containing the model files.
Notes
-----
The function caches the most recent 5 calls in an effort to ease the
burden on the Fuel servers and to improve performance. To manually reset
this cache call ``skbot.ignition.fuel.download_cache.clear()``. You can
further customize this behavior by changing ``skbot.ignition.fuel.download_cache``
to a different cache instance. Check the `cachetools docs
<https://cachetools.readthedocs.io/en/stable/#>`_ for more information.
"""
metadata = get_fuel_model_info(url)
username = metadata.owner.lower()
model_name = quote(metadata.name)
version = metadata.version
base_url = f"https://fuel.ignitionrobotics.org/1.0/{username}/models/{model_name}/{version}/"
zip_url = base_url + f"{model_name}.zip"
result = requests.get(
url=zip_url, stream=True, headers={"accept": "application/zip"}
)
result.raise_for_status()
blob = result.content
return blob
def get_fuel_model(
url: str,
*,
file_path: str = "model.sdf",
user_cache: Callable[[str, str], Union[str, None]] = None,
use_internal_cache: bool = True,
use_file_cache: bool = True,
update_file_cache: bool = True,
update_internal_cache: bool = True,
update_user_cache: Callable[[str, str, str], None] = None,
file_cache_dir: str = "~/.ignition/fuel",
) -> str:
"""Get a model file from the Fuel server.
Parameters
----------
url : str
The URL of the model. This is the same as the URL used for
include elements in SDF files.
file_path : str
The path - relative to model root - to the file that should be
downloaded. Defaults to the model's primary SDF at "model.sdf".
user_cache : Callable[[str, str], Union[str, None]]
User supplied caching logic. It is a callable that expects two strings
(url and file_path) and returns either a string (the file) or None. If
user_cache returns a string it is considered a cache hit; if user_cache
returns ``None`` this is interpreted as a cache miss. If ``user_cache is
None`` it always misses.
use_internal_cache : bool
If ``True`` (default), use scikit-bot's internal cache. This is an in-memory
cache that evicts files after 24 hours, or when scikit-bot is unloaded. If
``False``, the internal cache always misses.
use_file_cache : bool
If ``True`` (default), check the local filesystem for a copy of the
model file.
update_file_cache : bool
If ``True`` (default), update the file cache at ``file_cache_dir`` on file
cache misses.
update_internal_cache : bool
If ``True`` (default) update the internal cache if it missed.
update_user_cache : Callable[[str, str, str], None]
If not ``None`` and user_cache missed (returns ``None`` or is ``None``),
update_user_cache is called with the signature ``update_user_cache(url,
file_path, sdf_string)``. The expected behavior is that this call will
update the user supplied caching mechanism.
file_cache_dir : str
The folder to use for the file cache. It follows the same layout as
ignition's fuel-tools; see the Notes for more information. The default
is ``~/.ignition/fuel``, which is the default location for ignition.
Returns
-------
sdf_string : str
A string containing the content of the requested model file (by default
the model's primary SDF at ``model.sdf``).
Notes
-----
Caches are tiered and the order in which they are checked (from first to
last) is: (1) user_cache, (2) internal_cache, (3) file_cache. Updates are
done in reverse order. Further, a cache is only updated if it would have
been used, i.e., if user_cache hits then neither the internal_cache nor the
file_cache are updated since they are never evaluated, even if they would
have produced a miss.
You can manually reset the internal caches by calling::
skbot.ignition.fuel.model_cache.clear()
skbot.ignition.fuel.world_cache.clear()
The file_cache stores models on your local filesystem. It never evicts, so
you should manually delete outdated models. The format of the cache
is::
file_cache_dir/fuel.ignitionrobotics.org/{owner}/models/{model_name}/{version}
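A user-supplied cache can be as simple as a dictionary. The snippet below is
an illustrative sketch only; ``model_url`` is a placeholder for a Fuel model
URL::
    my_cache = {}
    def check_cache(url, file_path):
        return my_cache.get((url, file_path), None)
    def update_cache(url, file_path, sdf_string):
        my_cache[(url, file_path)] = sdf_string
    sdf_string = get_fuel_model(
        model_url, user_cache=check_cache, update_user_cache=update_cache
    )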
Examples
--------
.. doctest::
>>> import skbot.ignition as ign
>>> sdf_string = ign.get_fuel_model(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone"
... )
>>> sdf_string[:75]+" ..."
'<?xml version="1.0" ?>\\n<sdf version="1.5">\\n <model name="Construction Cone ...'
>>> # Notice that (by default) the entire model is cached. Subsequent calls to
>>> # model files thus happen at least at filesystem speed
>>> model_config = ign.get_fuel_model(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone",
... file_path="model.config"
... )
>>> model_config[:75]+" ..."
'<?xml version="1.0"?>\\n\\n<model>\\n <name>Construction Cone</name>\\n <version> ...'
"""
def cache(get_fn: Optional[Callable], update_fn: Optional[Callable]):
def decorator(download_sdf: Callable):
def inner(url, file_path):
sdf_string = None
if get_fn:
# query cache
sdf_string = get_fn(url, file_path)
if sdf_string is None:
# cache miss
sdf_string = download_sdf(url, file_path)
if update_fn is not None:
update_fn(url, file_path, sdf_string)
return sdf_string
return inner
return decorator
# set up file cache
get_from_file = None
update_file = None
if use_file_cache or update_file_cache:
file_cache = FileCache(file_cache_dir)
if use_file_cache:
get_from_file = file_cache.get
if update_file_cache:
update_file = file_cache.update
file_cache_decorator = cache(get_from_file, update_file)
# set up internal cache
get_internal = model_cache.get if use_internal_cache else None
update_internal = model_cache.update if update_internal_cache else None
internal_cache_decorator = cache(get_internal, update_internal)
# the wrapped loading function
@cache(user_cache, update_user_cache)
@internal_cache_decorator
@file_cache_decorator
def _fetch_online(url: str, file_path: str) -> str:
"""Download the model and extract primary SDF"""
blob = download_fuel_model(url)
with ZipFile(BytesIO(blob)) as model_file:
with model_file.open(file_path, "r") as data_file:
file_content = data_file.read().decode("utf-8")
return file_content
return _fetch_online(url, file_path) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/fuel.py | fuel.py | from urllib.parse import quote
import cachetools
import requests
from cachetools import TTLCache
from zipfile import ZipFile
from io import BytesIO
from pathlib import Path
from dataclasses import dataclass, field
from typing import List, Optional, Callable, Union
class InternalCache:
"""Simple Caching structure"""
def __init__(self, maxsize=float("inf"), time_to_live=24 * 60 * 60) -> None:
self._cache = TTLCache(maxsize=maxsize, ttl=time_to_live)
def update(self, url: str, file_path: str, value: str) -> None:
key = hash((url, file_path))
self._cache[key] = value
def get(self, url: str, file_path: str) -> Union[str, None]:
key = hash((url, file_path))
return self._cache.get(key, None)
def clear(self) -> None:
self._cache.clear()
model_cache = InternalCache()
world_cache = InternalCache()
metadata_cache = cachetools.LRUCache(maxsize=100)
download_cache = cachetools.LRUCache(maxsize=5)
class FileCache:
"""A Fuel Model cache on the local filesystem"""
def __init__(self, location: str):
self._base = Path(location).expanduser()
self._base = self._base / "fuel.ignitionrobotics.org"
self._base.mkdir(exist_ok=True, parents=True)
def _model_loc(self, url: str) -> Path:
cache_loc = self._base
metadata = get_fuel_model_info(url)
username = metadata.owner.lower()
model_name = quote(metadata.name)
version = metadata.version
model_loc = cache_loc / username / "models"
model_loc = model_loc / model_name / str(version)
return model_loc.expanduser()
def get(self, url: str, file_path: str) -> Union[str, None]:
"""Load the SDF from the file cache"""
file_loc = self._model_loc(url) / file_path
if file_loc.exists():
return file_loc.read_text()
else:
return None
def update(self, url: str, file_path: str, sdf_string: str) -> None:
"""Update the file cache after a miss"""
model_loc = self._model_loc(url)
blob = download_fuel_model(url)
with ZipFile(BytesIO(blob)) as model_file:
model_file.extractall(model_loc)
@dataclass
class ModelMetadata:
"""Response object of the Fuel REST API"""
createdAt: str
updatedAt: str
name: str
owner: str
description: str
likes: int
downloads: int
filesize: int
upload_date: str
modify_date: str
license_id: int
license_name: str
license_url: str
license_image: str
permission: int
url_name: int
thumbnail_url: int
version: int
private: bool
tags: List[str] = field(default_factory=list)
categories: List[str] = field(default_factory=list)
@cachetools.cached(metadata_cache)
def get_fuel_model_info(url: str) -> ModelMetadata:
"""Fetch a Fuel model's metadata.
Parameters
----------
url : str
The URL of the fuel model. This matches the URL
used in SDFormat's include tags.
Returns
-------
info : ModelMetadata
A python dataclass of metadata.
Notes
-----
The function caches the most recent 100 calls in an effort to ease the
burden on the Fuel servers and to improve performance. To manually reset
this cache call ``skbot.ignition.fuel.metadata_cache.clear()``. You can
further customize this behavior by changing ``skbot.ignition.fuel.metadata_cache``
to a different cache instance. Check the `cachetools docs
<https://cachetools.readthedocs.io/en/stable/#>`_ for more information.
Examples
--------
.. doctest::
>>> import skbot.ignition as ign
>>> foo = ign.get_fuel_model_info(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone"
... )
>>> # notice that the second call is almost instantaneous due to in-memory caching
>>> foo = ign.get_fuel_model_info(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone"
... )
>>> foo.owner
'OpenRobotics'
>>> foo.version
2
>>> foo.filesize
622427
"""
result = requests.get(url, headers={"accept": "application/json"})
result.raise_for_status()
return ModelMetadata(**result.json())
@cachetools.cached(download_cache)
def download_fuel_model(url: str) -> bytes:
"""Download a model from the Fuel server.
Parameters
----------
url : str
The URL of the model. This is the same as the URL used for
include elements in SDF files.
Returns
-------
blob : bytes
A zip archive (as raw bytes) containing the model files.
Notes
-----
The function caches the most recent 5 calls in an effort to ease the
burden on the Fuel servers and to improve performance. To manually reset
this cache call ``skbot.ignition.fuel.download_cache.clear()``. You can
further customize this behavior by changing ``skbot.ignition.fuel.download_cache``
to a different cache instance. Check the `cachetools docs
<https://cachetools.readthedocs.io/en/stable/#>`_ for more information.
"""
metadata = get_fuel_model_info(url)
username = metadata.owner.lower()
model_name = quote(metadata.name)
version = metadata.version
base_url = f"https://fuel.ignitionrobotics.org/1.0/{username}/models/{model_name}/{version}/"
zip_url = base_url + f"{model_name}.zip"
result = requests.get(
url=zip_url, stream=True, headers={"accept": "application/zip"}
)
result.raise_for_status()
blob = result.content
return blob
def get_fuel_model(
url: str,
*,
file_path: str = "model.sdf",
user_cache: Callable[[str, str], Union[str, None]] = None,
use_internal_cache: bool = True,
use_file_cache: bool = True,
update_file_cache: bool = True,
update_internal_cache: bool = True,
update_user_cache: Callable[[str, str, str], None] = None,
file_cache_dir: str = "~/.ignition/fuel",
) -> str:
"""Get a model file from the Fuel server.
Parameters
----------
url : str
The URL of the model. This is the same as the URL used for
include elements in SDF files.
file_path : str
The path - relative to model root - to the file that should be
downloaded. Defaults to the model's primary SDF at "model.sdf".
user_cache : Callable[[str, str], Union[str, None]]
User supplied caching logic. It is a callable that expects two strings
(url and file_path) and returns either a string (the file) or None. If
user_cache returns a string it is considered a cache hit; if user_cache
returns ``None`` this is interpreted as a cache miss. If ``user_cache is
None`` it always misses.
use_internal_cache : bool
If ``True`` (default), use scikit-bot's internal cache. This is an in-memory
cache that evicts files after 24 hours, or when scikit-bot is unloaded. If
``False``, the internal cache always misses.
use_file_cache : bool
If ``True`` (default), check the local filesystem for a copy of the
model file.
update_file_cache : bool
If ``True`` (default), update the file cache at ``file_cache_dir`` on file
cache misses.
update_internal_cache : bool
If ``True`` (default) update the internal cache if it missed.
update_user_cache : Callable[[str, str, str], None]
If not ``None`` and user_cache missed (returns ``None`` or is ``None``),
update_user_cache is called with the signature ``update_user_cache(url,
file_path, sdf_string)``. The expected behavior is that this call will
update the user supplied caching mechanism.
file_cache_dir : str
The folder to use for the file cache. It follows the same layout as
ignition's fuel-tools; see the Notes for more information. The default
is ``~/.ignition/fuel``, which is the default location for ignition.
Returns
-------
sdf_string : str
A string containing the content of the requested model file (by default
the model's primary SDF at ``model.sdf``).
Notes
-----
Caches are tiered and the order in which they are checked (from first to
last) is: (1) user_cache, (2) internal_cache, (3) file_cache. Updates are
done in reverse order. Further, a cache is only updated if it would have
been used, i.e., if user_cache hits then neither the internal_cache nor the
file_cache are updated since they are never evaluated, even if they would
have produced a miss.
You can manually reset the internal caches by calling::
skbot.ignition.fuel.model_cache.clear()
skbot.ignition.fuel.world_cache.clear()
The file_cache stores models on your local filesystem. It never evicts, so
you should manually delete outdated models. The format of the cache
is::
file_cache_dir/fuel.ignitionrobotics.org/{owner}/models/{model_name}/{version}
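A user-supplied cache can be as simple as a dictionary. The snippet below is
an illustrative sketch only; ``model_url`` is a placeholder for a Fuel model
URL::
    my_cache = {}
    def check_cache(url, file_path):
        return my_cache.get((url, file_path), None)
    def update_cache(url, file_path, sdf_string):
        my_cache[(url, file_path)] = sdf_string
    sdf_string = get_fuel_model(
        model_url, user_cache=check_cache, update_user_cache=update_cache
    )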
Examples
--------
.. doctest::
>>> import skbot.ignition as ign
>>> sdf_string = ign.get_fuel_model(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone"
... )
>>> sdf_string[:75]+" ..."
'<?xml version="1.0" ?>\\n<sdf version="1.5">\\n <model name="Construction Cone ...'
>>> # Notice that (by default) the entire model is cached. Subsequent calls to
>>> # model files thus happen at least at filesystem speed
>>> model_config = ign.get_fuel_model(
... "https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Construction%20Cone",
... file_path="model.config"
... )
>>> model_config[:75]+" ..."
'<?xml version="1.0"?>\\n\\n<model>\\n <name>Construction Cone</name>\\n <version> ...'
"""
def cache(get_fn: Optional[Callable], update_fn: Optional[Callable]):
def decorator(download_sdf: Callable):
def inner(url, file_path):
sdf_string = None
if get_fn:
# query cache
sdf_string = get_fn(url, file_path)
if sdf_string is None:
# cache miss
sdf_string = download_sdf(url, file_path)
if update_fn is not None:
update_fn(url, file_path, sdf_string)
return sdf_string
return inner
return decorator
# set up file cache
get_from_file = None
update_file = None
if use_file_cache or update_file_cache:
file_cache = FileCache(file_cache_dir)
if use_file_cache:
get_from_file = file_cache.get
if update_file_cache:
update_file = file_cache.update
file_cache_decorator = cache(get_from_file, update_file)
# set up internal cache
get_internal = model_cache.get if use_internal_cache else None
update_internal = model_cache.update if update_internal_cache else None
internal_cache_decorator = cache(get_internal, update_internal)
# the wrapped loading function
@cache(user_cache, update_user_cache)
@internal_cache_decorator
@file_cache_decorator
def _fetch_online(url: str, file_path: str) -> str:
"""Download the model and extract primary SDF"""
blob = download_fuel_model(url)
with ZipFile(BytesIO(blob)) as model_file:
with model_file.open(file_path, "r") as data_file:
file_content = data_file.read().decode("utf-8")
return file_content
return _fetch_online(url, file_path) | 0.883494 | 0.227126 |
from xml.etree import ElementTree
from xsdata.formats.dataclass import parsers
from xsdata.formats.dataclass.context import XmlContext
from xsdata.formats.dataclass.parsers import XmlParser
from xsdata.formats.dataclass.parsers.config import ParserConfig
from xsdata.formats.dataclass.serializers import XmlSerializer
from xsdata.formats.dataclass.serializers.config import SerializerConfig
from xsdata.formats.dataclass.parsers import handlers
from xsdata.exceptions import ParserError as XSDataParserError
import io
from typing import Dict, Callable, Type, TypeVar
import importlib
import warnings
from .exceptions import ParseError
T = TypeVar("T")
# available SDF elements by version
_parser_roots = {
"1.0": "..bindings.v10",
"1.2": "..bindings.v12",
"1.3": "..bindings.v13",
"1.4": "..bindings.v14",
"1.5": "..bindings.v15",
"1.6": "..bindings.v16",
"1.7": "..bindings.v17",
"1.8": "..bindings.v18",
}
# recommended to reuse the same parser context
# see: https://xsdata.readthedocs.io/en/latest/xml.html
xml_ctx = XmlContext()
def get_version(sdf: str) -> str:
"""Returns the version of a SDF string.
Parameters
----------
sdf : str
The SDFormat XML to be parsed.
Returns
-------
version : str
A string containing the SDF version, e.g. "1.8".
Notes
-----
This function only checks the root tag and does not parse the entire string.
Examples
--------
.. minigallery:: skbot.ignition.sdformat.get_version
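A minimal call on an illustrative stub (not a complete SDF file)::
    get_version('<sdf version="1.8"></sdf>')  # returns "1.8"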
"""
parser = ElementTree.iterparse(io.StringIO(sdf), events=("start",))
_, root = next(parser)
if root.tag != "sdf":
raise ParseError("SDF root element not found.")
if "version" in root.attrib:
version = root.attrib["version"]
if version not in _parser_roots.keys():
raise ParseError(f"Invalid version: {version}")
return root.attrib["version"]
else:
raise ParseError("SDF doesnt specify a version.")
def loads(
sdf: str,
*,
version: str = None,
custom_constructor: Dict[Type[T], Callable] = None,
handler: str = None,
):
"""Convert an XML string into a sdformat.models tree.
Parameters
----------
sdf : str
The SDFormat XML to be parsed.
version : str
The SDFormat version to use while parsing. If None (default) it will
automatically determine the version from the <sdf> element. If specified
the given version will be used instead.
custom_constructor : Dict[Type[T], Callable]
Overwrite the default constructor for a certain model class with a
callable. This is useful for doing pre- or post-initialization of
bound classes or to replace them entirely.
handler : str
The handler that the parser should use when traversing the XML. If
unspecified the default xsData parser will be used (lxml if it is
installed, otherwise xml.etree). Possible values are:
"XmlEventHandler"
A xml.etree event-based handler.
"LxmlEventHandler"
A lxml.etree event-based handler.
Returns
-------
SdfRoot : object
An instance of ``skbot.ignition.models.vXX.Sdf`` where XX corresponds to the
version of the SDFormat XML.
Notes
-----
``custom_constructor`` is currently disabled and has no effect. It will
become available with xsData v21.8.
Examples
--------
.. minigallery:: skbot.ignition.sdformat.loads
"""
if custom_constructor is None:
custom_constructor = dict()
def custom_class_factory(clazz, params):
if clazz in custom_constructor:
return custom_constructor[clazz](**params)
return clazz(**params)
if version is None:
version = get_version(sdf)
if handler in ["XmlSaxHandler", "LxmlSaxHandler"]:
warnings.warn(
"SAX handlers have been deprecated in xsData >= 21.9;"
" falling back to EventHandler. If you need the SAX handler, please open an issue."
" To make this warning dissapear change `handler` to the corresponding EventHandler.",
DeprecationWarning,
)
if handler == "XmlSaxHandler":
handler = "XmlEventHandler"
elif handler == "LxmlSaxHandler":
handler = "LxmlEventHandler"
handler_class = {
None: handlers.default_handler(),
"XmlEventHandler": handlers.XmlEventHandler,
"LxmlEventHandler": handlers.LxmlEventHandler,
}[handler]
binding_location = _parser_roots[version]
bindings = importlib.import_module(binding_location, __name__)
sdf_parser = XmlParser(
ParserConfig(class_factory=custom_class_factory),
context=xml_ctx,
handler=handler_class,
)
try:
root_el = sdf_parser.from_string(sdf, bindings.Sdf)
except XSDataParserError as e:
raise ParseError("Invalid SDFormat XML.") from e
return root_el
def dumps(root_element, *, format=False) -> str:
"""Serialize a SDFormat object to an XML string.
Parameters
----------
root_element : object
An instance of ``skbot.ignition.models.vXX.Sdf``. XX represents the SDFormat
version and can be any version currently supported by scikit-bot.
format : bool
If true, add indentation and linebreaks to the output to increase human
readability. If false (default) the entire XML will appear as a single
line with no spaces between elements.
Returns
-------
sdformat_string : str
A string containing SDFormat XML representing the given input.
Examples
--------
.. minigallery:: skbot.ignition.sdformat.dumps
"""
serializer = XmlSerializer(config=SerializerConfig(pretty_print=format))
return serializer.render(root_element) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/sdformat.py | sdformat.py | from xml.etree import ElementTree
from xsdata.formats.dataclass import parsers
from xsdata.formats.dataclass.context import XmlContext
from xsdata.formats.dataclass.parsers import XmlParser
from xsdata.formats.dataclass.parsers.config import ParserConfig
from xsdata.formats.dataclass.serializers import XmlSerializer
from xsdata.formats.dataclass.serializers.config import SerializerConfig
from xsdata.formats.dataclass.parsers import handlers
from xsdata.exceptions import ParserError as XSDataParserError
import io
from typing import Dict, Callable, Type, TypeVar
import importlib
import warnings
from .exceptions import ParseError
T = TypeVar("T")
# available SDF elements by version
_parser_roots = {
"1.0": "..bindings.v10",
"1.2": "..bindings.v12",
"1.3": "..bindings.v13",
"1.4": "..bindings.v14",
"1.5": "..bindings.v15",
"1.6": "..bindings.v16",
"1.7": "..bindings.v17",
"1.8": "..bindings.v18",
}
# recommended to reuse the same parser context
# see: https://xsdata.readthedocs.io/en/latest/xml.html
xml_ctx = XmlContext()
def get_version(sdf: str) -> str:
"""Returns the version of a SDF string.
Parameters
----------
sdf : str
The SDFormat XML to be parsed.
Returns
-------
version : str
A string containing the SDF version, e.g. "1.8".
Notes
-----
This function only checks the root tag and does not parse the entire string.
Examples
--------
.. minigallery:: skbot.ignition.sdformat.get_version
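A minimal call on an illustrative stub (not a complete SDF file)::
    get_version('<sdf version="1.8"></sdf>')  # returns "1.8"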
"""
parser = ElementTree.iterparse(io.StringIO(sdf), events=("start",))
_, root = next(parser)
if root.tag != "sdf":
raise ParseError("SDF root element not found.")
if "version" in root.attrib:
version = root.attrib["version"]
if version not in _parser_roots.keys():
raise ParseError(f"Invalid version: {version}")
return root.attrib["version"]
else:
raise ParseError("SDF doesnt specify a version.")
def loads(
sdf: str,
*,
version: str = None,
custom_constructor: Dict[Type[T], Callable] = None,
handler: str = None,
):
"""Convert an XML string into a sdformat.models tree.
Parameters
----------
sdf : str
The SDFormat XML to be parsed.
version : str
The SDFormat version to use while parsing. If None (default) it will
automatically determine the version from the <sdf> element. If specified
the given version will be used instead.
custom_constructor : Dict[Type[T], Callable]
Overwrite the default constructor for a certain model class with a
callable. This is useful for doing pre- or post-initialization of
bound classes or to replace them entirely.
handler : str
The handler that the parser should use when traversing the XML. If
unspecified the default xsData parser will be used (lxml if it is
installed, otherwise xml.etree). Possible values are:
"XmlEventHandler"
A xml.etree event-based handler.
"LxmlEventHandler"
A lxml.etree event-based handler.
Returns
-------
SdfRoot : object
An instance of ``skbot.ignition.models.vXX.Sdf`` where XX corresponds to the
version of the SDFormat XML.
Notes
-----
``custom_constructor`` is currently disabled and has no effect. It will
become available with xsData v21.8.
Examples
--------
.. minigallery:: skbot.ignition.sdformat.loads
"""
if custom_constructor is None:
custom_constructor = dict()
def custom_class_factory(clazz, params):
if clazz in custom_constructor:
return custom_constructor[clazz](**params)
return clazz(**params)
if version is None:
version = get_version(sdf)
if handler in ["XmlSaxHandler", "LxmlSaxHandler"]:
warnings.warn(
"SAX handlers have been deprecated in xsData >= 21.9;"
" falling back to EventHandler. If you need the SAX handler, please open an issue."
" To make this warning dissapear change `handler` to the corresponding EventHandler.",
DeprecationWarning,
)
if handler == "XmlSaxHandler":
handler = "XmlEventHandler"
elif handler == "LxmlSaxHandler":
handler = "LxmlEventHandler"
handler_class = {
None: handlers.default_handler(),
"XmlEventHandler": handlers.XmlEventHandler,
"LxmlEventHandler": handlers.LxmlEventHandler,
}[handler]
binding_location = _parser_roots[version]
bindings = importlib.import_module(binding_location, __name__)
sdf_parser = XmlParser(
ParserConfig(class_factory=custom_class_factory),
context=xml_ctx,
handler=handler_class,
)
try:
root_el = sdf_parser.from_string(sdf, bindings.Sdf)
except XSDataParserError as e:
raise ParseError("Invalid SDFormat XML.") from e
return root_el
def dumps(root_element, *, format=False) -> str:
"""Serialize a SDFormat object to an XML string.
Parameters
----------
root_element : object
An instance of ``skbot.ignition.models.vXX.Sdf``. XX represents the SDFormat
version and can be any version currently supported by scikit-bot.
format : bool
If true, add indentation and linebreaks to the output to increase human
readability. If false (default) the entire XML will appear as a single
line with no spaces between elements.
Returns
-------
sdformat_string : str
A string containing SDFormat XML representing the given input.
Examples
--------
.. minigallery:: skbot.ignition.sdformat.dumps
"""
serializer = XmlSerializer(config=SerializerConfig(pretty_print=format))
return serializer.render(root_element) | 0.897246 | 0.240462 |
from typing import Union, List, Tuple, Dict
from itertools import chain
from ... import transform as tf
from .load_as_generic import loads_generic
from .generic_sdf.world import World
def to_frame_graph(
sdf: str,
*,
unwrap: bool = True,
insert_world_frame: bool = True,
shape: Tuple[int] = (3,),
axis: int = -1
) -> Union[tf.Frame, List[tf.Frame]]:
"""Create a frame graph from a sdformat string.
.. versionadded:: 0.8.0
Added the ability to limit loading to worlds
.. versionadded:: 0.6.0
This function has been added to the library.
Parameters
----------
sdf : str
A SDFormat XML string describing one (or many) worlds.
unwrap : bool
If True (default) and the sdf only contains a single light, model, or
world element, return that element's frame. If the sdf contains multiple
lights, models, or worlds, a list of root frames is returned. If False,
always return a list of frames.
insert_world_frame : bool
If ``False``, creation of frame graphs is restricted to world elements
contained in the provided SDF. This is because non-world elements
(simulation fragments) may refer to a ``world`` frame that is defined
outside of the provided SDF and hence the full graph can't be
determined. As a consequence, any ``model``, ``actor``, or ``light``
elements are ignored.
If ``True`` (default), this function will insert a ``world`` frame into
the graph of each simulation fragment (non-world element) to allow
smooth construction of the frame graph.
The default is ``True``; however, it will change to ``False`` starting
with scikit-bot v1.0.
shape : tuple
A tuple describing the shape of elements that the resulting graph should
transform. This can be used to add batch dimensions to the graph, for
example to perform vectorized computation on multiple instances of the
same world in different states, or for batched coordinate
transformation. Defaults to (3,), which is the shape of a single 3D
vector in Euclidean space.
axis : int
The axis along which elements are stored. The axis must have length 3
(since SDFormat describes 3-dimensional worlds), and all other axes are
considered batch dimensions. Defaults to -1.
Returns
-------
frame_graph : Union[Frame, List[Frame]]
A :class:`skbot.transform.Frame` or list of Frames depending on the value of
``unwrap`` and the number of elements in the SDF's root element.
See Also
--------
:mod:`skbot.transform`
Notes
-----
Frames inside the graph are named after the frames defined by the SDF. You can
retrieve them using :func:`skbot.transform.Frame.find_frame`.
Joints are implicit within the frame graph. The joint frame that is attached
to the child frame is named after the joint (<joint_name>), and the
(implicit) joint frame attached to the parent is named
(<joint_name>_parent). The link between the two frames can be retrieved via
:func:`skbot.transform.Frame.links_between`. For example, if there is a
joint named "robot_joint0", its link can be retrieved using::
child_frame = frame_graph.find_frame(".../robot_joint0")
parent_frame = frame_graph.find_frame(".../robot_joint0_parent")
link = child_frame.links_between(parent_frame)[0]
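A hypothetical end-to-end call (the file name is illustrative)::
    with open("my_world.sdf", "r") as sdf_file:
        frame_graph = to_frame_graph(sdf_file.read())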
"""
root = loads_generic(sdf)
declared_frames = root.declared_frames()
dynamic_graphs = root.to_dynamic_graph(declared_frames, shape=shape, axis=axis)
if insert_world_frame:
candidates = dynamic_graphs.values()
else:
candidates = [dynamic_graphs["worlds"]]
graphs = list(chain.from_iterable(candidates))
if len(graphs) == 0:
raise ValueError("No graphs could be loaded from the provided SDF.")
if unwrap and len(graphs) == 1:
return graphs[0]
else:
return graphs | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/transform_factory.py | transform_factory.py | 0.95996 | 0.703569
from dataclasses import dataclass, field
from typing import Optional
from .geometry import Geometry
__NAMESPACE__ = "sdformat/v1.2/collision.xsd"
@dataclass
class Collision:
"""The collision properties of a link.
Note that this can be different from the visual properties of a
link; for example, simpler collision models are often used to reduce
computation time.
Parameters
----------
laser_retro: intensity value returned by laser sensor.
max_contacts: Maximum number of contacts allowed between two
entities. This value overrides the max_contacts element defined
in physics.
pose: The reference frame of the collision element, relative to the
reference frame of the link.
geometry: The shape of the visual or collision object.
surface: The surface parameters
name: Unique name for the collision element within the scope of the
parent link.
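A hypothetical construction sketch (all values are illustrative and
``Geometry()`` is assumed to work with its defaults)::
    collision = Collision(
        name="box_collision",
        pose="0 0 0.5 0 0 0",
        max_contacts=20,
        geometry=Geometry(),
    )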
"""
class Meta:
name = "collision"
laser_retro: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=10,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
geometry: Optional[Geometry] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
surface: Optional["Collision.Surface"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Surface:
"""
The surface parameters.
"""
bounce: Optional["Collision.Surface.Bounce"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
friction: Optional["Collision.Surface.Friction"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
contact: Optional["Collision.Surface.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Bounce:
"""
Parameters
----------
restitution_coefficient: Bounciness coefficient of
restitution, from [0...1], where 0=no bounciness.
threshold: Bounce velocity threshold, below which effective
coefficient of restitution is 0.
"""
restitution_coefficient: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
threshold: float = field(
default=100000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Friction:
"""
Parameters
----------
ode: ODE friction parameters
"""
ode: Optional["Collision.Surface.Friction.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE friction parameters.
Parameters
----------
mu: Coefficient of friction in the range of [0..1].
mu2: Second coefficient of friction in the range of
[0..1]
fdir1: 3-tuple specifying direction of mu1 in the
collision local reference frame.
slip1: Force dependent slip direction 1 in collision
local frame, between the range of [0..1].
slip2: Force dependent slip direction 2 in collision
local frame, between the range of [0..1].
"""
mu: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
mu2: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fdir1: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
slip1: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
slip2: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Contact:
"""
Parameters
----------
ode: ODE contact parameters
"""
ode: Optional["Collision.Surface.Contact.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE contact parameters.
Parameters
----------
soft_cfm: Soft constraint force mixing.
soft_erp: Soft error reduction parameter
kp: dynamically "stiffness"-equivalent coefficient for
contact joints
kd: dynamically "damping"-equivalent coefficient for
contact joints
max_vel: maximum contact correction velocity truncation
term.
min_depth: minimum allowable depth before contact
correction impulse is applied
"""
soft_cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
soft_erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kp: float = field(
default=1000000000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kd: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_vel: float = field(
default=0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_depth: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/collision.py | collision.py | 0.945901 | 0.466116
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "sdformat/v1.2/light.xsd"
@dataclass
class Light:
"""
The light element describes a light source.
Parameters
----------
cast_shadows: When true, the light will cast shadows.
pose: A position and orientation in the global coordinate frame for
the light.
diffuse: Diffuse light color
specular: Specular light color
attenuation: Light attenuation
direction: Direction of the light, only applicable for spot and
directional lights.
spot: Spot light parameters
name: A unique name for the light.
type: The light type: point, directional, spot.
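A hypothetical construction sketch (values are illustrative)::
    light = Light(
        name="sun",
        type="directional",
        direction="0 0 -1",
        attenuation=Light.Attenuation(range=100.0, constant=0.9),
    )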
"""
class Meta:
name = "light"
cast_shadows: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
diffuse: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
specular: str = field(
default=".1 .1 .1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
attenuation: Optional["Light.Attenuation"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
direction: str = field(
default="0 0 -1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
spot: Optional["Light.Spot"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: str = field(
default="__default__",
metadata={
"type": "Attribute",
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Attenuation:
"""
Light attenuation.
Parameters
----------
range: Range of the light
linear: The linear attenuation factor: 1 means attenuate evenly
over the distance.
constant: The constant attenuation factor: 1.0 means never
attenuate, 0.0 is complete attenuation.
quadratic: The quadratic attenuation factor: adds a curvature to
the attenuation.
"""
range: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
linear: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
constant: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
quadratic: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Spot:
"""
Spot light parameters.
Parameters
----------
inner_angle: Angle covered by the bright inner cone
outer_angle: Angle covered by the outer cone
falloff: The rate of falloff between the inner and outer cones.
1.0 means a linear falloff, less means slower falloff,
higher means faster falloff.
"""
inner_angle: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
outer_angle: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
falloff: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/light.py | light.py | 0.947137 | 0.471649
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "sdformat/v1.2/scene.xsd"
@dataclass
class Scene:
"""
Specifies the look of the environment.
Parameters
----------
ambient: Color of the ambient light.
background: Color of the background.
sky: Properties for the sky
shadows: Enable/disable shadows
fog: Controls fog
grid: Enable/disable the grid
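A hypothetical construction sketch (values are illustrative)::
    scene = Scene(
        ambient="0.4 0.4 0.4 1.0",
        shadows=True,
        fog=Scene.Fog(type="linear", start=5.0, end=50.0),
    )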
"""
class Meta:
name = "scene"
ambient: str = field(
default="0.0 0.0 0.0 1.0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
background: str = field(
default=".7 .7 .7 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
sky: Optional["Scene.Sky"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
shadows: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fog: Optional["Scene.Fog"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
grid: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Sky:
"""
Properties for the sky.
Parameters
----------
time: Time of day [0..24]
sunrise: Sunrise time [0..24]
sunset: Sunset time [0..24]
clouds: Properties of the clouds
"""
time: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sunrise: float = field(
default=6.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sunset: float = field(
default=20.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
clouds: Optional["Scene.Sky.Clouds"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Clouds:
"""
Properties of the clouds.
Parameters
----------
speed: Speed of the clouds
direction: Direction of the cloud movement
humidity: Density of clouds
mean_size: Average size of the clouds
ambient: Ambient cloud color
"""
speed: float = field(
default=0.6,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
direction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
humidity: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
mean_size: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ambient: str = field(
default=".8 .8 .8 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
@dataclass
class Fog:
"""
Controls fog.
Parameters
----------
color: Fog color
type: Fog type: constant, linear, quadratic
start: Distance to start of fog
end: Distance to end of fog
density: Density of fog
"""
color: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
type: str = field(
default="none",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
start: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
end: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
density: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/scene.py | scene.py | 0.9342 | 0.460228
from dataclasses import dataclass, field
from typing import Optional
from .geometry import Geometry
__NAMESPACE__ = "sdformat/v1.2/visual.xsd"
@dataclass
class Visual:
"""The visual properties of the link.
This element specifies the shape of the object (box, cylinder, etc.)
for visualization purposes.
Parameters
----------
cast_shadows: If true, the visual will cast shadows.
laser_retro: will be implemented in a future release.
transparency: The amount of transparency (0 = opaque, 1 = fully
transparent)
pose: Origin of the visual relative to its parent.
material: The material of the visual element.
geometry: The shape of the visual or collision object.
name: Unique name for the visual element within the scope of the
parent link.
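A hypothetical construction sketch (values are illustrative and
``Geometry()`` is assumed to work with its defaults)::
    visual = Visual(
        name="box_visual",
        transparency=0.25,
        geometry=Geometry(),
        material=Visual.Material(diffuse="1 0 0 1"),
    )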
"""
class Meta:
name = "visual"
cast_shadows: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
laser_retro: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
transparency: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
material: Optional["Visual.Material"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
geometry: Optional[Geometry] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Material:
"""
The material of the visual element.
Parameters
----------
script: Name of material from an installed script file. This
will override the color element if the script exists.
shader:
ambient: The ambient color of a material specified by a set of
four numbers representing red/green/blue/alpha, each in the
range of [0,1].
diffuse: The diffuse color of a material specified by a set of
four numbers representing red/green/blue/alpha, each in the
range of [0,1].
specular: The specular color of a material specified by a set of
four numbers representing red/green/blue/alpha, each in the
range of [0,1].
emissive: The emissive color of a material specified by a set of
four numbers representing red/green/blue/alpha, each in the
range of [0,1].
"""
script: Optional["Visual.Material.Script"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
shader: Optional["Visual.Material.Shader"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ambient: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
diffuse: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
specular: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
emissive: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
@dataclass
class Script:
"""Name of material from an installed script file.
This will override the color element if the script exists.
Parameters
----------
uri: URI of the material script file
name: Name of the script within the script file
"""
uri: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Shader:
"""
Parameters
----------
normal_map: filename of the normal map
type: vertex, pixel, normal_map_object_space,
normal_map_tangent_space
"""
normal_map: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/visual.py | visual.py | 0.922469 | 0.507507
from dataclasses import dataclass, field
from typing import List, Optional
from .collision import Collision
from .sensor import Sensor
from .visual import Visual
__NAMESPACE__ = "sdformat/v1.2/link.xsd"
@dataclass
class Link:
"""A physical link with inertia, collision, and visual properties.
A link must be a child of a model, and any number of links may exist
in a model.
Parameters
----------
gravity: If true, the link is affected by gravity.
self_collide: If true, the link can collide with other links in the
model.
kinematic: If true, the link is kinematic only
pose: This is the pose of the link reference frame, relative to the
model reference frame.
velocity_decay: Exponential damping of the link's velocity.
inertial: The inertial properties of the link.
collision: The collision properties of a link. Note that this can be
different from the visual properties of a link; for example,
simpler collision models are often used to reduce computation
time.
visual: The visual properties of the link. This element specifies
the shape of the object (box, cylinder, etc.) for visualization
purposes.
sensor: The sensor tag describes the type and properties of a
sensor.
projector:
name: A unique name for the link within the scope of the model.
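A hypothetical construction sketch (values are illustrative)::
    link = Link(
        name="chassis",
        pose="0 0 0.2 0 0 0",
        inertial=Link.Inertial(
            mass=2.5,
            inertia=Link.Inertial.Inertia(ixx=0.1, iyy=0.1, izz=0.1),
        ),
    )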
"""
class Meta:
name = "link"
gravity: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
self_collide: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kinematic: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
velocity_decay: Optional["Link.VelocityDecay"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
inertial: Optional["Link.Inertial"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
collision: List[Collision] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
visual: List[Visual] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
sensor: List[Sensor] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
projector: Optional["Link.Projector"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class VelocityDecay:
"""
Exponential damping of the link's velocity.
Parameters
----------
linear: Linear damping
angular: Angular damping
"""
linear: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
angular: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Inertial:
"""
The inertial properties of the link.
Parameters
----------
mass: The mass of the link.
pose: This is the pose of the inertial reference frame, relative
to the link reference frame. The origin of the inertial
reference frame needs to be at the center of gravity. The
axes of the inertial reference frame do not need to be
aligned with the principal axes of the inertia.
inertia: The 3x3 rotational inertia matrix. Because the
rotational inertia matrix is symmetric, only 6 above-
diagonal elements of this matrix are specified here, using
the attributes ixx, ixy, ixz, iyy, iyz, izz.
"""
mass: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
inertia: Optional["Link.Inertial.Inertia"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Inertia:
"""The 3x3 rotational inertia matrix.
Because the rotational inertia matrix is symmetric, only 6
above-diagonal elements of this matrix are specified here,
using the attributes ixx, ixy, ixz, iyy, iyz, izz.
"""
ixx: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ixy: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ixz: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iyy: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iyz: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
izz: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Projector:
"""
Parameters
----------
texture: Texture name
pose: Pose of the projector
fov: Field of view
near_clip: Near clip distance
far_clip: far clip distance
plugin: A plugin is a dynamically loaded chunk of code. It can
exist as a child of world, model, and sensor.
name: Name of the projector
"""
texture: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
fov: float = field(
default=0.785,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
near_clip: float = field(
default=0.1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
far_clip: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plugin: List["Link.Projector.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies
child elements into the SDFormat element so that a
plugin can access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the
filename is not a full path name, the file will be
searched for in the configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/link.py | link.py | 0.945876 | 0.603815
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "sdformat/v1.2/joint.xsd"
@dataclass
class Joint:
"""
A joint connects two links with kinematic and dynamic properties.
Parameters
----------
parent: Name of the parent link
child: Name of the child link
pose: offset from child link origin in child link frame.
thread_pitch:
axis: The joint axis specified in the model frame. This is the axis
of rotation for revolute joints, the axis of translation for
prismatic joints. The axis is currently specified in the model
frame of reference, but this will be changed to the joint frame
in a future version of SDFormat (see gazebo issue #494).
axis2: The second joint axis specified in the model frame. This is
the second axis of rotation for revolute2 joints and universal
joints. The axis is currently specified in the model frame of
reference, but this will be changed to the joint frame in a future
version of SDFormat (see gazebo issue #494).
physics: Parameters that are specific to a certain physics engine.
name: A unique name for the joint within the scope of the model.
type: The type of joint, which must be one of the following:
(revolute) a hinge joint that rotates on a single axis with
either a fixed or continuous range of motion, (revolute2) same
as two revolute joints connected in series, (prismatic) a
sliding joint that slides along an axis with a limited range
specified by upper and lower limits, (ball) a ball and socket
joint, (universal), like a ball joint, but constrains one degree
of freedom, (piston) similar to a Slider joint except that
rotation around the translation axis is possible.
"""
class Meta:
name = "joint"
parent: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
child: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
thread_pitch: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
axis: Optional["Joint.Axis"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
axis2: Optional["Joint.Axis2"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
physics: Optional["Joint.Physics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Axis:
"""The joint axis specified in the model frame.
This is the axis of rotation for revolute joints, the axis of
translation for prismatic joints. The axis is currently
specified in the model frame of reference, but this will be
changed to the joint frame in future version of SDFormat (see
gazebo issue #494).
Parameters
----------
xyz: Represents the x,y,z components of a vector. The vector
should be normalized.
dynamics: An element specifying physical properties of the
joint. These values are used to specify modeling properties
of the joint, particularly useful for simulation.
limit: specifies the limits of this joint
"""
xyz: str = field(
default="0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
dynamics: Optional["Joint.Axis.Dynamics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
limit: Optional["Joint.Axis.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Dynamics:
"""An element specifying physical properties of the joint.
These values are used to specify modeling properties of the
joint, particularly useful for simulation.
Parameters
----------
damping: The physical velocity dependent viscous damping
coefficient of the joint.
friction: The physical static friction value of the joint.
"""
damping: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Limit:
"""
specifies the limits of this joint.
Parameters
----------
lower: An attribute specifying the lower joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
upper: An attribute specifying the upper joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
effort: (not implemented) An attribute for enforcing the
maximum joint effort.
velocity: (not implemented) An attribute for enforcing the
maximum joint velocity.
"""
lower: float = field(
default=-1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
upper: float = field(
default=1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
effort: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Axis2:
"""The second joint axis specified in the model frame.
This is the second axis of rotation for revolute2 joints and
universal joints. The axis is currently specified in the model
frame of reference, but this will be changed to the joint frame
in future version of SDFormat (see gazebo issue #494).
Parameters
----------
xyz: Represents the x,y,z components of a vector. The vector
should be normalized.
dynamics: An element specifying physical properties of the
joint. These values are used to specify modeling properties
of the joint, particularly useful for simulation.
limit:
"""
xyz: str = field(
default="0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
dynamics: Optional["Joint.Axis2.Dynamics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
limit: Optional["Joint.Axis2.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Dynamics:
"""An element specifying physical properties of the joint.
These values are used to specify modeling properties of the
joint, particularly useful for simulation.
Parameters
----------
damping: The physical velocity dependent viscous damping
coefficient of the joint.
friction: The physical static friction value of the joint.
"""
damping: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Limit:
"""
Parameters
----------
lower: An attribute specifying the lower joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
upper: An attribute specifying the upper joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
effort: (not implemented) An attribute for enforcing the
maximum joint effort.
velocity: (not implemented) An attribute for enforcing the
maximum joint velocity.
"""
lower: float = field(
default=-1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
upper: float = field(
default=1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
effort: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Physics:
"""
Parameters that are specific to a certain physics engine.
Parameters
----------
ode: ODE specific parameters
"""
ode: Optional["Joint.Physics.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE specific parameters.
Parameters
----------
            fudge_factor: Scale the excess force in a joint motor at joint
limits. Should be between zero and one.
cfm: Constraint force mixing used when not at a stop
bounce: Bounciness of the limits
max_force: Maximum force or torque used to reach the desired
velocity.
velocity: The desired velocity of the joint. Should only be
set if you want the joint to move on load.
limit:
suspension:
"""
fudge_factor: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
bounce: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_force: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
limit: Optional["Joint.Physics.Ode.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
suspension: Optional["Joint.Physics.Ode.Suspension"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Limit:
"""
Parameters
----------
cfm: Constraint force mixing parameter used by the joint
stop
erp: Error reduction parameter used by the joint stop
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Suspension:
"""
Parameters
----------
cfm: Suspension constraint force mixing parameter
erp: Suspension error reduction parameter
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
                ) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/joint.py | joint.py |
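# --- Illustrative usage sketch (not part of the generated bindings) -----------
# Constructs a revolute joint from the Joint dataclass defined above. The import
# path is assumed from this package layout; the link and joint names are
# hypothetical.
from skbot.ignition.sdformat.bindings.v12.joint import Joint

hinge = Joint(
    name="base_to_arm",
    type="revolute",          # one of the joint types listed in the docstring
    parent="base_link",       # name of the parent link
    child="arm_link",         # name of the child link
    axis=Joint.Axis(
        xyz="0 0 1",          # rotation axis, expressed in the model frame (v1.2)
        limit=Joint.Axis.Limit(lower=-1.57, upper=1.57),
    ),
)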
from dataclasses import dataclass, field
from typing import List, Optional
from .actor import Actor
from .joint import Joint
from .light import Light
from .model import Model
from .physics import Physics
from .scene import Scene
from .state import State
__NAMESPACE__ = "sdformat/v1.2/world.xsd"
@dataclass
class World:
"""
The world element encapsulates an entire world description including:
models, scene, physics, joints, and plugins.
Parameters
----------
gui:
physics: The physics tag specifies the type and properties of the
dynamics engine.
scene: Specifies the look of the environment.
light: The light element describes a light source.
model: The model element defines a complete robot or any other
physical object.
actor:
plugin: A plugin is a dynamically loaded chunk of code. It can exist
as a child of world, model, and sensor.
    joint: A joint connects two links with kinematic and dynamic
properties.
road:
state:
name: Unique name of the world
"""
class Meta:
name = "world"
gui: Optional["World.Gui"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
physics: Optional[Physics] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
scene: Optional[Scene] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
light: List[Light] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
model: List[Model] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
actor: List[Actor] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
plugin: List["World.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
joint: List[Joint] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
road: List["World.Road"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
state: List[State] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Gui:
camera: Optional["World.Gui.Camera"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
fullscreen: bool = field(
default=False,
metadata={
"type": "Attribute",
},
)
@dataclass
class Camera:
view_controller: str = field(
default="orbit",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
track_visual: Optional["World.Gui.Camera.TrackVisual"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class TrackVisual:
name: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_dist: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_dist: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies child
elements into the SDFormat element so that a plugin can
access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the filename is
not a full path name, the file will be searched for in the
configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Road:
"""
Parameters
----------
width: Width of the road
point: A series of points define the path of the road.
name: Name of the road
"""
width: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
point: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
        ) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/world.py | world.py |
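# --- Illustrative usage sketch (not part of the generated bindings) -----------
# Builds a minimal world from the World dataclass above and renders it to XML.
# Assumptions: these dataclasses are xsdata-generated and serializable with
# xsdata's XmlSerializer; import paths follow this package layout. A complete
# world would also set scene, models, lights, etc.
from xsdata.formats.dataclass.serializers import XmlSerializer
from skbot.ignition.sdformat.bindings.v12.physics import Physics
from skbot.ignition.sdformat.bindings.v12.world import World

world = World(name="minimal_world", physics=Physics(type="ode"))
world.road.append(World.Road(name="main_street", width=4.0, point=["0 0 0", "10 0 0"]))
print(XmlSerializer().render(world))  # emits the <world> element as XML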
from dataclasses import dataclass, field
from typing import List, Optional
__NAMESPACE__ = "sdformat/v1.2/state.xsd"
@dataclass
class State:
"""
Parameters
----------
time: Time stamp of the state [seconds nanoseconds]
model: Model state
world_name: Name of the world this state applies to
"""
class Meta:
name = "state"
time: str = field(
default="0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"white_space": "collapse",
"pattern": r"\d+ \d+",
},
)
model: List["State.Model"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
world_name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Model:
"""
Model state.
Parameters
----------
pose: Pose of the model
link: Link state
name: Name of the model
"""
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
link: List["State.Model.Link"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Link:
"""
Link state.
Parameters
----------
pose: Pose of the link relative to the model
velocity: Velocity of the link
wrench: Force applied to the link
name: Name of the link
"""
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
velocity: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
wrench: List["State.Model.Link.Wrench"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Wrench:
"""
Force applied to the link.
Parameters
----------
pos: Position of the force.
mag: Magnitude of the force.
"""
pos: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
mag: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
                ) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/state.py | state.py |
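# --- Illustrative usage sketch (not part of the generated bindings) -----------
# Records a simple world state with the State dataclass above. The import path is
# assumed from this package layout; world, model, and link names are hypothetical.
from skbot.ignition.sdformat.bindings.v12.state import State

snapshot = State(
    world_name="minimal_world",
    time="12 500000000",  # "<seconds> <nanoseconds>", matching the declared pattern
    model=[
        State.Model(
            name="box",
            pose="1 0 0.5 0 0 0",
            link=[State.Model.Link(name="box_link", velocity="0 0 0 0 0 0")],
        )
    ],
)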
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "sdformat/v1.2/physics.xsd"
@dataclass
class Physics:
"""
The physics tag specifies the type and properties of the dynamics engine.
Parameters
----------
update_rate: Rate at which to update the physics engine
max_contacts: Maximum number of contacts allowed between two
        entities. This value can be overridden by a max_contacts
element in a collision element.
gravity: The gravity vector
bullet: Bullet specific physics properties
ode: ODE specific physics properties
type: The type of the dynamics engine. Currently must be set to ode
"""
class Meta:
name = "physics"
update_rate: float = field(
default=1000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=20,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gravity: str = field(
default="0 0 -9.8",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
bullet: Optional["Physics.Bullet"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ode: Optional["Physics.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Bullet:
"""
Bullet specific physics properties.
Parameters
----------
dt: Time step
"""
dt: float = field(
default=0.003,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Ode:
"""
ODE specific physics properties.
"""
solver: Optional["Physics.Ode.Solver"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
constraints: Optional["Physics.Ode.Constraints"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Solver:
"""
Parameters
----------
type: One of the following types: world, quick
dt: The time duration which advances with each iteration of
the dynamics engine.
iters: Number of iterations for each step. A higher number
produces greater accuracy at a performance cost.
precon_iters:
sor: Set the successive over-relaxation parameter.
"""
type: str = field(
default="quick",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dt: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iters: int = field(
default=50,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
precon_iters: int = field(
default=0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sor: float = field(
default=1.3,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Constraints:
"""
Parameters
----------
cfm: Constraint force mixing parameter. See the ODE page for
more information.
erp: Error reduction parameter. See the ODE page for more
information.
contact_max_correcting_vel: The maximum correcting
velocities allowed when resolving contacts.
contact_surface_layer: The depth of the surface layer around
all geometry objects. Contacts are allowed to sink into
the surface layer up to the given depth before coming to
rest. The default value is zero. Increasing this to some
small value (e.g. 0.001) can help prevent jittering
problems due to contacts being repeatedly made and
broken.
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_max_correcting_vel: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_surface_layer: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
            ) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/physics.py | physics.py |
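# --- Illustrative usage sketch (not part of the generated bindings) -----------
# Configures the ODE solver and constraint blocks declared above; the import path
# is assumed from this package layout, and the values shown simply echo the
# documented defaults.
from skbot.ignition.sdformat.bindings.v12.physics import Physics

physics = Physics(
    type="ode",               # v1.2 currently requires the ODE engine
    update_rate=1000.0,
    gravity="0 0 -9.8",
    ode=Physics.Ode(
        solver=Physics.Ode.Solver(type="quick", dt=0.001, iters=50, sor=1.3),
        constraints=Physics.Ode.Constraints(cfm=0.0, erp=0.2),
    ),
)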
from dataclasses import dataclass, field
from typing import List, Optional
from .joint import Joint
from .link import Link
__NAMESPACE__ = "sdformat/v1.2/model.xsd"
@dataclass
class Model:
"""
The model element defines a complete robot or any other physical object.
Parameters
----------
static: If set to true, the model is immovable. Otherwise the model
is simulated in the dynamics engine.
    allow_auto_disable: Allows a model to auto-disable, which means
the physics engine can skip updating the model when the model is
at rest. This parameter is only used by models with no joints.
pose: A position and orientation in the global coordinate frame for
the model. Position(x,y,z) and rotation (roll, pitch yaw) in the
global coordinate frame.
link: A physical link with inertia, collision, and visual
properties. A link must be a child of a model, and any number of
links may exist in a model.
    joint: A joint connects two links with kinematic and dynamic
properties.
plugin: A plugin is a dynamically loaded chunk of code. It can exist
as a child of world, model, and sensor.
gripper:
name: A unique name for the model. This name must not match another
model in the world.
"""
class Meta:
name = "model"
static: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
allow_auto_disable: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
link: List[Link] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
joint: List[Joint] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
plugin: List["Model.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
gripper: List["Model.Gripper"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies child
elements into the SDFormat element so that a plugin can
access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the filename is
not a full path name, the file will be searched for in the
configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Gripper:
grasp_check: Optional["Model.Gripper.GraspCheck"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
gripper_link: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
palm_link: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class GraspCheck:
detach_steps: int = field(
default=40,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
attach_steps: int = field(
default=20,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_contact_count: int = field(
default=2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
            ) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/model.py | model.py |
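# --- Illustrative usage sketch (not part of the generated bindings) -----------
# Assembles a model and attaches a plugin using the Model dataclass above. The
# import path is assumed from this package layout; the plugin and library names
# are hypothetical. A real model also needs at least one link (min_occurs=1),
# omitted here for brevity.
from skbot.ignition.sdformat.bindings.v12.model import Model

box = Model(name="box", static=False, pose="0 0 0.5 0 0 0")
box.plugin.append(Model.Plugin(name="box_controller", filename="libbox_controller.so"))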
from dataclasses import dataclass, field
from typing import List, Optional
from .joint import Joint
from .link import Link
__NAMESPACE__ = "sdformat/v1.2/actor.xsd"
@dataclass
class Actor:
"""
Parameters
----------
pose: Origin of the actor
skin:
animation:
script:
link: A physical link with inertia, collision, and visual
properties. A link must be a child of a model, and any number of
links may exist in a model.
    joint: A joint connects two links with kinematic and dynamic
properties.
plugin: A plugin is a dynamically loaded chunk of code. It can exist
as a child of world, model, and sensor.
name:
static:
"""
class Meta:
name = "actor"
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
skin: Optional["Actor.Skin"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
animation: List["Actor.Animation"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
script: Optional["Actor.Script"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
link: List[Link] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
joint: List[Joint] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
plugin: List["Actor.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
static: bool = field(
default=False,
metadata={
"type": "Attribute",
},
)
@dataclass
class Skin:
filename: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
scale: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Animation:
filename: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
scale: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
interpolate_x: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Script:
loop: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
delay_start: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
auto_start: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
trajectory: List["Actor.Script.Trajectory"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Trajectory:
waypoint: List["Actor.Script.Trajectory.Waypoint"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
id: Optional[int] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Waypoint:
time: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies child
elements into the SDFormat element so that a plugin can
access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the filename is
not a full path name, the file will be searched for in the
configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
        ) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v12/actor.py | actor.py |
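# --- Illustrative usage sketch (not part of the generated bindings) -----------
# Defines a scripted trajectory for an actor using the Actor dataclass above. The
# import path is assumed from this package layout; the skin/animation file names
# are hypothetical, and the required link elements are omitted for brevity.
from skbot.ignition.sdformat.bindings.v12.actor import Actor

walker = Actor(
    name="pedestrian",
    skin=Actor.Skin(filename="walk.dae", scale=1.0),
    animation=[Actor.Animation(name="walking", filename="walk.dae", interpolate_x=True)],
    script=Actor.Script(
        loop=True,
        trajectory=[
            Actor.Script.Trajectory(
                id=0,
                type="walking",
                waypoint=[
                    Actor.Script.Trajectory.Waypoint(time=0.0, pose="0 0 0 0 0 0"),
                    Actor.Script.Trajectory.Waypoint(time=2.0, pose="2 0 0 0 0 0"),
                ],
            )
        ],
    ),
)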
from dataclasses import dataclass, field
from typing import List, Optional
__NAMESPACE__ = "sdformat/v1.2/geometry.xsd"
@dataclass
class Geometry:
"""
The shape of the visual or collision object.
Parameters
----------
box: Box shape
sphere: Sphere shape
cylinder: Cylinder shape
mesh: Mesh shape
plane: Plane shape
image: Extrude a set of boxes from a grayscale image.
heightmap: A heightmap based on a 2d grayscale image.
"""
class Meta:
name = "geometry"
box: Optional["Geometry.Box"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
sphere: Optional["Geometry.Sphere"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
cylinder: Optional["Geometry.Cylinder"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
mesh: Optional["Geometry.Mesh"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
plane: Optional["Geometry.Plane"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
image: Optional["Geometry.Image"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
heightmap: Optional["Geometry.Heightmap"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Box:
"""
Box shape.
Parameters
----------
size: The three side lengths of the box. The origin of the box
is in its geometric center (inside the center of the box).
"""
size: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
@dataclass
class Sphere:
"""
Sphere shape.
Parameters
----------
radius: radius of the sphere
"""
radius: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Cylinder:
"""
Cylinder shape.
Parameters
----------
radius: Radius of the cylinder
length: Length of the cylinder
"""
radius: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
length: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Mesh:
"""
Mesh shape.
Parameters
----------
filename: Mesh filename. DEPRECATED
uri: Mesh uri
scale: Scaling factor applied to the mesh
"""
filename: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
uri: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
scale: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
@dataclass
class Plane:
"""
Plane shape.
Parameters
----------
normal: Normal direction for the plane
size: Length of each side of the plane
"""
normal: str = field(
default="0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
size: str = field(
default="1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+)((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
@dataclass
class Image:
"""
Extrude a set of boxes from a grayscale image.
Parameters
----------
uri: URI of the grayscale image file
scale: Scaling factor applied to the image
threshold: Grayscale threshold
height: Height of the extruded boxes
granularity: The amount of error in the model
"""
uri: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
scale: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
threshold: int = field(
default=200,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
height: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
granularity: int = field(
default=1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Heightmap:
"""
A heightmap based on a 2d grayscale image.
Parameters
----------
uri: URI to a grayscale image file
size: The size of the heightmap in world units
pos: A position offset.
texture: The heightmap can contain multiple textures. The order
of the texture matters. The first texture will appear at the
lowest height, and the last texture at the highest height.
Use blend to control the height thresholds and fade between
textures.
blend: The blend tag controls how two adjacent textures are
mixed. The number of blend elements should equal one less
than the number of textures.
"""
uri: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
size: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
pos: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
texture: List["Geometry.Heightmap.Texture"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
blend: List["Geometry.Heightmap.Blend"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Texture:
"""The heightmap can contain multiple textures.
The order of the texture matters. The first texture will
appear at the lowest height, and the last texture at the
highest height. Use blend to control the height thresholds
and fade between textures.
Parameters
----------
size: Size of the applied texture in meters.
diffuse: Diffuse texture image filename
normal: Normalmap texture image filename
"""
size: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
diffuse: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
normal: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Blend:
"""The blend tag controls how two adjacent textures are mixed.
The number of blend elements should equal one less than the
number of textures.
Parameters
----------
min_height: Min height of a blend layer
fade_dist: Distance over which the blend occurs
"""
min_height: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fade_dist: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
            )


# --- end of skbot/ignition/sdformat/bindings/v12/geometry.py (scikit-bot 0.14.0) ---
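
# Illustrative usage sketch (not part of the generated bindings). It assumes the
# module above is importable as shown; the mesh URI is hypothetical. A geometry
# normally has exactly one shape child populated, and vector-valued fields such
# as box size or mesh scale are whitespace-separated strings matching the
# declared field patterns.
from skbot.ignition.sdformat.bindings.v12.geometry import Geometry

_box_geometry = Geometry(box=Geometry.Box(size="0.5 0.5 0.5"))
_mesh_geometry = Geometry(
    mesh=Geometry.Mesh(
        uri="model://cart/meshes/chassis.dae",  # hypothetical mesh URI
        scale="1 1 1",
    )
)
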
from dataclasses import dataclass, field
from typing import List, Optional
__NAMESPACE__ = "sdformat/v1.6/material.xsd"
@dataclass
class Material:
"""
The material of the visual element.
Parameters
----------
script: Name of material from an installed script file. This will
override the color element if the script exists.
shader:
lighting: If false, dynamic lighting will be disabled
    ambient: The ambient color of a material specified by a set of four
        numbers representing red/green/blue/alpha, each in the range of
        [0,1].
    diffuse: The diffuse color of a material specified by a set of four
        numbers representing red/green/blue/alpha, each in the range of
        [0,1].
    specular: The specular color of a material specified by a set of four
        numbers representing red/green/blue/alpha, each in the range of
        [0,1].
    emissive: The emissive color of a material specified by a set of four
        numbers representing red/green/blue/alpha, each in the range of
        [0,1].
pbr: Physically Based Rendering (PBR) material. There are two PBR
workflows: metal and specular. While both workflows and their
parameters can be specified at the same time, typically only one
of them will be used (depending on the underlying renderer
capability). It is also recommended to use the same workflow for
all materials in the world.
"""
class Meta:
name = "material"
script: Optional["Material.Script"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
shader: Optional["Material.Shader"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
lighting: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ambient: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
diffuse: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
specular: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
emissive: str = field(
default="0 0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
pbr: Optional["Material.Pbr"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Script:
"""Name of material from an installed script file.
This will override the color element if the script exists.
Parameters
----------
uri: URI of the material script file
name: Name of the script within the script file
"""
uri: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
name: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Shader:
"""
Parameters
----------
normal_map: filename of the normal map
type: vertex, pixel, normal_map_object_space,
normal_map_tangent_space
"""
normal_map: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pbr:
"""Physically Based Rendering (PBR) material.
There are two PBR workflows: metal and specular. While both
workflows and their parameters can be specified at the same
time, typically only one of them will be used (depending on the
underlying renderer capability). It is also recommended to use
the same workflow for all materials in the world.
Parameters
----------
metal: PBR using the Metallic/Roughness workflow.
specular: PBR using the Specular/Glossiness workflow.
"""
metal: Optional["Material.Pbr.Metal"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
specular: Optional["Material.Pbr.Specular"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Metal:
"""
PBR using the Metallic/Roughness workflow.
Parameters
----------
albedo_map: Filename of the diffuse/albedo map.
roughness_map: Filename of the roughness map.
roughness: Material roughness in the range of [0,1], where 0
represents a smooth surface and 1 represents a rough
surface. This is the inverse of a specular map in a PBR
specular workflow.
metalness_map: Filename of the metalness map.
metalness: Material metalness in the range of [0,1], where 0
represents non-metal and 1 represents raw metal
environment_map: Filename of the environment / reflection
map, typically in the form of a cubemap
ambient_occlusion_map: Filename of the ambient occlusion
map. The map defines the amount of ambient lighting on
the surface.
normal_map: Filename of the normal map. The normals can be
in the object space or tangent space as specified in the
'type' attribute
emissive_map: Filename of the emissive map.
"""
albedo_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
roughness_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
roughness: str = field(
default="0.5",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
metalness_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
metalness: str = field(
default="0.5",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
environment_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ambient_occlusion_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
normal_map: Optional["Material.Pbr.Metal.NormalMap"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
emissive_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class NormalMap:
"""
Parameters
----------
value:
type: The space that the normals are in. Values are:
'object' or 'tangent'
"""
value: str = field(
default="",
metadata={
"required": True,
},
)
type: str = field(
default="tangent",
metadata={
"type": "Attribute",
},
)
@dataclass
class Specular:
"""
PBR using the Specular/Glossiness workflow.
Parameters
----------
albedo_map: Filename of the diffuse/albedo map.
specular_map: Filename of the specular map.
glossiness_map: Filename of the glossiness map.
glossiness: Material glossiness in the range of [0-1], where
0 represents a rough surface and 1 represents a smooth
surface. This is the inverse of a roughness map in a PBR
metal workflow.
environment_map: Filename of the environment / reflection
map, typically in the form of a cubemap
ambient_occlusion_map: Filename of the ambient occlusion
map. The map defines the amount of ambient lighting on
the surface.
normal_map: Filename of the normal map. The normals can be
in the object space or tangent space as specified in the
'type' attribute
emissive_map: Filename of the emissive map.
"""
albedo_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
specular_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
glossiness_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
glossiness: str = field(
default="0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
environment_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ambient_occlusion_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
normal_map: Optional["Material.Pbr.Specular.NormalMap"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
emissive_map: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class NormalMap:
"""
Parameters
----------
value:
type: The space that the normals are in. Values are:
'object' or 'tangent'
"""
value: str = field(
default="",
metadata={
"required": True,
},
)
type: str = field(
default="tangent",
metadata={
"type": "Attribute",
},
            )


# --- end of skbot/ignition/sdformat/bindings/v16/material.py (scikit-bot 0.14.0) ---
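
# Illustrative usage sketch (not part of the generated bindings). It assumes the
# module above is importable as shown; the texture file name is hypothetical.
# Only one PBR workflow (metal or specular) is typically populated, color fields
# are four-component "r g b a" strings, and roughness/metalness are passed as
# strings because that is how the generated fields are typed.
from skbot.ignition.sdformat.bindings.v16.material import Material

_pbr_material = Material(
    diffuse="0.8 0.8 0.8 1",
    pbr=Material.Pbr(
        metal=Material.Pbr.Metal(
            albedo_map="materials/textures/albedo.png",  # hypothetical file name
            roughness="0.4",
            metalness="0.1",
        )
    ),
)
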
from dataclasses import dataclass, field
from typing import List, Optional
from .geometry import Geometry
__NAMESPACE__ = "sdformat/v1.6/collision.xsd"
@dataclass
class Collision:
"""The collision properties of a link.
Note that this can be different from the visual properties of a
link, for example, simpler collision models are often used to reduce
computation time.
Parameters
----------
laser_retro: intensity value returned by laser sensor.
max_contacts: Maximum number of contacts allowed between two
entities. This value overrides the max_contacts element defined
in physics.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
geometry: The shape of the visual or collision object.
surface: The surface parameters
name: Unique name for the collision element within the scope of the
parent link.
"""
class Meta:
name = "collision"
laser_retro: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=10,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
frame: List["Collision.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Collision.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
geometry: Optional[Geometry] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
surface: Optional["Collision.Surface"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Collision.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Surface:
"""
The surface parameters.
"""
bounce: Optional["Collision.Surface.Bounce"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
friction: Optional["Collision.Surface.Friction"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
contact: Optional["Collision.Surface.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
soft_contact: Optional["Collision.Surface.SoftContact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Bounce:
"""
Parameters
----------
restitution_coefficient: Bounciness coefficient of
restitution, from [0...1], where 0=no bounciness.
threshold: Bounce capture velocity, below which effective
coefficient of restitution is 0.
"""
restitution_coefficient: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
threshold: float = field(
default=100000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Friction:
"""
Parameters
----------
torsional: Parameters for torsional friction
ode: ODE friction parameters
bullet:
"""
torsional: Optional["Collision.Surface.Friction.Torsional"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ode: Optional["Collision.Surface.Friction.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
bullet: Optional["Collision.Surface.Friction.Bullet"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Torsional:
"""
Parameters for torsional friction.
Parameters
----------
coefficient: Torsional friction coefficient, unitless
maximum ratio of tangential stress to
normal stress.
use_patch_radius: If this flag is true,
torsional friction is calculated using the
"patch_radius" parameter. If this flag is
set to false, "surface_radius" (R) and
contact depth (d) are used to compute the
patch radius as sqrt(R*d).
patch_radius: Radius of contact patch surface.
surface_radius: Surface radius on the point of contact.
ode: Torsional friction parameters for ODE
"""
coefficient: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
use_patch_radius: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
patch_radius: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
surface_radius: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ode: Optional["Collision.Surface.Friction.Torsional.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
Torsional friction parameters for ODE.
Parameters
----------
slip: Force dependent slip for torsional friction,
equivalent to inverse of viscous damping
coefficient with units of
rad/s/(Nm). A slip value of 0 is
infinitely viscous.
"""
slip: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Ode:
"""
ODE friction parameters.
Parameters
----------
mu: Coefficient of friction in first friction pyramid
direction, the unitless maximum ratio of
force in first friction pyramid direction
to normal force.
mu2: Coefficient of friction in second friction pyramid
direction, the unitless maximum ratio of
force in second friction pyramid direction
to normal force.
fdir1: Unit vector specifying first friction pyramid
direction in collision-fixed reference
frame. If the friction pyramid model is in
use, and this value is set to a unit
vector for one of the colliding surfaces,
the ODE Collide callback function will align the
friction pyramid directions with a
reference frame fixed to that collision surface.
If both surfaces have this value set to a vector of
zeros, the friction pyramid directions
will be aligned with the world frame. If
this value is set for both surfaces, the behavior is
undefined.
slip1: Force dependent slip in first friction pyramid
direction, equivalent to inverse of
viscous damping coefficient with units of
m/s/N. A slip value of 0 is infinitely
viscous.
slip2: Force dependent slip in second friction pyramid
direction, equivalent to inverse of
viscous damping coefficient with units of
m/s/N. A slip value of 0 is infinitely
viscous.
"""
mu: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
mu2: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fdir1: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
slip1: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
slip2: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Bullet:
"""
Parameters
----------
friction: Coefficient of friction in first friction
pyramid direction, the unitless maximum
ratio of force in first friction pyramid
direction to normal force.
friction2: Coefficient of friction in second friction
pyramid direction, the unitless maximum
ratio of force in second friction pyramid
direction to normal force.
fdir1: Unit vector specifying first friction pyramid
direction in collision-fixed reference
frame. If the friction pyramid model is in
use, and this value is set to a unit
vector for one of the colliding surfaces,
the friction pyramid directions will be aligned
with a reference frame fixed to that collision
surface. If both surfaces have this value
set to a vector of zeros, the friction
pyramid directions will be aligned with the world
frame. If this value is set for both
surfaces, the behavior is undefined.
rolling_friction: Coefficient of rolling friction
"""
friction: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction2: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fdir1: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
rolling_friction: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Contact:
"""
Parameters
----------
collide_without_contact: Flag to disable contact force
generation, while still allowing collision checks and
contact visualization to occur.
collide_without_contact_bitmask: Bitmask for collision
filtering when collide_without_contact is on
collide_bitmask: Bitmask for collision filtering. This will
override collide_without_contact. Parsed as 16-bit
unsigned integer.
category_bitmask: Bitmask for category of collision
filtering. Collision happens if ((category1 &
collision2) | (category2 & collision1)) is not zero.
If not specified, the category_bitmask should be
interpreted as being the same as collide_bitmask. Parsed
as 16-bit unsigned integer.
poissons_ratio: Poisson's ratio is the unitless ratio
between transverse and axial strain. This value
must lie between (-1, 0.5). Defaults to 0.3 for typical
steel. Note typical silicone elastomers have
Poisson's ratio near 0.49 ~ 0.50. For
reference, approximate values for Material:(Young's
Modulus, Poisson's Ratio) for some of the
typical materials are: Plastic: (1e8 ~ 3e9
Pa, 0.35 ~ 0.41), Wood: (4e9 ~ 1e10 Pa,
0.22 ~ 0.50), Aluminum: (7e10 Pa, 0.32 ~
0.35), Steel: (2e11 Pa, 0.26 ~ 0.31).
elastic_modulus: Young's Modulus in SI derived unit Pascal.
Defaults to -1. If value is less or equal to zero,
contact using elastic modulus (with Poisson's Ratio) is
disabled. For reference, approximate values for
Material:(Young's Modulus, Poisson's Ratio) for
some of the typical materials are: Plastic:
(1e8 ~ 3e9 Pa, 0.35 ~ 0.41), Wood: (4e9 ~
1e10 Pa, 0.22 ~ 0.50), Aluminum: (7e10 Pa,
0.32 ~ 0.35), Steel: (2e11 Pa, 0.26 ~
0.31).
ode: ODE contact parameters
bullet: Bullet contact parameters
"""
collide_without_contact: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
collide_without_contact_bitmask: int = field(
default=1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
collide_bitmask: int = field(
default=65535,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
category_bitmask: int = field(
default=65535,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
poissons_ratio: float = field(
default=0.3,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
elastic_modulus: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ode: Optional["Collision.Surface.Contact.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
bullet: Optional["Collision.Surface.Contact.Bullet"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE contact parameters.
Parameters
----------
soft_cfm: Soft constraint force mixing.
soft_erp: Soft error reduction parameter
kp: dynamically "stiffness"-equivalent coefficient for
contact joints
kd: dynamically "damping"-equivalent coefficient for
contact joints
max_vel: maximum contact correction velocity truncation
term.
min_depth: minimum allowable depth before contact
correction impulse is applied
"""
soft_cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
soft_erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kp: float = field(
default=1000000000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kd: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_vel: float = field(
default=0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_depth: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Bullet:
"""
Bullet contact parameters.
Parameters
----------
soft_cfm: Soft constraint force mixing.
soft_erp: Soft error reduction parameter
kp: dynamically "stiffness"-equivalent coefficient for
contact joints
kd: dynamically "damping"-equivalent coefficient for
contact joints
split_impulse: Similar to ODE's max_vel implementation.
See
http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
split_impulse_penetration_threshold: Similar to ODE's
max_vel implementation. See
http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
"""
soft_cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
soft_erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kp: float = field(
default=1000000000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kd: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse_penetration_threshold: float = field(
default=-0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class SoftContact:
"""
Parameters
----------
            dart: soft contact parameters based on paper:
http://www.cc.gatech.edu/graphics/projects/Sumit/homepage/papers/sigasia11/jain_softcontacts_siga11.pdf
"""
dart: Optional["Collision.Surface.SoftContact.Dart"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Dart:
"""
                soft contact parameters based on paper:
                http://www.cc.gatech.edu/graphics/projects/Sumit/homepage/papers/sigasia11/jain_softcontacts_siga11.pdf.
Parameters
----------
bone_attachment: This is variable k_v in the soft
contacts paper. Its unit is N/m.
stiffness: This is variable k_e in the soft contacts
paper. Its unit is N/m.
damping: Viscous damping of point velocity in body
frame. Its unit is N/m/s.
flesh_mass_fraction: Fraction of mass to be distributed
among deformable nodes.
"""
bone_attachment: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
stiffness: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
damping: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
flesh_mass_fraction: float = field(
default=0.05,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
                )


# --- end of skbot/ignition/sdformat/bindings/v16/collision.py (scikit-bot 0.14.0) ---
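
# Illustrative usage sketch (not part of the generated bindings). It assumes the
# module above is importable as shown and that the v1.6 Geometry class exposes a
# Box child with a size string like the v1.2 version earlier in this dump; the
# collision name and bitmask values are made up. The helper at the end simply
# restates the bitmask rule quoted in the Contact docstring.
from skbot.ignition.sdformat.bindings.v16.collision import Collision
from skbot.ignition.sdformat.bindings.v16.geometry import Geometry

_chassis_collision = Collision(
    name="chassis_collision",
    geometry=Geometry(box=Geometry.Box(size="2 1 0.5")),
    surface=Collision.Surface(
        friction=Collision.Surface.Friction(
            ode=Collision.Surface.Friction.Ode(mu=0.9, mu2=0.9)
        ),
        contact=Collision.Surface.Contact(
            collide_bitmask=0x0003,
            category_bitmask=0x0001,
        ),
    ),
)


def _may_collide(category1: int, collide1: int, category2: int, collide2: int) -> bool:
    """Collision filtering rule: ((category1 & collide2) | (category2 & collide1)) != 0."""
    return ((category1 & collide2) | (category2 & collide1)) != 0
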
from typing import List, Optional
from .geometry import Geometry
__NAMESPACE__ = "sdformat/v1.6/collision.xsd"
@dataclass
class Collision:
"""The collision properties of a link.
Note that this can be different from the visual properties of a
link, for example, simpler collision models are often used to reduce
computation time.
Parameters
----------
laser_retro: intensity value returned by laser sensor.
max_contacts: Maximum number of contacts allowed between two
entities. This value overrides the max_contacts element defined
in physics.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
geometry: The shape of the visual or collision object.
surface: The surface parameters
name: Unique name for the collision element within the scope of the
parent link.
"""
class Meta:
name = "collision"
laser_retro: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=10,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
frame: List["Collision.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Collision.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
geometry: Optional[Geometry] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
surface: Optional["Collision.Surface"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Collision.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Surface:
"""
The surface parameters.
"""
bounce: Optional["Collision.Surface.Bounce"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
friction: Optional["Collision.Surface.Friction"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
contact: Optional["Collision.Surface.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
soft_contact: Optional["Collision.Surface.SoftContact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Bounce:
"""
Parameters
----------
restitution_coefficient: Bounciness coefficient of
restitution, from [0...1], where 0=no bounciness.
threshold: Bounce capture velocity, below which effective
coefficient of restitution is 0.
"""
restitution_coefficient: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
threshold: float = field(
default=100000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Friction:
"""
Parameters
----------
torsional: Parameters for torsional friction
ode: ODE friction parameters
bullet:
"""
torsional: Optional["Collision.Surface.Friction.Torsional"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ode: Optional["Collision.Surface.Friction.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
bullet: Optional["Collision.Surface.Friction.Bullet"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Torsional:
"""
Parameters for torsional friction.
Parameters
----------
coefficient: Torsional friction coefficient, unitless
maximum ratio of tangential stress to
normal stress.
use_patch_radius: If this flag is true,
torsional friction is calculated using the
"patch_radius" parameter. If this flag is
set to false, "surface_radius" (R) and
contact depth (d) are used to compute the
patch radius as sqrt(R*d).
patch_radius: Radius of contact patch surface.
surface_radius: Surface radius on the point of contact.
ode: Torsional friction parameters for ODE
"""
coefficient: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
use_patch_radius: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
patch_radius: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
surface_radius: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ode: Optional["Collision.Surface.Friction.Torsional.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
Torsional friction parameters for ODE.
Parameters
----------
slip: Force dependent slip for torsional friction,
equivalent to inverse of viscous damping
coefficient with units of
rad/s/(Nm). A slip value of 0 is
infinitely viscous.
"""
slip: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Ode:
"""
ODE friction parameters.
Parameters
----------
mu: Coefficient of friction in first friction pyramid
direction, the unitless maximum ratio of
force in first friction pyramid direction
to normal force.
mu2: Coefficient of friction in second friction pyramid
direction, the unitless maximum ratio of
force in second friction pyramid direction
to normal force.
fdir1: Unit vector specifying first friction pyramid
direction in collision-fixed reference
frame. If the friction pyramid model is in
use, and this value is set to a unit
vector for one of the colliding surfaces,
the ODE Collide callback function will align the
friction pyramid directions with a
reference frame fixed to that collision surface.
If both surfaces have this value set to a vector of
zeros, the friction pyramid directions
will be aligned with the world frame. If
this value is set for both surfaces, the behavior is
undefined.
slip1: Force dependent slip in first friction pyramid
direction, equivalent to inverse of
viscous damping coefficient with units of
m/s/N. A slip value of 0 is infinitely
viscous.
slip2: Force dependent slip in second friction pyramid
direction, equivalent to inverse of
viscous damping coefficient with units of
m/s/N. A slip value of 0 is infinitely
viscous.
"""
mu: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
mu2: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fdir1: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
slip1: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
slip2: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
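# Construction sketch (editor addition): the nested dataclasses can be
# instantiated directly, e.g. an anisotropic friction pyramid whose first
# direction is pinned to the collision frame's x-axis. All values are
# illustrative placeholders.
#
#   ode_friction = Collision.Surface.Friction.Ode(
#       mu=0.9, mu2=0.5, fdir1="1 0 0", slip1=0.0, slip2=0.0,
#   )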
@dataclass
class Bullet:
"""
Parameters
----------
friction: Coefficient of friction in first friction
pyramid direction, the unitless maximum
ratio of force in first friction pyramid
direction to normal force.
friction2: Coefficient of friction in second friction
pyramid direction, the unitless maximum
ratio of force in second friction pyramid
direction to normal force.
fdir1: Unit vector specifying first friction pyramid
direction in collision-fixed reference
frame. If the friction pyramid model is in
use, and this value is set to a unit
vector for one of the colliding surfaces,
the friction pyramid directions will be aligned
with a reference frame fixed to that collision
surface. If both surfaces have this value
set to a vector of zeros, the friction
pyramid directions will be aligned with the world
frame. If this value is set for both
surfaces, the behavior is undefined.
rolling_friction: Coefficient of rolling friction
"""
friction: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction2: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fdir1: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
rolling_friction: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Contact:
"""
Parameters
----------
collide_without_contact: Flag to disable contact force
generation, while still allowing collision checks and
contact visualization to occur.
collide_without_contact_bitmask: Bitmask for collision
filtering when collide_without_contact is on
collide_bitmask: Bitmask for collision filtering. This will
override collide_without_contact. Parsed as 16-bit
unsigned integer.
category_bitmask: Bitmask for category of collision
filtering. Collision happens if ((category1 &
collision2) | (category2 & collision1)) is not zero.
If not specified, the category_bitmask should be
interpreted as being the same as collide_bitmask. Parsed
as 16-bit unsigned integer.
poissons_ratio: Poisson's ratio is the unitless ratio
between transverse and axial strain. This value
must lie between (-1, 0.5). Defaults to 0.3 for typical
steel. Note typical silicone elastomers have
Poisson's ratio near 0.49 ~ 0.50. For
reference, approximate values for Material:(Young's
Modulus, Poisson's Ratio) for some of the
typical materials are: Plastic: (1e8 ~ 3e9
Pa, 0.35 ~ 0.41), Wood: (4e9 ~ 1e10 Pa,
0.22 ~ 0.50), Aluminum: (7e10 Pa, 0.32 ~
0.35), Steel: (2e11 Pa, 0.26 ~ 0.31).
elastic_modulus: Young's Modulus in SI derived unit Pascal.
Defaults to -1. If value is less or equal to zero,
contact using elastic modulus (with Poisson's Ratio) is
disabled. For reference, approximate values for
Material:(Young's Modulus, Poisson's Ratio) for
some of the typical materials are: Plastic:
(1e8 ~ 3e9 Pa, 0.35 ~ 0.41), Wood: (4e9 ~
1e10 Pa, 0.22 ~ 0.50), Aluminum: (7e10 Pa,
0.32 ~ 0.35), Steel: (2e11 Pa, 0.26 ~
0.31).
ode: ODE contact parameters
bullet: Bullet contact parameters
"""
collide_without_contact: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
collide_without_contact_bitmask: int = field(
default=1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
collide_bitmask: int = field(
default=65535,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
category_bitmask: int = field(
default=65535,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
poissons_ratio: float = field(
default=0.3,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
elastic_modulus: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ode: Optional["Collision.Surface.Contact.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
bullet: Optional["Collision.Surface.Contact.Bullet"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
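# Worked example of the bitmask rule quoted above (editor addition, values
# illustrative): take category_bitmask1 = 0b0011, collide_bitmask1 = 0b0100
# and category_bitmask2 = 0b0100, collide_bitmask2 = 0b0011. Then
#   (category1 & collision2) | (category2 & collision1)
#     = (0b0011 & 0b0011) | (0b0100 & 0b0100)
#     = 0b0011 | 0b0100 = 0b0111 != 0,
# so this pair of collision objects is allowed to collide.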
@dataclass
class Ode:
"""
ODE contact parameters.
Parameters
----------
soft_cfm: Soft constraint force mixing.
soft_erp: Soft error reduction parameter
kp: dynamically "stiffness"-equivalent coefficient for
contact joints
kd: dynamically "damping"-equivalent coefficient for
contact joints
max_vel: maximum contact correction velocity truncation
term.
min_depth: minimum allowable depth before contact
correction impulse is applied
"""
soft_cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
soft_erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kp: float = field(
default=1000000000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kd: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_vel: float = field(
default=0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_depth: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
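# Background note (editor addition): in the Open Dynamics Engine the
# stiffness/damping pair (kp, kd) and the constraint parameters (erp, cfm)
# are related, for a solver step size h, approximately by
#   erp = h * kp / (h * kp + kd)
#   cfm = 1 / (h * kp + kd)
# This is quoted from the ODE manual for context only; the schema above
# simply stores the raw values.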
@dataclass
class Bullet:
"""
Bullet contact parameters.
Parameters
----------
soft_cfm: Soft constraint force mixing.
soft_erp: Soft error reduction parameter
kp: dynamically "stiffness"-equivalent coefficient for
contact joints
kd: dynamically "damping"-equivalent coefficient for
contact joints
split_impulse: Similar to ODE's max_vel implementation.
See
http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
split_impulse_penetration_threshold: Similar to ODE's
max_vel implementation. See
http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
"""
soft_cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
soft_erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kp: float = field(
default=1000000000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kd: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse_penetration_threshold: float = field(
default=-0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class SoftContact:
"""
Parameters
----------
dart: Soft contact parameters based on the paper:
http://www.cc.gatech.edu/graphics/projects/Sumit/homepage/papers/sigasia11/jain_softcontacts_siga11.pdf
"""
dart: Optional["Collision.Surface.SoftContact.Dart"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Dart:
"""
Soft contact parameters based on the paper:
http://www.cc.gatech.edu/graphics/projects/Sumit/homepage/papers/sigasia11/jain_softcontacts_siga11.pdf.
Parameters
----------
bone_attachment: This is variable k_v in the soft
contacts paper. Its unit is N/m.
stiffness: This is variable k_e in the soft contacts
paper. Its unit is N/m.
damping: Viscous damping of point velocity in body
frame. Its unit is N/m/s.
flesh_mass_fraction: Fraction of mass to be distributed
among deformable nodes.
"""
bone_attachment: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
stiffness: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
damping: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
flesh_mass_fraction: float = field(
default=0.05,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
) | 0.953902 | 0.532668 |
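# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# Assuming the collision bindings above are importable as
# skbot.ignition.sdformat.bindings.v16.collision (the import path is inferred
# from the sibling rows and may differ), a surface with ODE friction and a
# collide bitmask could be assembled as follows; all values are placeholders.
from skbot.ignition.sdformat.bindings.v16.collision import Collision

example_surface = Collision.Surface(
    friction=Collision.Surface.Friction(
        ode=Collision.Surface.Friction.Ode(mu=0.8, mu2=0.6),
    ),
    contact=Collision.Surface.Contact(collide_bitmask=0x000F),
)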
from dataclasses import dataclass, field
from typing import List, Optional
__NAMESPACE__ = "sdformat/v1.6/light.xsd"
@dataclass
class Light:
"""
The light element describes a light source.
Parameters
----------
cast_shadows: When true, the light will cast shadows.
diffuse: Diffuse light color
specular: Specular light color
attenuation: Light attenuation
direction: Direction of the light, only applicable for spot and
directional lights.
spot: Spot light parameters
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: A unique name for the light.
type: The light type: point, directional, spot.
"""
class Meta:
name = "light"
cast_shadows: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
diffuse: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
specular: str = field(
default=".1 .1 .1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
attenuation: Optional["Light.Attenuation"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
direction: str = field(
default="0 0 -1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
spot: Optional["Light.Spot"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Light.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Light.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Attenuation:
"""
Light attenuation.
Parameters
----------
range: Range of the light
linear: The linear attenuation factor: 1 means attenuate evenly
over the distance.
constant: The constant attenuation factor: 1.0 means never
attenuate, 0.0 is complete attenuation.
quadratic: The quadratic attenuation factor: adds a curvature to
the attenuation.
"""
range: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
linear: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
constant: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
quadratic: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
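# Background note (editor addition): renderers commonly used with SDFormat
# (e.g. OGRE, as used by Gazebo) typically combine these factors as
#   attenuation(r) = 1 / (constant + linear * r + quadratic * r**2)
# for a point at distance r from the light, cut off beyond `range`. This
# formula is given for context only and is not defined by the schema itself.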
@dataclass
class Spot:
"""
Spot light parameters.
Parameters
----------
inner_angle: Angle covered by the bright inner cone
outer_angle: Angle covered by the outer cone
falloff: The rate of falloff between the inner and outer cones.
1.0 means a linear falloff, less means slower falloff,
higher means faster falloff.
"""
inner_angle: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
outer_angle: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
falloff: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Light.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v16/light.py | light.py | 0.935854 | 0.48182 |
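# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# Assuming the light bindings above are importable as
# skbot.ignition.sdformat.bindings.v16.light (path taken from the row
# metadata), a spot light could be assembled as follows; all values are
# placeholders.
from skbot.ignition.sdformat.bindings.v16.light import Light

example_light = Light(
    name="lamp",
    type="spot",
    direction="0 0 -1",
    attenuation=Light.Attenuation(range=20.0, linear=0.1, quadratic=0.01),
    spot=Light.Spot(inner_angle=0.3, outer_angle=0.6, falloff=1.0),
)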
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "sdformat/v1.6/scene.xsd"
@dataclass
class Scene:
"""
Specifies the look of the environment.
Parameters
----------
ambient: Color of the ambient light.
background: Color of the background.
sky: Properties for the sky
shadows: Enable/disable shadows
fog: Controls fog
grid: Enable/disable the grid
origin_visual: Show/hide world origin indicator
"""
class Meta:
name = "scene"
ambient: str = field(
default="0.4 0.4 0.4 1.0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
background: str = field(
default=".7 .7 .7 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
sky: Optional["Scene.Sky"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
shadows: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fog: Optional["Scene.Fog"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
grid: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
origin_visual: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Sky:
"""
Properties for the sky.
Parameters
----------
time: Time of day [0..24]
sunrise: Sunrise time [0..24]
sunset: Sunset time [0..24]
clouds: Cloud parameters
"""
time: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sunrise: float = field(
default=6.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sunset: float = field(
default=20.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
clouds: Optional["Scene.Sky.Clouds"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Clouds:
"""
Cloud parameters.
Parameters
----------
speed: Speed of the clouds
direction: Direction of the cloud movement
humidity: Density of clouds
mean_size: Average size of the clouds
ambient: Ambient cloud color
"""
speed: float = field(
default=0.6,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
direction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
humidity: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
mean_size: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ambient: str = field(
default=".8 .8 .8 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
@dataclass
class Fog:
"""
Controls fog.
Parameters
----------
color: Fog color
type: Fog type: constant, linear, quadratic
start: Distance to start of fog
end: Distance to end of fog
density: Density of fog
"""
color: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
type: str = field(
default="none",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
start: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
end: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
density: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v16/scene.py | scene.py | 0.921675 | 0.460774 |
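# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# Assuming the scene bindings above are importable as
# skbot.ignition.sdformat.bindings.v16.scene (path taken from the row
# metadata), a foggy late-afternoon scene could be described as follows; all
# values are placeholders.
from skbot.ignition.sdformat.bindings.v16.scene import Scene

example_scene = Scene(
    ambient="0.3 0.3 0.3 1.0",
    sky=Scene.Sky(time=17.5, clouds=Scene.Sky.Clouds(speed=1.2, humidity=0.7)),
    fog=Scene.Fog(type="linear", start=5.0, end=50.0, density=0.8),
)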
from dataclasses import dataclass, field
from typing import List, Optional
from .geometry import Geometry
from .material import Material
__NAMESPACE__ = "sdformat/v1.6/visual.xsd"
@dataclass
class Visual:
"""The visual properties of the link.
This element specifies the shape of the object (box, cylinder, etc.)
for visualization purposes.
Parameters
----------
cast_shadows: If true the visual will cast shadows.
laser_retro: Will be implemented in a future release.
transparency: The amount of transparency (0 = opaque, 1 = fully
transparent).
meta: Optional meta information for the visual. The information
contained within this element should be used to provide
additional feedback to an end user.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
material: The material of the visual element.
geometry: The shape of the visual or collision object.
plugin: A plugin is a dynamically loaded chunk of code. It can exist
as a child of world, model, and sensor.
name: Unique name for the visual element within the scope of the
parent link.
"""
class Meta:
name = "visual"
cast_shadows: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
laser_retro: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
transparency: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
meta: Optional["Visual.MetaType"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Visual.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Visual.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
material: Optional[Material] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
geometry: Optional[Geometry] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plugin: List["Visual.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class MetaType:
"""Optional meta information for the visual.
The information contained within this element should be used to
provide additional feedback to an end user.
Parameters
----------
layer: The layer in which this visual is displayed. The layer
number is useful for programs, such as Gazebo, that put
visuals in different layers for enhanced visualization.
"""
layer: int = field(
default=0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Visual.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies child
elements into the SDFormat element so that a plugin can
access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the filename is
not a full path name, the file will be searched for in the
configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v16/visual.py | visual.py | 0.943543 | 0.524456 |
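# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# Assuming the visual bindings above are importable as
# skbot.ignition.sdformat.bindings.v16.visual (path taken from the row
# metadata). The geometry and material payloads come from the sibling
# geometry/material modules, which are not reproduced here, so they are left
# at their defaults; all values are placeholders.
from skbot.ignition.sdformat.bindings.v16.visual import Visual

example_visual = Visual(
    name="chassis_visual",
    cast_shadows=False,
    transparency=0.5,
    meta=Visual.MetaType(layer=1),
)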
from dataclasses import dataclass, field
from typing import List, Optional
from .collision import Collision
from .light import Light
from .material import Material
from .sensor import Sensor
from .visual import Visual
__NAMESPACE__ = "sdformat/v1.6/link.xsd"
@dataclass
class Link:
"""A physical link with inertia, collision, and visual properties.
A link must be a child of a model, and any number of links may exist
in a model.
Parameters
----------
gravity: If true, the link is affected by gravity.
enable_wind: If true, the link is affected by the wind.
self_collide: If true, the link can collide with other links in the
model. Two links within a model will collide if
link1.self_collide OR link2.self_collide. Links connected by a
joint will never collide.
kinematic: If true, the link is kinematic only
must_be_base_link: If true, the link will have 6DOF and be a direct
child of world.
velocity_decay: Exponential damping of the link's velocity.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
inertial: The inertial properties of the link.
collision: The collision properties of a link. Note that this can be
different from the visual properties of a link, for example,
simpler collision models are often used to reduce computation
time.
visual: The visual properties of the link. This element specifies
the shape of the object (box, cylinder, etc.) for visualization
purposes.
sensor: The sensor tag describes the type and properties of a
sensor.
projector:
audio_sink: An audio sink.
audio_source: An audio source.
battery: Description of a battery.
light: The light element describes a light source.
particle_emitter: A particle emitter that can be used to describe
fog, smoke, and dust.
name: A unique name for the link within the scope of the model.
"""
class Meta:
name = "link"
gravity: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
enable_wind: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
self_collide: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kinematic: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
must_be_base_link: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity_decay: Optional["Link.VelocityDecay"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Link.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
inertial: Optional["Link.Inertial"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
collision: List[Collision] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
visual: List[Visual] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
sensor: List[Sensor] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
projector: Optional["Link.Projector"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
audio_sink: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
audio_source: List["Link.AudioSource"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
battery: List["Link.Battery"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
light: List[Light] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
particle_emitter: List["Link.ParticleEmitter"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class VelocityDecay:
"""
Exponential damping of the link's velocity.
Parameters
----------
linear: Linear damping
angular: Angular damping
"""
linear: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
angular: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Link.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Inertial:
"""
The inertial properties of the link.
Parameters
----------
mass: The mass of the link.
inertia: The 3x3 rotational inertia matrix. Because the
rotational inertia matrix is symmetric, only 6 above-
diagonal elements of this matrix are specified here, using
the attributes ixx, ixy, ixz, iyy, iyz, izz.
frame: A frame of reference to which a pose is relative.
pose: This is the pose of the inertial reference frame, relative
to the specified reference frame. The origin of the inertial
reference frame needs to be at the center of gravity. The
axes of the inertial reference frame do not need to be
aligned with the principal axes of the inertia.
"""
mass: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
inertia: Optional["Link.Inertial.Inertia"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Link.Inertial.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.Inertial.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Inertia:
"""The 3x3 rotational inertia matrix.
Because the rotational inertia matrix is symmetric, only 6
above-diagonal elements of this matrix are specified here,
using the attributes ixx, ixy, ixz, iyy, iyz, izz.
"""
ixx: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ixy: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ixz: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iyy: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iyz: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
izz: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
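# Editor note: the six values above populate the symmetric 3x3 inertia
# matrix as
#   [[ixx, ixy, ixz],
#    [ixy, iyy, iyz],
#    [ixz, iyz, izz]]
# which follows directly from the symmetry statement in the docstring.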
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["Link.Inertial.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Projector:
"""
Parameters
----------
texture: Texture name
fov: Field of view
near_clip: Near clip distance
far_clip: Far clip distance
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
plugin: A plugin is a dynamically loaded chunk of code. It can
exist as a child of world, model, and sensor.
name: Name of the projector
"""
texture: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fov: float = field(
default=0.785,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
near_clip: float = field(
default=0.1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
far_clip: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
frame: List["Link.Projector.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.Projector.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plugin: List["Link.Projector.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["Link.Projector.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies
child elements into the SDFormat element so that a
plugin can access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the
filename is not a full path name, the file will be
searched for in the configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class AudioSource:
"""
An audio source.
Parameters
----------
uri: URI of the audio media.
pitch: Pitch for the audio media, in Hz
gain: Gain for the audio media, in dB.
contact: List of collision objects that will trigger audio
playback.
loop: True to make the audio source loop playback.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
"""
uri: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pitch: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gain: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact: Optional["Link.AudioSource.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
loop: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
frame: List["Link.AudioSource.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.AudioSource.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Contact:
"""
List of collision objects that will trigger audio playback.
Parameters
----------
collision: Name of child collision element that will trigger
audio playback.
"""
collision: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["Link.AudioSource.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Battery:
"""
Description of a battery.
Parameters
----------
voltage: Initial voltage in volts.
name: Unique name for the battery.
"""
voltage: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class ParticleEmitter:
"""
A particle emitter that can be used to describe fog, smoke, and dust.
Parameters
----------
emitting: True indicates that the particle emitter should
generate particles when loaded
duration: The number of seconds the emitter is active. A value
less than or equal to zero means infinite duration.
size: The size of the emitter where the particles are sampled.
Default value is (1, 1, 1). Note that the interpretation
of the emitter area varies depending on the emitter type:
- point: The area is ignored.
- box: The area is interpreted as width X height X depth.
- cylinder: The area is interpreted as the bounding box of the
cylinder. The cylinder is oriented along the Z-axis.
- ellipsoid: The area is interpreted as the bounding box of an
ellipsoid-shaped area, i.e. a sphere or squashed-sphere area.
The parameters are again identical to EM_BOX, except that the
dimensions describe the widest points along each of the axes.
particle_size: The particle dimensions (width, height, depth).
lifetime: The number of seconds each particle will ’live’ for
before being destroyed. This value must be greater than
zero.
rate: The number of particles per second that should be emitted.
min_velocity: Sets a minimum velocity for each particle (m/s).
max_velocity: Sets a maximum velocity for each particle (m/s).
scale_rate: Sets the amount by which to scale the particles in
both x and y direction per second.
color_start: Sets the starting color for all particles emitted.
The actual color will be interpolated between this color
and the one set under color_end. Color::White is the
default color for the particles unless a specific
function is used. To specify a color, RGB values should
be passed in. For example, to specify red, a user
should enter: <color_start>1 0 0</color_start>.
Note that this function
overrides the particle colors set with
color_range_image.
color_end: Sets the end color for all particles emitted. The
actual color will be interpolated between this color and
the one set under color_start. Color::White is the
default color for the particles unless a specific
function is used (see color_start for more information
about defining custom colors with RGB values). Note
that this function overrides the particle colors set
with color_range_image.
color_range_image: Sets the path to the color image used as an
affector. This affector modifies the color of particles in
flight. The colors are taken from a specified image file.
The range of color values begins from the left side of the
image and moves to the right over the lifetime of the
particle, therefore only the horizontal dimension of the
image is used. Note that this function overrides the
particle colors set with color_start and color_end.
topic: Topic used to update particle emitter properties at
runtime. The default topic is
/model/{model_name}/particle_emitter/{emitter_name}
Note that the emitter id and name may not be changed.
particle_scatter_ratio: This is used to determine the ratio of
particles that will be detected by sensors. Increasing
the ratio means there is a higher chance of particles
reflecting and interfering with depth sensing, making the
emitter appear more dense. Decreasing the ratio decreases
the chance of particles reflecting and interfering with
depth sensing, making it appear less dense.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
material: The material of the visual element.
name: A unique name for the particle emitter.
type: The type of a particle emitter. One of "box", "cylinder",
"ellipsoid", or "point".
"""
emitting: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
duration: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
size: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
particle_size: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
lifetime: float = field(
default=5.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
rate: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_velocity: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_velocity: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
scale_rate: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
color_start: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
color_end: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
color_range_image: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
topic: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
particle_scatter_ratio: float = field(
default=0.65,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: Optional["Link.ParticleEmitter.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
material: Optional[Material] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
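# Usage sketch: a box-shaped smoke emitter built from these bindings; the
# numeric strings follow the same "x y z" / "r g b a" conventions enforced by
# the patterns above, and all values here are illustrative only.
#
#     emitter = Link.ParticleEmitter(
#         name="smoke",
#         type="box",
#         size="2 2 2",
#         rate=20.0,
#         lifetime=2.0,
#         color_start="0.8 0.8 0.8 1",
#         color_end="0.8 0.8 0.8 0",
#     )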
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v16/link.py | link.py | from dataclasses import dataclass, field
from typing import List, Optional
from .collision import Collision
from .light import Light
from .material import Material
from .sensor import Sensor
from .visual import Visual
__NAMESPACE__ = "sdformat/v1.6/link.xsd"
@dataclass
class Link:
"""A physical link with inertia, collision, and visual properties.
A link must be a child of a model, and any number of links may exist
in a model.
Parameters
----------
gravity: If true, the link is affected by gravity.
enable_wind: If true, the link is affected by the wind.
self_collide: If true, the link can collide with other links in the
model. Two links within a model will collide if
link1.self_collide OR link2.self_collide. Links connected by a
joint will never collide.
kinematic: If true, the link is kinematic only
must_be_base_link: If true, the link will have 6DOF and be a direct
child of world.
velocity_decay: Exponential damping of the link's velocity.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
inertial: The inertial properties of the link.
collision: The collision properties of a link. Note that this can be
different from the visual properties of a link, for example,
simpler collision models are often used to reduce computation
time.
visual: The visual properties of the link. This element specifies
the shape of the object (box, cylinder, etc.) for visualization
purposes.
sensor: The sensor tag describes the type and properties of a
sensor.
projector:
audio_sink: An audio sink.
audio_source: An audio source.
battery: Description of a battery.
light: The light element describes a light source.
particle_emitter: A particle emitter that can be used to describe
fog, smoke, and dust.
name: A unique name for the link within the scope of the model.
"""
class Meta:
name = "link"
gravity: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
enable_wind: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
self_collide: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kinematic: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
must_be_base_link: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity_decay: Optional["Link.VelocityDecay"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Link.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
inertial: Optional["Link.Inertial"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
collision: List[Collision] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
visual: List[Visual] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
sensor: List[Sensor] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
projector: Optional["Link.Projector"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
audio_sink: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
audio_source: List["Link.AudioSource"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
battery: List["Link.Battery"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
light: List[Light] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
particle_emitter: List["Link.ParticleEmitter"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
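# Usage sketch: the fields above mirror the <link> element, so a minimal link
# is just a named instance; collision, visual, sensor and the other list-valued
# members default to empty lists.
#
#     link = Link(name="chassis", gravity=True, self_collide=False)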
@dataclass
class VelocityDecay:
"""
Exponential damping of the link's velocity.
Parameters
----------
linear: Linear damping
angular: Angular damping
"""
linear: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
angular: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Link.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Inertial:
"""
The inertial properties of the link.
Parameters
----------
mass: The mass of the link.
inertia: The 3x3 rotational inertia matrix. Because the
rotational inertia matrix is symmetric, only the 6 elements on
or above the diagonal are specified here, using
the attributes ixx, ixy, ixz, iyy, iyz, izz.
frame: A frame of reference to which a pose is relative.
pose: This is the pose of the inertial reference frame, relative
to the specified reference frame. The origin of the inertial
reference frame needs to be at the center of gravity. The
axes of the inertial reference frame do not need to be
aligned with the principal axes of the inertia.
"""
mass: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
inertia: Optional["Link.Inertial.Inertia"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Link.Inertial.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.Inertial.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Inertia:
"""The 3x3 rotational inertia matrix.
Because the rotational inertia matrix is symmetric, only the 6
elements on or above the diagonal are specified here,
using the attributes ixx, ixy, ixz, iyy, iyz, izz.
"""
ixx: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ixy: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
ixz: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iyy: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iyz: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
izz: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
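# Worked sketch: for a solid box of mass m and side lengths (dx, dy, dz), the
# principal moments used above are commonly taken as
#     ixx = m / 12 * (dy**2 + dz**2)
#     iyy = m / 12 * (dx**2 + dz**2)
#     izz = m / 12 * (dx**2 + dy**2)
# with the off-diagonal terms left at zero. For a 1 kg cube of 0.2 m edge:
#
#     inertia = Link.Inertial.Inertia(
#         ixx=1.0 / 12 * (0.2**2 + 0.2**2),  # ~0.00667 kg*m^2
#         iyy=1.0 / 12 * (0.2**2 + 0.2**2),
#         izz=1.0 / 12 * (0.2**2 + 0.2**2),
#     )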
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["Link.Inertial.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Projector:
"""
Parameters
----------
texture: Texture name
fov: Field of view
near_clip: Near clip distance
far_clip: Far clip distance
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
plugin: A plugin is a dynamically loaded chunk of code. It can
exist as a child of world, model, and sensor.
name: Name of the projector
"""
texture: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fov: float = field(
default=0.785,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
near_clip: float = field(
default=0.1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
far_clip: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
frame: List["Link.Projector.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.Projector.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plugin: List["Link.Projector.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
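# Usage sketch: a projector is described by a texture plus a view frustum;
# judging by the 0.785 (~pi/4) default, fov appears to be in radians and the
# clip distances in meters, e.g.
#
#     projector = Link.Projector(name="headlight", texture="beam.png",
#                                fov=0.9, near_clip=0.1, far_clip=15.0)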
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["Link.Projector.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies
child elements into the SDFormat element so that a
plugin can access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the
filename is not a full path name, the file will be
searched for in the configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class AudioSource:
"""
An audio source.
Parameters
----------
uri: URI of the audio media.
pitch: Pitch for the audio media, in Hz
gain: Gain for the audio media, in dB.
contact: List of collision objects that will trigger audio
playback.
loop: True to make the audio source loop playback.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
"""
uri: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pitch: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gain: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact: Optional["Link.AudioSource.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
loop: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
frame: List["Link.AudioSource.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Link.AudioSource.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
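# Usage sketch: an audio source that loops a sound file and is triggered by a
# named collision; ``uri`` points at the media and ``contact`` limits playback
# to the collisions listed inside it.
#
#     source = Link.AudioSource(
#         uri="sounds/beep.wav",
#         loop=True,
#         contact=Link.AudioSource.Contact(collision=["bumper_collision"]),
#     )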
@dataclass
class Contact:
"""
List of collision objects that will trigger audio playback.
Parameters
----------
collision: Name of child collision element that will trigger
audio playback.
"""
collision: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["Link.AudioSource.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Battery:
"""
Description of a battery.
Parameters
----------
voltage: Initial voltage in volts.
name: Unique name for the battery.
"""
voltage: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class ParticleEmitter:
"""
A particle emitter that can be used to describe fog, smoke, and dust.
Parameters
----------
emitting: True indicates that the particle emitter should
generate particles when loaded
duration: The number of seconds the emitter is active. A value
less than or equal to zero means infinite duration.
size: The size of the emitter where the particles are sampled.
Default value is (1, 1, 1). Note that the interpretation
of the emitter area varies depending on the emitter
type: - point: The area is ignored. - box: The
area is interpreted as width X height X depth. -
cylinder: The area is interpreted as the bounding box of the
cylinder. The cylinder is oriented along the Z-axis. -
ellipsoid: The area is interpreted as the bounding box of an
ellipsoid shaped area, i.e. a sphere or
squashed-sphere area. The parameters are again
identical to EM_BOX, except that the dimensions
describe the widest points along each of the axes.
particle_size: The particle dimensions (width, height, depth).
lifetime: The number of seconds each particle will 'live' for
before being destroyed. This value must be greater than
zero.
rate: The number of particles per second that should be emitted.
min_velocity: Sets a minimum velocity for each particle (m/s).
max_velocity: Sets a maximum velocity for each particle (m/s).
scale_rate: Sets the amount by which to scale the particles in
both x and y direction per second.
color_start: Sets the starting color for all particles emitted.
The actual color will be interpolated between this color
and the one set under color_end. Color::White is the
default color for the particles unless a specific
function is used. To specify a color, RGB values should
be passed in. For example, to specify red, a user
should enter: <color_start>1 0 0</color_start>. Note that this function
overrides the particle colors set with
color_range_image.
color_end: Sets the end color for all particles emitted. The
actual color will be interpolated between this color and
the one set under color_start. Color::White is the
default color for the particles unless a specific
function is used (see color_start for more information
about defining custom colors with RGB values). Note
that this function overrides the particle colors set
with color_range_image.
color_range_image: Sets the path to the color image used as an
affector. This affector modifies the color of particles in
flight. The colors are taken from a specified image file.
The range of color values begins from the left side of the
image and moves to the right over the lifetime of the
particle, therefore only the horizontal dimension of the
image is used. Note that this function overrides the
particle colors set with color_start and color_end.
topic: Topic used to update particle emitter properties at
runtime. The default topic is
/model/{model_name}/particle_emitter/{emitter_name}
Note that the emitter id and name may not be changed.
particle_scatter_ratio: This is used to determine the ratio of
particles that will be detected by sensors. Increasing
the ratio means there is a higher chance of particles
reflecting and interfering with depth sensing, making the
emitter appear more dense. Decreasing the ratio decreases
the chance of particles reflecting and interfering with
depth sensing, making it appear less dense.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
material: The material of the visual element.
name: A unique name for the particle emitter.
type: The type of a particle emitter. One of "box", "cylinder",
"ellipsoid", or "point".
"""
emitting: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
duration: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
size: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
particle_size: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
lifetime: float = field(
default=5.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
rate: float = field(
default=10.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_velocity: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_velocity: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
scale_rate: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
color_start: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
color_end: str = field(
default="1 1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){3}\+?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s*",
},
)
color_range_image: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
topic: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
particle_scatter_ratio: float = field(
default=0.65,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: Optional["Link.ParticleEmitter.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
material: Optional[Material] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
) | 0.955878 | 0.552057 |
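# Editorial note: these binding modules are plain xsdata-style dataclasses, so
# (assuming the usual xsdata tooling is available) an SDF snippet can be loaded
# with the stock parser, for example
#
#     from xsdata.formats.dataclass.parsers import XmlParser
#     joint = XmlParser().from_string(sdf_joint_xml, Joint)
#
# The exact entry point exposed by scikit-bot may differ; this is only a sketch.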
from dataclasses import dataclass, field
from typing import List, Optional
from .sensor import Sensor
__NAMESPACE__ = "sdformat/v1.6/joint.xsd"
@dataclass
class Joint:
"""A joint connects two links with kinematic and dynamic properties.
By default, the pose of a joint is expressed in the child link
frame.
Parameters
----------
parent: Name of the parent link
child: Name of the child link
gearbox_ratio: Parameter for gearbox joints. Given theta_1 and
theta_2 defined in description for gearbox_reference_body,
theta_2 = -gearbox_ratio * theta_1.
gearbox_reference_body: Parameter for gearbox joints. Gearbox ratio
is enforced over two joint angles. First joint angle (theta_1)
is the angle from the gearbox_reference_body to the parent link
in the direction of the axis element and the second joint angle
(theta_2) is the angle from the gearbox_reference_body to the
child link in the direction of the axis2 element.
thread_pitch: Parameter for screw joints.
axis: Parameters related to the axis of rotation for revolute
joints, the axis of translation for prismatic joints.
axis2: Parameters related to the second axis of rotation for
revolute2 joints and universal joints.
physics: Parameters that are specific to a certain physics engine.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
sensor: The sensor tag describes the type and properties of a
sensor.
name: A unique name for the joint within the scope of the model.
type: The type of joint, which must be one of the following:
(continuous) a hinge joint that rotates on a single axis with a
continuous range of motion, (revolute) a hinge joint that
rotates on a single axis with a fixed range of motion,
(gearbox) geared revolute joints, (revolute2) same as two
revolute joints connected in series, (prismatic) a sliding
joint that slides along an axis with a limited range specified
by upper and lower limits, (ball) a ball and socket joint,
(screw) a single degree of freedom joint with coupled sliding
and rotational motion, (universal) like a ball joint, but
constrains one degree of freedom, (fixed) a joint with
zero degrees of freedom that rigidly connects two links.
"""
class Meta:
name = "joint"
parent: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
child: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gearbox_ratio: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gearbox_reference_body: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
thread_pitch: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
axis: Optional["Joint.Axis"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
axis2: Optional["Joint.Axis2"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
physics: Optional["Joint.Physics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Joint.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Joint.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sensor: List[Sensor] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
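# Usage sketch: a revolute joint between two links, rotating about Z with
# position limits; ``name`` and ``type`` are XML attributes, the remaining
# members are child elements, and all values shown here are illustrative.
#
#     joint = Joint(
#         name="shoulder",
#         type="revolute",
#         parent="base_link",
#         child="upper_arm",
#         axis=Joint.Axis(
#             xyz="0 0 1",
#             limit=Joint.Axis.Limit(lower=-1.57, upper=1.57),
#         ),
#     )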
@dataclass
class Axis:
"""
Parameters related to the axis of rotation for revolute joints,
the axis of translation for prismatic joints.
Parameters
----------
initial_position: Default joint position for this joint axis.
xyz: Represents the x,y,z components of the axis unit vector.
The axis is expressed in the joint frame unless the
use_parent_model_frame flag is set to true. The
vector should be normalized.
use_parent_model_frame: Flag to interpret the axis xyz element
in the parent model frame instead of joint frame.
Provided for Gazebo compatibility (see
https://github.com/osrf/gazebo/issue/494 ).
dynamics: An element specifying physical properties of the
joint. These values are used to specify modeling properties
of the joint, particularly useful for simulation.
limit: specifies the limits of this joint
"""
initial_position: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
xyz: str = field(
default="0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
use_parent_model_frame: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dynamics: Optional["Joint.Axis.Dynamics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
limit: Optional["Joint.Axis.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Dynamics:
"""An element specifying physical properties of the joint.
These values are used to specify modeling properties of the
joint, particularly useful for simulation.
Parameters
----------
damping: The physical velocity dependent viscous damping
coefficient of the joint.
friction: The physical static friction value of the joint.
spring_reference: The spring reference position for this
joint axis.
spring_stiffness: The spring stiffness for this joint axis.
"""
damping: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_reference: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_stiffness: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Limit:
"""
specifies the limits of this joint.
Parameters
----------
lower: Specifies the lower joint limit (radians for revolute
joints, meters for prismatic joints). Omit if joint is
continuous.
upper: Specifies the upper joint limit (radians for revolute
joints, meters for prismatic joints). Omit if joint is
continuous.
effort: A value for enforcing the maximum joint effort
applied. Limit is not enforced if value is negative.
velocity: A value for enforcing the maximum joint velocity.
stiffness: Joint stop stiffness.
dissipation: Joint stop dissipation.
"""
lower: float = field(
default=-1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
upper: float = field(
default=1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
effort: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
stiffness: float = field(
default=100000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dissipation: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
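# Note: for revolute joints the limits above are interpreted in radians and for
# prismatic joints in meters; the negative defaults for effort and velocity are
# conventionally treated as "no limit enforced", matching the docstring.
#
#     limit = Joint.Axis.Limit(lower=0.0, upper=3.14, effort=50.0, velocity=2.0)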
@dataclass
class Axis2:
"""
Parameters related to the second axis of rotation for revolute2 joints
and universal joints.
Parameters
----------
initial_position: Default joint position for this joint axis.
xyz: Represents the x,y,z components of the axis unit vector.
The axis is expressed in the joint frame unless the
use_parent_model_frame flag is set to true. The
vector should be normalized.
use_parent_model_frame: Flag to interpret the axis xyz element
in the parent model frame instead of joint frame.
Provided for Gazebo compatibility (see
https://github.com/osrf/gazebo/issue/494 ).
dynamics: An element specifying physical properties of the
joint. These values are used to specify modeling properties
of the joint, particularly useful for simulation.
limit:
"""
initial_position: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
xyz: str = field(
default="0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
use_parent_model_frame: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dynamics: Optional["Joint.Axis2.Dynamics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
limit: Optional["Joint.Axis2.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Dynamics:
"""An element specifying physical properties of the joint.
These values are used to specify modeling properties of the
joint, particularly useful for simulation.
Parameters
----------
damping: The physical velocity dependent viscous damping
coefficient of the joint. EXPERIMENTAL: if damping
coefficient is negative and implicit_spring_damper is
true, adaptive damping is used.
friction: The physical static friction value of the joint.
spring_reference: The spring reference position for this
joint axis.
spring_stiffness: The spring stiffness for this joint axis.
"""
damping: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_reference: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_stiffness: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Limit:
"""
Parameters
----------
lower: An attribute specifying the lower joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
upper: An attribute specifying the upper joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
effort: An attribute for enforcing the maximum joint effort
applied by Joint::SetForce. Limit is not enforced if
value is negative.
velocity: (not implemented) An attribute for enforcing the
maximum joint velocity.
stiffness: Joint stop stiffness. Supported physics engines:
SimBody.
dissipation: Joint stop dissipation. Supported physics
engines: SimBody.
"""
lower: float = field(
default=-1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
upper: float = field(
default=1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
effort: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
stiffness: float = field(
default=100000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dissipation: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Physics:
"""
Parameters that are specific to a certain physics engine.
Parameters
----------
simbody: Simbody specific parameters
ode: ODE specific parameters
provide_feedback: If provide feedback is set to true, physics
engine will compute the constraint forces at this joint.
For now, provide_feedback under ode block will override this
tag and give a user warning about the migration.
provide_feedback under ode is scheduled to be removed in
SDFormat 1.5.
"""
simbody: Optional["Joint.Physics.Simbody"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ode: Optional["Joint.Physics.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
provide_feedback: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Simbody:
"""
Simbody specific parameters.
Parameters
----------
must_be_loop_joint: Force cut in the multibody graph at this
joint.
"""
must_be_loop_joint: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Ode:
"""
ODE specific parameters.
Parameters
----------
provide_feedback: (DEPRECATION WARNING: In SDFormat 1.5
this tag will be replaced by the same tag directly under
the physics-block. For now, this tag overrides the one
outside of ode-block, but in SDFormat 1.5 this tag will
be removed completely.) If provide feedback is set to
true, ODE will compute the constraint forces at this
joint.
cfm_damping: If cfm damping is set to true, ODE will use CFM
to simulate damping, allows for infinite damping, and
one additional constraint row (previously used for joint
limit) is always active.
implicit_spring_damper: If implicit_spring_damper is set to
true, ODE will use CFM, ERP to simulate stiffness and
damping, allows for infinite damping, and one additional
constraint row (previously used for joint limit) is
always active. This replaces cfm_damping parameter in
SDFormat 1.4.
fudge_factor: Scale the excess force in a joint motor at joint
limits. Should be between zero and one.
cfm: Constraint force mixing for constrained directions
erp: Error reduction parameter for constrained directions
bounce: Bounciness of the limits
max_force: Maximum force or torque used to reach the desired
velocity.
velocity: The desired velocity of the joint. Should only be
set if you want the joint to move on load.
limit:
suspension:
"""
provide_feedback: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
cfm_damping: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
implicit_spring_damper: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fudge_factor: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
bounce: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_force: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
limit: Optional["Joint.Physics.Ode.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
suspension: Optional["Joint.Physics.Ode.Suspension"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Limit:
"""
Parameters
----------
cfm: Constraint force mixing parameter used by the joint
stop
erp: Error reduction parameter used by the joint stop
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Suspension:
"""
Parameters
----------
cfm: Suspension constraint force mixing parameter
erp: Suspension error reduction parameter
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
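# Editorial note: ODE's erp/cfm pairs above map onto an implicit spring-damper.
# For a solver time step h, spring stiffness kp and damping kd, the usual
# correspondence (taken from the ODE manual) is
#     erp = h * kp / (h * kp + kd)
#     cfm = 1.0 / (h * kp + kd)
# which is how the stiffness/damping style parameters elsewhere in this schema
# relate to these constraint-level settings.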
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Joint.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
) | scikit-bot | /scikit-bot-0.14.0.tar.gz/scikit-bot-0.14.0/skbot/ignition/sdformat/bindings/v16/joint.py | joint.py | from dataclasses import dataclass, field
from typing import List, Optional
from .sensor import Sensor
__NAMESPACE__ = "sdformat/v1.6/joint.xsd"
@dataclass
class Joint:
"""A joint connects two links with kinematic and dynamic properties.
By default, the pose of a joint is expressed in the child link
frame.
Parameters
----------
parent: Name of the parent link
child: Name of the child link
gearbox_ratio: Parameter for gearbox joints. Given theta_1 and
theta_2 defined in description for gearbox_reference_body,
theta_2 = -gearbox_ratio * theta_1.
gearbox_reference_body: Parameter for gearbox joints. Gearbox ratio
is enforced over two joint angles. First joint angle (theta_1)
is the angle from the gearbox_reference_body to the parent link
in the direction of the axis element and the second joint angle
(theta_2) is the angle from the gearbox_reference_body to the
child link in the direction of the axis2 element.
thread_pitch: Parameter for screw joints.
axis: Parameters related to the axis of rotation for revolute
joints, the axis of translation for prismatic joints.
axis2: Parameters related to the second axis of rotation for
revolute2 joints and universal joints.
physics: Parameters that are specific to a certain physics engine.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
sensor: The sensor tag describes the type and properties of a
sensor.
name: A unique name for the joint within the scope of the model.
type: The type of joint, which must be one of the following:
(continuous) a hinge joint that rotates on a single axis with a
continuous range of motion, (revolute) a hinge joint that
rotates on a single axis with a fixed range of motion,
(gearbox) geared revolute joints, (revolute2) same as two
revolute joints connected in series, (prismatic) a sliding
joint that slides along an axis with a limited range specified
by upper and lower limits, (ball) a ball and socket joint,
(screw) a single degree of freedom joint with coupled sliding
and rotational motion, (universal) like a ball joint, but
constrains one degree of freedom, (fixed) a joint with
zero degrees of freedom that rigidly connects two links.
"""
class Meta:
name = "joint"
parent: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
child: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gearbox_ratio: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gearbox_reference_body: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
thread_pitch: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
axis: Optional["Joint.Axis"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
axis2: Optional["Joint.Axis2"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
physics: Optional["Joint.Physics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Joint.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Joint.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sensor: List[Sensor] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
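# Usage sketch: a gearbox joint couples two revolute joint angles through
# gearbox_ratio (theta_2 = -gearbox_ratio * theta_1, as described above), so a
# 4:1 reduction referenced to the base link might be written as
#
#     gearbox = Joint(
#         name="reduction",
#         type="gearbox",
#         parent="input_link",
#         child="output_link",
#         gearbox_ratio=4.0,
#         gearbox_reference_body="base_link",
#     )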
@dataclass
class Axis:
"""
Parameters related to the axis of rotation for revolute joints,
the axis of translation for prismatic joints.
Parameters
----------
initial_position: Default joint position for this joint axis.
xyz: Represents the x,y,z components of the axis unit vector.
The axis is expressed in the joint frame unless the
use_parent_model_frame flag is set to true. The
vector should be normalized.
use_parent_model_frame: Flag to interpret the axis xyz element
in the parent model frame instead of joint frame.
Provided for Gazebo compatibility (see
https://github.com/osrf/gazebo/issue/494 ).
dynamics: An element specifying physical properties of the
joint. These values are used to specify modeling properties
of the joint, particularly useful for simulation.
limit: specifies the limits of this joint
"""
initial_position: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
xyz: str = field(
default="0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
use_parent_model_frame: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dynamics: Optional["Joint.Axis.Dynamics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
limit: Optional["Joint.Axis.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Dynamics:
"""An element specifying physical properties of the joint.
These values are used to specify modeling properties of the
joint, particularly useful for simulation.
Parameters
----------
damping: The physical velocity dependent viscous damping
coefficient of the joint.
friction: The physical static friction value of the joint.
spring_reference: The spring reference position for this
joint axis.
spring_stiffness: The spring stiffness for this joint axis.
"""
damping: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_reference: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_stiffness: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Limit:
"""
specifies the limits of this joint.
Parameters
----------
lower: Specifies the lower joint limit (radians for revolute
joints, meters for prismatic joints). Omit if joint is
continuous.
upper: Specifies the upper joint limit (radians for revolute
joints, meters for prismatic joints). Omit if joint is
continuous.
effort: A value for enforcing the maximum joint effort
applied. Limit is not enforced if value is negative.
velocity: A value for enforcing the maximum joint velocity.
stiffness: Joint stop stiffness.
dissipation: Joint stop dissipation.
"""
lower: float = field(
default=-1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
upper: float = field(
default=1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
effort: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
stiffness: float = field(
default=100000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dissipation: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Axis2:
"""
Parameters related to the second axis of rotation for revolute2 joints
and universal joints.
Parameters
----------
initial_position: Default joint position for this joint axis.
xyz: Represents the x,y,z components of the axis unit vector.
The axis is expressed in the joint frame unless the
use_parent_model_frame flag is set to true. The
vector should be normalized.
use_parent_model_frame: Flag to interpret the axis xyz element
in the parent model frame instead of joint frame.
Provided for Gazebo compatibility (see
https://github.com/osrf/gazebo/issue/494 ).
dynamics: An element specifying physical properties of the
joint. These values are used to specify modeling properties
of the joint, particularly useful for simulation.
limit:
"""
initial_position: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
xyz: str = field(
default="0 0 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
use_parent_model_frame: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dynamics: Optional["Joint.Axis2.Dynamics"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
limit: Optional["Joint.Axis2.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Dynamics:
"""An element specifying physical properties of the joint.
These values are used to specify modeling properties of the
joint, particularly useful for simulation.
Parameters
----------
damping: The physical velocity dependent viscous damping
coefficient of the joint. EXPERIMENTAL: if damping
coefficient is negative and implicit_spring_damper is
true, adaptive damping is used.
friction: The physical static friction value of the joint.
spring_reference: The spring reference position for this
joint axis.
spring_stiffness: The spring stiffness for this joint axis.
"""
damping: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_reference: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
spring_stiffness: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Limit:
"""
Parameters
----------
lower: An attribute specifying the lower joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
upper: An attribute specifying the upper joint limit
(radians for revolute joints, meters for prismatic
joints). Omit if joint is continuous.
effort: An attribute for enforcing the maximum joint effort
applied by Joint::SetForce. Limit is not enforced if
value is negative.
velocity: (not implemented) An attribute for enforcing the
maximum joint velocity.
stiffness: Joint stop stiffness. Supported physics engines:
SimBody.
dissipation: Joint stop dissipation. Supported physics
engines: SimBody.
"""
lower: float = field(
default=-1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
upper: float = field(
default=1e16,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
effort: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
stiffness: float = field(
default=100000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dissipation: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Physics:
"""
Parameters that are specific to a certain physics engine.
Parameters
----------
simbody: Simbody specific parameters
ode: ODE specific parameters
provide_feedback: If provide feedback is set to true, physics
engine will compute the constraint forces at this joint.
For now, provide_feedback under ode block will override this
tag and give a user warning about the migration.
provide_feedback under ode is scheduled to be removed in
SDFormat 1.5.
"""
simbody: Optional["Joint.Physics.Simbody"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ode: Optional["Joint.Physics.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
provide_feedback: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Simbody:
"""
Simbody specific parameters.
Parameters
----------
must_be_loop_joint: Force cut in the multibody graph at this
joint.
"""
must_be_loop_joint: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Ode:
"""
ODE specific parameters.
Parameters
----------
provide_feedback: (DEPRECATION WARNING: In SDFormat 1.5
this tag will be replaced by the same tag directly under
the physics-block. For now, this tag overrides the one
outside of ode-block, but in SDFormat 1.5 this tag will
be removed completely.) If provide feedback is set to
true, ODE will compute the constraint forces at this
joint.
cfm_damping: If cfm damping is set to true, ODE will use CFM
to simulate damping, allows for infinite damping, and
one additional constraint row (previously used for joint
limit) is always active.
implicit_spring_damper: If implicit_spring_damper is set to
true, ODE will use CFM, ERP to simulate stiffness and
damping, allows for infinite damping, and one additional
constraint row (previously used for joint limit) is
always active. This replaces cfm_damping parameter in
SDFormat 1.4.
fudge_factor: Scale the excess force in a joint motor at joint
limits. Should be between zero and one.
cfm: Constraint force mixing for constrained directions
erp: Error reduction parameter for constrained directions
bounce: Bounciness of the limits
max_force: Maximum force or torque used to reach the desired
velocity.
velocity: The desired velocity of the joint. Should only be
set if you want the joint to move on load.
limit:
suspension:
"""
provide_feedback: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
cfm_damping: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
implicit_spring_damper: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fudge_factor: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
bounce: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_force: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
velocity: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
limit: Optional["Joint.Physics.Ode.Limit"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
suspension: Optional["Joint.Physics.Ode.Suspension"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Limit:
"""
Parameters
----------
cfm: Constraint force mixing parameter used by the joint
stop
erp: Error reduction parameter used by the joint stop
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Suspension:
"""
Parameters
----------
cfm: Suspension constraint force mixing parameter
erp: Suspension error reduction parameter
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Joint.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
        )
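# A minimal usage sketch (assumption: illustrative only, not part of the
# generated SDFormat bindings; the values are arbitrary placeholders). It
# composes the ODE-specific joint physics block from the nested dataclasses
# defined above.
if __name__ == "__main__":
    ode = Joint.Physics.Ode(
        cfm_damping=True,
        limit=Joint.Physics.Ode.Limit(cfm=0.0, erp=0.2),
    )
    physics = Joint.Physics(ode=ode, provide_feedback=True)
    print(physics)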
from dataclasses import dataclass, field
from typing import List, Optional
from .actor import Actor
from .joint import Joint
from .light import Light
from .material import Material
from .model import Model
from .physics import Physics
from .scene import Scene
from .state import State
__NAMESPACE__ = "sdformat/v1.6/world.xsd"
@dataclass
class World:
"""
The world element encapsulates an entire world description including:
models, scene, physics, joints, and plugins.
Parameters
----------
audio: Global audio properties.
wind: The wind tag specifies the type and properties of the wind.
include: Include resources from a URI
gravity: The gravity vector in m/s^2, expressed in a coordinate
frame defined by the spherical_coordinates tag.
magnetic_field: The magnetic vector in Tesla, expressed in a
coordinate frame defined by the spherical_coordinates tag.
atmosphere: The atmosphere tag specifies the type and properties of
the atmosphere model.
gui:
physics: The physics tag specifies the type and properties of the
dynamics engine.
scene: Specifies the look of the environment.
light: The light element describes a light source.
model: The model element defines a complete robot or any other
physical object.
actor: A special kind of model which can have a scripted motion.
This includes both global waypoint type animations and skeleton
animations.
plugin: A plugin is a dynamically loaded chunk of code. It can exist
as a child of world, model, and sensor.
joint: A joint connects two links with kinematic and dynamic
properties. By default, the pose of a joint is expressed in the
child link frame.
road:
spherical_coordinates:
state:
population: The population element defines how and where a set of
models will be automatically populated in Gazebo.
name: Unique name of the world
"""
class Meta:
name = "world"
audio: Optional["World.Audio"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
wind: Optional["World.Wind"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
include: List["World.Include"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
gravity: str = field(
default="0 0 -9.8",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
magnetic_field: str = field(
default="5.5645e-6 22.8758e-6 -42.3884e-6",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
atmosphere: Optional["World.Atmosphere"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
gui: Optional["World.Gui"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
physics: List[Physics] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
scene: Optional[Scene] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
light: List[Light] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
model: List[Model] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
actor: List[Actor] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
plugin: List["World.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
joint: List[Joint] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
road: List["World.Road"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
spherical_coordinates: Optional["World.SphericalCoordinates"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
state: List[State] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
population: List["World.Population"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Audio:
"""
Global audio properties.
Parameters
----------
device: Device to use for audio playback. A value of "default"
will use the system's default audio device. Otherwise,
            specify an audio device file.
"""
device: str = field(
default="default",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Wind:
"""
The wind tag specifies the type and properties of the wind.
Parameters
----------
linear_velocity: Linear velocity of the wind.
"""
linear_velocity: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
@dataclass
class Include:
"""
Include resources from a URI.
Parameters
----------
uri: URI to a resource, such as a model
name: Override the name of the included model.
static: Override the static value of the included model.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
plugin: A plugin is a dynamically loaded chunk of code. It can
exist as a child of world, model, and sensor.
"""
uri: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
static: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: Optional["World.Include.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plugin: List["World.Include.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies
child elements into the SDFormat element so that a
plugin can access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the
filename is not a full path name, the file will be
searched for in the configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Atmosphere:
"""
The atmosphere tag specifies the type and properties of the atmosphere
model.
Parameters
----------
temperature: Temperature at sea level in kelvins.
pressure: Pressure at sea level in pascals.
temperature_gradient: Temperature gradient with respect to
increasing altitude at sea level in units of K/m.
type: The type of the atmosphere engine. Current options are
adiabatic. Defaults to adiabatic if left unspecified.
"""
temperature: float = field(
default=288.15,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pressure: float = field(
default=101325.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
temperature_gradient: float = field(
default=-0.0065,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Gui:
"""
Parameters
----------
camera:
plugin: A plugin is a dynamically loaded chunk of code. It can
exist as a child of world, model, and sensor.
fullscreen:
"""
camera: Optional["World.Gui.Camera"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
plugin: List["World.Gui.Plugin"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
fullscreen: bool = field(
default=False,
metadata={
"type": "Attribute",
},
)
@dataclass
class Camera:
"""
Parameters
----------
view_controller:
projection_type: Set the type of projection for the camera.
Valid values are "perspective" and "orthographic".
track_visual:
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name:
"""
view_controller: str = field(
default="orbit",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
projection_type: str = field(
default="perspective",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
track_visual: Optional["World.Gui.Camera.TrackVisual"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["World.Gui.Camera.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["World.Gui.Camera.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class TrackVisual:
"""
Parameters
----------
name: Name of the tracked visual. If no name is
provided, the remaining settings will be applied
whenever tracking is triggered in the GUI.
min_dist: Minimum distance between the camera and the
tracked visual. This parameter is only used if
static is set to false.
max_dist: Maximum distance between the camera and the
tracked visual. This parameter is only used if
static is set to false.
static: If set to true, the position of the camera is
                    fixed relative to the model or to the world,
depending on the value of the use_model_frame
element. Otherwise, the position of the camera may
vary but the distance between the camera and the
model will depend on the value of the min_dist and
max_dist elements. In any case, the camera will
always follow the model by changing its orientation.
use_model_frame: If set to true, the position of the
camera is relative to the model reference frame,
which means that its position relative to the model
will not change. Otherwise, the position of the
camera is relative to the world reference frame,
which means that its position relative to the world
will not change. This parameter is only used if
static is set to true.
xyz: The position of the camera's reference frame. This
parameter is only used if static is set to true. If
use_model_frame is set to true, the position is
relative to the model reference frame, otherwise it
represents world coordinates.
inherit_yaw: If set to true, the camera will inherit the
yaw rotation of the tracked model. This parameter is
only used if static and use_model_frame are set to
true.
"""
name: str = field(
default="__default__",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_dist: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_dist: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
static: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
use_model_frame: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
xyz: str = field(
default="-5.0 0.0 3.0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
inherit_yaw: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match
another frame defined inside the parent that this
frame is attached to.
"""
pose: Optional["World.Gui.Camera.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined
relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies
child elements into the SDFormat element so that a
plugin can access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the
filename is not a full path name, the file will be
searched for in the configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Plugin:
"""A plugin is a dynamically loaded chunk of code.
It can exist as a child of world, model, and sensor.
Parameters
----------
any_element: This is a special element that should not be
specified in an SDFormat file. It automatically copies child
elements into the SDFormat element so that a plugin can
access the data.
name: A unique name for the plugin, scoped to its parent.
filename: Name of the shared library to load. If the filename is
not a full path name, the file will be searched for in the
configuration paths.
"""
any_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
filename: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Road:
"""
Parameters
----------
width: Width of the road
point: A series of points that define the path of the road.
material: The material of the visual element.
name: Name of the road
"""
width: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
point: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
material: Optional[Material] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class SphericalCoordinates:
"""
Parameters
----------
surface_model: Name of planetary surface model, used to
determine the surface altitude at a given latitude and
longitude. The default is an ellipsoid model of the
earth based on the WGS-84 standard. It is used in Gazebo's
GPS sensor implementation.
        world_frame_orientation: This field identifies how the Gazebo
            world frame is aligned in a geographical sense. The final
            Gazebo world frame orientation is obtained by rotating a
            frame aligned with the following notation by the field
            heading_deg (note that heading_deg corresponds to positive
            yaw rotation in the NED frame, so its inverse specifies
            positive Z-rotation in ENU or NWU). Options are: ENU
            (East-North-Up), NED (North-East-Down), NWU (North-West-Up).
            For example, a world frame specified by setting
            world_frame_orientation="ENU" and heading_deg=-90° is
            effectively equivalent to NWU with a heading of 0°.
latitude_deg: Geodetic latitude at origin of gazebo reference
frame, specified in units of degrees.
longitude_deg: Longitude at origin of gazebo reference frame,
specified in units of degrees.
elevation: Elevation of origin of gazebo reference frame,
specified in meters.
heading_deg: Heading offset of gazebo reference frame, measured
as angle between Gazebo world frame and the
world_frame_orientation type (ENU/NED/NWU). Rotations
about the downward-vector (e.g. North to East) are positive.
The direction of rotation is chosen to be consistent with
compass heading convention (e.g. 0 degrees points
North and 90 degrees points East, positive rotation
indicates counterclockwise rotation when viewed from
top-down direction). The angle is specified in
degrees.
"""
surface_model: str = field(
default="EARTH_WGS84",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
world_frame_orientation: str = field(
default="ENU",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
latitude_deg: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
longitude_deg: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
elevation: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
heading_deg: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Population:
"""
The population element defines how and where a set of models will
be automatically populated in Gazebo.
Parameters
----------
model_count: The number of models to place.
distribution: Specifies the type of object distribution and its
optional parameters.
box: Box shape
cylinder: Cylinder shape
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
model: The model element defines a complete robot or any other
physical object.
name: A unique name for the population. This name must not match
another population in the world.
"""
model_count: int = field(
default=1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
distribution: Optional["World.Population.Distribution"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
box: Optional["World.Population.Box"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
cylinder: Optional["World.Population.Cylinder"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["World.Population.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["World.Population.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
model: Optional[Model] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Distribution:
"""
Specifies the type of object distribution and its optional
parameters.
Parameters
----------
            type: Define how the objects will be placed in the
                specified region. - random: Models placed at random.
                - uniform: Models approximately placed in a 2D grid
                pattern with control over the number of objects.
                - grid: Models evenly placed in a 2D grid pattern.
                The number of objects is not explicitly specified; it
                is based on the number of rows and columns of the
                grid. - linear-x: Models evenly placed in a row along
                the global x-axis. - linear-y: Models evenly placed
                in a row along the global y-axis. - linear-z: Models
                evenly placed in a row along the global z-axis.
rows: Number of rows in the grid.
cols: Number of columns in the grid.
step: Distance between elements of the grid.
"""
type: str = field(
default="random",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
rows: int = field(
default=1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
cols: int = field(
default=1,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
step: str = field(
default="0.5 0.5 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
@dataclass
class Box:
"""
Box shape.
Parameters
----------
size: The three side lengths of the box. The origin of the
box is in its geometric center (inside the center of the
box).
"""
size: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
@dataclass
class Cylinder:
"""
Cylinder shape.
Parameters
----------
radius: Radius of the cylinder
length: Length of the cylinder along the z axis
"""
radius: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
length: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["World.Population.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
            )
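# A minimal usage sketch (assumption: illustrative only, not part of the
# generated SDFormat bindings; names and values are arbitrary placeholders).
# It assembles a small world description from the dataclasses defined above.
if __name__ == "__main__":
    world = World(
        name="example_world",
        gravity="0 0 -9.8",
        atmosphere=World.Atmosphere(type="adiabatic"),
        population=[
            World.Population(
                name="box_population",
                model_count=4,
                distribution=World.Population.Distribution(
                    type="grid", rows=2, cols=2
                ),
                box=World.Population.Box(size="1 1 1"),
            )
        ],
    )
    print(world.name, len(world.population))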
from dataclasses import dataclass, field
from typing import List, Optional
from .light import Light
from .model import Model as ModelModel
__NAMESPACE__ = "sdformat/v1.6/state.xsd"
@dataclass
class Model:
"""
Model state.
Parameters
----------
joint: Joint angle
model: A nested model state element
scale: Scale for the 3 dimensions of the model.
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
link: Link state
name: Name of the model
"""
class Meta:
name = "model"
joint: List["Model.Joint"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
model: List["Model"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
scale: str = field(
default="1 1 1",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: List["Model.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Model.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
link: List["Model.Link"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Joint:
"""
Joint angle.
Parameters
----------
angle: Angle of an axis
name: Name of the joint
"""
angle: List["Model.Joint.Angle"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Angle:
"""
Parameters
----------
value:
axis: Index of the axis.
"""
value: Optional[float] = field(
default=None,
metadata={
"required": True,
},
)
axis: Optional[int] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the frame. This name must not match another frame
defined inside the parent that this frame is attached to.
"""
pose: Optional["Model.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Link:
"""
Link state.
Parameters
----------
velocity: Velocity of the link. The x, y, z components of the
pose correspond to the linear velocity of the link,
and the roll, pitch, yaw components correspond to the
angular velocity of the link
acceleration: Acceleration of the link. The x, y, z components
of the pose correspond to the linear acceleration of
the link, and the roll, pitch, yaw components
correspond to the angular acceleration of the link
wrench: Force and torque applied to the link. The x, y, z
components of the pose correspond to the force applied
to the link, and the roll, pitch, yaw components
correspond to the torque applied to the link
collision: Collision state
frame: A frame of reference to which a pose is relative.
pose: A position(x,y,z) and orientation(roll, pitch yaw) with
respect to the specified frame.
name: Name of the link
"""
velocity: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
acceleration: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
wrench: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
collision: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
frame: List["Model.Link.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["Model.Link.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position(x,y,z) and orientation(roll, pitch yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["Model.Link.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
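# Editorial note: the following commented sketch is not part of the generated
# bindings. It illustrates how the Model state dataclass above might be
# populated by hand; the model/joint names and numeric values are made-up
# examples.
#
#   arm_state = Model(
#       name="arm",
#       joint=[
#           Model.Joint(
#               name="shoulder",
#               angle=[Model.Joint.Angle(value=0.5, axis=0)],
#           )
#       ],
#       pose=Model.Pose(value="0 0 0.1 0 0 0"),
#   )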
@dataclass
class State:
"""
Parameters
----------
sim_time: Simulation time stamp of the state [seconds nanoseconds]
wall_time: Wall time stamp of the state [seconds nanoseconds]
real_time: Real time stamp of the state [seconds nanoseconds]
iterations: Number of simulation iterations.
insertions: A list containing the entire description of entities
inserted.
deletions: A list of names of deleted entities.
model: Model state
light: Light state
world_name: Name of the world this state applies to
"""
class Meta:
name = "state"
sim_time: str = field(
default="0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"white_space": "collapse",
"pattern": r"\d+ \d+",
},
)
wall_time: str = field(
default="0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"white_space": "collapse",
"pattern": r"\d+ \d+",
},
)
real_time: str = field(
default="0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"white_space": "collapse",
"pattern": r"\d+ \d+",
},
)
iterations: int = field(
default=0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
insertions: Optional["State.Insertions"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
deletions: Optional["State.Deletions"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
model: List[Model] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
light: List["State.Light"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
world_name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Insertions:
"""
A list containing the entire description of entities inserted.
Parameters
----------
model: The model element defines a complete robot or any other
physical object.
light: The light element describes a light source.
"""
model: List[ModelModel] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
light: List[Light] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Deletions:
"""
A list of names of deleted entities.
Parameters
----------
name: The name of a deleted entity.
"""
name: List[str] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
"min_occurs": 1,
},
)
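# Editorial note (illustrative only): a deletions record such as
#   State.Deletions(name=["unit_box_0"])
# together with an insertions record listing full model descriptions is how a
# world state expresses entities added to or removed from the world. The
# entity name above is a made-up example.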
@dataclass
class Light:
"""
Light state.
Parameters
----------
frame: A frame of reference to which a pose is relative.
pose: A position (x, y, z) and orientation (roll, pitch, yaw) with
respect to the specified frame.
name: Name of the light
"""
frame: List["State.Light.Frame"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
},
)
pose: Optional["State.Light.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Frame:
"""
A frame of reference to which a pose is relative.
Parameters
----------
pose: A position (x, y, z) and orientation (roll, pitch, yaw)
with respect to the specified frame.
name: Name of the frame. This name must not match another
frame defined inside the parent that this frame is
attached to.
"""
pose: Optional["State.Light.Frame.Pose"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative
to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
@dataclass
class Pose:
"""
Parameters
----------
value:
frame: Name of frame which the pose is defined relative to.
"""
value: str = field(
default="0 0 0 0 0 0",
metadata={
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
frame: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)


# -- end of skbot/ignition/sdformat/bindings/v16/state.py (scikit-bot 0.14.0) --
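# Editorial note: illustrative round-trip sketch for the state bindings above,
# not part of the generated module. These dataclasses follow xsdata binding
# conventions (nested Meta classes, field metadata), so they are expected to
# work with xsdata's serializer and parser, assuming that package is
# installed. All field values are made-up examples.
#
#   from xsdata.formats.dataclass.serializers import XmlSerializer
#   from xsdata.formats.dataclass.parsers import XmlParser
#
#   snapshot = State(
#       world_name="default",
#       sim_time="12 500000000",
#       iterations=12000,
#       model=[Model(name="arm", pose=Model.Pose(value="0 0 0.1 0 0 0"))],
#   )
#   xml_text = XmlSerializer().render(snapshot)
#   restored = XmlParser().from_string(xml_text, State)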
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "sdformat/v1.6/physics.xsd"
@dataclass
class Physics:
"""
The physics tag specifies the type and properties of the dynamics engine.
Parameters
----------
max_step_size: Maximum time step size at which every system in
simulation can interact with the states of the world. (was
physics.sdf's dt).
real_time_factor: Target simulation speedup factor, defined as the
ratio of simulation time to real time.
real_time_update_rate: Rate at which to update the physics engine
(UpdatePhysics calls per real-time second). (was physics.sdf's
update_rate).
max_contacts: Maximum number of contacts allowed between two
entities. This value can be overridden by a max_contacts
element in a collision element.
dart: DART specific physics properties
simbody: Simbody specific physics properties
bullet: Bullet specific physics properties
ode: ODE specific physics properties
name: The name of this set of physics parameters.
default: If true, this physics element is set as the default physics
profile for the world. If multiple default physics elements
exist, the first element marked as default is chosen. If no
default physics element exists, the first physics element is
chosen.
type: The type of the dynamics engine. Current options are ode,
bullet, simbody and dart. Defaults to ode if left unspecified.
"""
class Meta:
name = "physics"
max_step_size: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
real_time_factor: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
real_time_update_rate: float = field(
default=1000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=20,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dart: Optional["Physics.Dart"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
simbody: Optional["Physics.Simbody"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
bullet: Optional["Physics.Bullet"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
ode: Optional["Physics.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: str = field(
default="default_physics",
metadata={
"type": "Attribute",
},
)
default: bool = field(
default=False,
metadata={
"type": "Attribute",
},
)
type: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Dart:
"""
DART specific physics properties.
Parameters
----------
solver:
collision_detector: Specify collision detector for DART to use.
Can be dart, fcl, bullet or ode.
"""
solver: Optional["Physics.Dart.Solver"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
collision_detector: str = field(
default="fcl",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Solver:
"""
Parameters
----------
solver_type: One of the following types: pgs, dantzig. PGS
stands for Projected Gauss-Seidel.
"""
solver_type: str = field(
default="dantzig",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
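# Editorial note (illustrative only, not part of the generated bindings): a
# DART-backed physics profile could be assembled from the classes above, e.g.
#
#   dart_physics = Physics(
#       type="dart",
#       dart=Physics.Dart(
#           collision_detector="bullet",
#           solver=Physics.Dart.Solver(solver_type="pgs"),
#       ),
#   )
#
# The collision detector and solver choices are made-up examples taken from
# the options listed in the docstrings above.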
@dataclass
class Simbody:
"""
Simbody specific physics properties.
Parameters
----------
min_step_size: (Currently not used in simbody) The time duration
which advances with each iteration of the dynamics engine; this
has to be no bigger than max_step_size under the physics block.
If left unspecified, min_step_size defaults to max_step_size.
accuracy: Roughly the relative error of the system.
-LOG(accuracy) is roughly the number of significant digits.
max_transient_velocity: Tolerable "slip" velocity allowed by the
solver when static friction is supposed to hold an object in
place.
contact: Relationship among dissipation, coef. restitution, etc.
    d = dissipation coefficient (1/velocity)
    vc = capture velocity (velocity where e = e_max)
    vp = plastic velocity (smallest v where e = e_min), vp > vc
    Assume real COR = 1 when v = 0.
    e_min = given minimum COR, at v >= vp (a.k.a. plastic_coef_restitution)
    d = slope = (1 - e_min)/vp, or e_min = 1 - d*vp
    e_max = maximum COR = 1 - d*vc, reached at v = vc
    e = 0,         v <= vc
      = 1 - d*v,   vc < v < vp
      = e_min,     v >= vp
    dissipation factor = d*min(v, vp)   [compliant]
    cor = e                             [rigid]
    Combining rule: e = 0,                e1 == e2 == 0
                      = 2*e1*e2/(e1+e2),  otherwise
"""
min_step_size: float = field(
default=0.0001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
accuracy: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_transient_velocity: float = field(
default=0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact: Optional["Physics.Simbody.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Contact:
"""Relationship among dissipation, coef.
restitution, etc. d = dissipation coefficient (1/velocity) vc = capture velocity (velocity where e=e_max) vp = plastic velocity (smallest v where e=e_min) &gt; vc Assume real COR=1 when v=0. e_min = given minimum COR, at v &gt;= vp (a.k.a. plastic_coef_restitution) d = slope = (1-e_min)/vp OR, e_min = 1 - d*vp e_max = maximum COR = 1-d*vc, reached at v=vc e = 0, v &lt;= vc = 1 - d*v, vc &lt; v &lt; vp = e_min, v &gt;= vp dissipation factor = d*min(v,vp) [compliant] cor = e [rigid] Combining rule e = 0, e1==e2==0 = 2*e1*e2/(e1+e2), otherwise
Parameters
----------
stiffness: Default contact material stiffness
(force/dist or torque/radian).
dissipation: dissipation coefficient to be used in compliant
contact; if not given it is
(1-min_cor)/plastic_impact_velocity
plastic_coef_restitution: this is the COR to be used at high
velocities for rigid impacts; if not given it is 1 -
dissipation*plastic_impact_velocity
plastic_impact_velocity: smallest impact velocity at which
min COR is reached; set to zero if you want the
min COR always to be used
static_friction: static friction (mu_s) as described by this
plot:
http://gazebosim.org/wiki/File:Stribeck_friction.png
dynamic_friction: dynamic friction (mu_d) as described by
this plot:
http://gazebosim.org/wiki/File:Stribeck_friction.png
viscous_friction: viscous friction (mu_v) with units of
(1/velocity) as described by this plot:
http://gazebosim.org/wiki/File:Stribeck_friction.png
override_impact_capture_velocity: for rigid impacts only,
impact velocity at which COR is set to zero;
normally inherited from global default but can
be overridden here. Combining rule: use larger velocity
override_stiction_transition_velocity: This is the largest
slip velocity at which we'll consider a
transition to stiction. Normally inherited
from a global default setting. For a continuous friction
model this is the velocity at which the max
static friction force is reached. Combining
rule: use larger velocity
"""
stiffness: float = field(
default=100000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dissipation: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plastic_coef_restitution: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
plastic_impact_velocity: float = field(
default=0.5,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
static_friction: float = field(
default=0.9,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
dynamic_friction: float = field(
default=0.9,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
viscous_friction: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
override_impact_capture_velocity: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
override_stiction_transition_velocity: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
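# Editorial note: worked numeric illustration of the dissipation/COR relations
# documented in the Contact docstring above (values are made-up, not defaults):
#   with e_min = 0.5 and vp = 0.5 (plastic velocity), d = (1 - e_min)/vp = 1.0;
#   with vc = 0.01 (capture velocity), e_max = 1 - d*vc = 0.99.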
@dataclass
class Bullet:
"""
Bullet specific physics properties.
Parameters
----------
solver:
constraints: Bullet constraint parameters.
"""
solver: Optional["Physics.Bullet.Solver"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
constraints: Optional["Physics.Bullet.Constraints"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Solver:
"""
Parameters
----------
type: One of the following types: sequential_impulse only.
min_step_size: The time duration which advances with each
iteration of the dynamics engine; this has to be no bigger
than max_step_size under the physics block. If left
unspecified, min_step_size defaults to max_step_size.
iters: Number of iterations for each step. A higher number
produces greater accuracy at a performance cost.
sor: Set the successive over-relaxation parameter.
"""
type: str = field(
default="sequential_impulse",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_step_size: float = field(
default=0.0001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iters: int = field(
default=50,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sor: float = field(
default=1.3,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Constraints:
"""
Bullet constraint parameters.
Parameters
----------
cfm: Constraint force mixing parameter. See the ODE page for
more information.
erp: Error reduction parameter. See the ODE page for more
information.
contact_surface_layer: The depth of the surface layer around
all geometry objects. Contacts are allowed to sink into
the surface layer up to the given depth before coming to
rest. The default value is zero. Increasing this to some
small value (e.g. 0.001) can help prevent jittering
problems due to contacts being repeatedly made and
broken.
split_impulse: Similar to ODE's max_vel implementation. See
http://web.archive.org/web/20120430155635/http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
split_impulse_penetration_threshold: Similar to ODE's
max_vel implementation. See
http://web.archive.org/web/20120430155635/http://bulletphysics.org/mediawiki-1.5.8/index.php/BtContactSolverInfo#Split_Impulse
for more information.
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_surface_layer: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse: bool = field(
default=True,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
split_impulse_penetration_threshold: float = field(
default=-0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Ode:
"""
ODE specific physics properties.
Parameters
----------
solver:
constraints: ODE constraint parameters.
"""
solver: Optional["Physics.Ode.Solver"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
constraints: Optional["Physics.Ode.Constraints"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Solver:
"""
Parameters
----------
type: One of the following types: world, quick
min_step_size: The time duration which advances with each
iteration of the dynamics engine; this has to be no bigger
than max_step_size under the physics block. If left
unspecified, min_step_size defaults to max_step_size.
island_threads: Number of threads to use for "islands" of
disconnected models.
iters: Number of iterations for each step. A higher number
produces greater accuracy at a performance cost.
precon_iters: Experimental parameter.
sor: Set the successive over-relaxation parameter.
thread_position_correction: Flag to use threading to speed
up position correction computation.
use_dynamic_moi_rescaling: Flag to enable dynamic rescaling
of moment of inertia in constrained directions.
See gazebo pull request 1114 for the implementation of
this feature. https://osrf-
migration.github.io/gazebo-gh-pages/#!/osrf/gazebo/pull-
request/1114
friction_model: Name of ODE friction model to use. Valid
values include: pyramid_model: (default)
friction forces limited in two directions in
proportion to normal force. box_model:
friction forces limited to constant in two directions.
cone_model: friction force magnitude limited in
proportion to normal force. See gazebo pull
request 1522 for the implementation of this feature.
https://osrf-migration.github.io/gazebo-gh-
pages/#!/osrf/gazebo/pull-request/1522
https://github.com/osrf/gazebo/commit/968dccafdfbfca09c9b3326f855612076fed7e6f
"""
type: str = field(
default="quick",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_step_size: float = field(
default=0.0001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
island_threads: int = field(
default=0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
iters: int = field(
default=50,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
precon_iters: int = field(
default=0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
sor: float = field(
default=1.3,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
thread_position_correction: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
use_dynamic_moi_rescaling: bool = field(
default=False,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
friction_model: str = field(
default="pyramid_model",
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Constraints:
"""
ODE constraint parameters.
Parameters
----------
cfm: Constraint force mixing parameter. See the ODE page for
more information.
erp: Error reduction parameter. See the ODE page for more
information.
contact_max_correcting_vel: The maximum correcting
velocities allowed when resolving contacts.
contact_surface_layer: The depth of the surface layer around
all geometry objects. Contacts are allowed to sink into
the surface layer up to the given depth before coming to
rest. The default value is zero. Increasing this to some
small value (e.g. 0.001) can help prevent jittering
problems due to contacts being repeatedly made and
broken.
"""
cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_max_correcting_vel: float = field(
default=100.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
contact_surface_layer: float = field(
default=0.001,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)


# -- end of skbot/ignition/sdformat/bindings/v16/physics.py (scikit-bot 0.14.0) --
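# Editorial note: illustrative sketch for the physics bindings above, not part
# of the generated module. It shows an ODE-backed profile built from the nested
# dataclasses; the profile name, step size, and solver settings are made-up
# examples.
#
#   ode_physics = Physics(
#       name="fast_ode",
#       type="ode",
#       max_step_size=0.002,
#       real_time_update_rate=500.0,
#       ode=Physics.Ode(
#           solver=Physics.Ode.Solver(type="quick", iters=100, sor=1.4),
#           constraints=Physics.Ode.Constraints(cfm=0.0, erp=0.2),
#       ),
#   )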