# skdiveMove/imutools/imu.py
import numpy as np
import pandas as pd
import allantools as allan
import ahrs.filters as filters
from scipy import constants, signal, integrate
from sklearn import preprocessing
from skdiveMove.tdrsource import _load_dataset
from .allan import allan_coefs
from .vector import rotate_vector
_TIME_NAME = "timestamp"
_DEPTH_NAME = "depth"
_ACCEL_NAME = "acceleration"
_OMEGA_NAME = "angular_velocity"
_MAGNT_NAME = "magnetic_density"
class IMUBase:
"""Define IMU data source
Use :class:`xarray.Dataset` to ensure pseudo-standard metadata.
Attributes
----------
imu_file : str
String indicating the file where the data comes from.
imu : xarray.Dataset
Dataset with input data.
imu_var_names : list
Names of the data variables with accelerometer, angular velocity,
and magnetic density measurements.
has_depth : bool
Whether input data include depth measurements.
depth_name : str
Name of the data variable with depth measurements.
time_name : str
Name of the time dimension in the dataset.
quats : numpy.ndarray
Array of quaternions representing the orientation relative to the
frame of the IMU object data. Note that the scalar component is
last, following `scipy`'s convention.
Examples
--------
This example illustrates some of the issues encountered while reading
data files in a real-world scenario. ``scikit-diveMove`` includes a
NetCDF file with IMU signals collected using a Samsung Galaxy S5 mobile
phone. Set up instance from NetCDF example data:
>>> import pkg_resources as pkg_rsrc
>>> import os.path as osp
>>> import xarray as xr
>>> import skdiveMove.imutools as imutools
>>> icdf = (pkg_rsrc
... .resource_filename("skdiveMove",
... osp.join("tests", "data",
... "samsung_galaxy_s5.nc")))
The angular velocity and magnetic density arrays have two sets of
measurements: output and measured, which, along with the sensor axis
designation, constitute a multi-index. These multi-indices can be
rebuilt prior to instantiating IMUBase, as they provide significant
advantages for indexing later:
>>> s5ds = (xr.load_dataset(icdf)
... .set_index(gyroscope=["gyroscope_type", "gyroscope_axis"],
... magnetometer=["magnetometer_type",
... "magnetometer_axis"]))
>>> imu = imutools.IMUBase(s5ds.sel(gyroscope="output",
... magnetometer="output"),
... imu_filename=icdf)
See :doc:`demo_allan` demo for an extended example of typical usage of
the methods in this class.
"""
def __init__(self, dataset,
acceleration_name=_ACCEL_NAME,
angular_velocity_name=_OMEGA_NAME,
magnetic_density_name=_MAGNT_NAME,
time_name=_TIME_NAME,
has_depth=False, depth_name=_DEPTH_NAME,
imu_filename=None):
"""Set up attributes for IMU objects
Parameters
----------
dataset : xarray.Dataset
Dataset containing IMU sensor DataArrays, and optionally other
DataArrays.
acceleration_name : str, optional
Name of the acceleration ``DataArray`` in the ``Dataset``.
angular_velocity_name : str, optional
Name of the angular velocity ``DataArray`` in the ``Dataset``.
magnetic_density_name : str, optional
Name of the magnetic density ``DataArray`` in the ``Dataset``.
time_name : str, optional
Name of the time dimension in the dataset.
has_depth : bool, optional
Whether input data include depth measurements.
depth_name : str, optional
Name of the depth ``DataArray`` in the ``Dataset``.
imu_filename : str, optional
Name of the file from which ``dataset`` originated.
"""
self.time_name = time_name
self.imu = dataset
self.imu_var_names = [acceleration_name,
angular_velocity_name,
magnetic_density_name]
if has_depth:
self.has_depth = True
self.depth_name = depth_name
else:
self.has_depth = False
self.depth_name = None
self.imu_file = imu_filename
self.quats = None
@classmethod
def read_netcdf(cls, imu_file,
acceleration_name=_ACCEL_NAME,
angular_velocity_name=_OMEGA_NAME,
magnetic_density_name=_MAGNT_NAME,
time_name=_TIME_NAME,
has_depth=False, depth_name=_DEPTH_NAME,
**kwargs):
"""Instantiate object by loading Dataset from NetCDF file
Provided all ``DataArray`` in the NetCDF file have the same
dimensions (N, 3), this is an efficient way to instantiate.
Parameters
----------
imu_file : str
As first argument for :func:`xarray.load_dataset`.
acceleration_name : str, optional
Name of the acceleration ``DataArray`` in the ``Dataset``.
angular_velocity_name : str, optional
Name of the angular velocity ``DataArray`` in the ``Dataset``.
magnetic_density_name : str, optional
Name of the magnetic density ``DataArray`` in the ``Dataset``.
time_name : str, optional
Name of the time dimension in the dataset.
has_depth : bool, optional
Whether input data include depth measurements.
depth_name : str, optional
Name of the depth ``DataArray`` in the ``Dataset``.
**kwargs : optional keyword arguments
Arguments passed to :func:`xarray.load_dataset`.
Returns
-------
obj : IMUBase
Class matches the caller.
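Examples
--------
A minimal sketch, assuming ``icdf`` points to a NetCDF file such as the
one used in the class docstring (not executed here):

>>> imu = IMUBase.read_netcdf(icdf)  # doctest: +SKIP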
"""
dataset = _load_dataset(imu_file, **kwargs)
return cls(dataset, acceleration_name=acceleration_name,
angular_velocity_name=angular_velocity_name,
magnetic_density_name=magnetic_density_name,
time_name=time_name, has_depth=has_depth,
depth_name=depth_name, imu_filename=imu_file)
def __str__(self):
x = self.imu
objcls = ("IMU -- Class {} object\n"
.format(self.__class__.__name__))
src = "{0:<20} {1}\n".format("Source File", self.imu_file)
imu_desc = "IMU: {}".format(x.__str__())
return objcls + src + imu_desc
def _allan_deviation(self, sensor, taus):
"""Compute Allan deviation for all axes of a given sensor
Currently uses the modified Allan deviation in package
`allantools`.
Parameters
----------
sensor : str
Attribute name of the sensor of interest
taus : float, str
Tau value, in seconds, for which to compute statistic. Can be
one of "octave" or "decade" for automatic generation of the
value.
Returns
-------
pandas.DataFrame
Allan deviation and error for each sensor axis. DataFrame
index is the averaging time `tau` for each estimate.
"""
sensor_obj = getattr(self, sensor)
sampling_rate = sensor_obj.attrs["sampling_rate"]
sensor_std = preprocessing.scale(sensor_obj, with_std=False)
allan_l = []
for axis in sensor_std.T:
taus, adevs, errs, ns = allan.mdev(axis, rate=sampling_rate,
data_type="freq",
taus=taus)
# taus is common to all sensor axes
adevs_df = pd.DataFrame(np.column_stack((adevs, errs)),
columns=["allan_dev", "error"],
index=taus)
allan_l.append(adevs_df)
keys = [sensor + "_" + i for i in list("xyz")]
devs = pd.concat(allan_l, axis=1, keys=keys)
return devs
def allan_coefs(self, sensor, taus):
"""Estimate Allan deviation coefficients for each error type
This procedure implements the autonomous regression method for
Allan variance described in [1]_.
Given averaging intervals ``taus`` and corresponding Allan
deviation ``adevs``, compute the Allan deviation coefficient for
each error type:
- Quantization
- (Angle, Velocity) Random Walk
- Bias Instability
- Rate Random Walk
- Rate Ramp
Parameters
----------
sensor : str
Attribute name of the sensor of interest
taus : float, str
Tau value, in seconds, for which to compute statistic. Can be
one of "octave" or "decade" for automatic generation of the
value.
Returns
-------
coefs_all : pandas.DataFrame
Allan deviation coefficient and corresponding averaging time
for each sensor axis and error type.
adevs : pandas.DataFrame
`MultiIndex` DataFrame with Allan deviation, corresponding
averaging time, and fitted ARMAV model estimates of the
coefficients for each sensor axis and error type.
Notes
-----
Currently uses a modified Allan deviation formula.
.. [1] Jurado, J, Schubert Kabban, CM, Raquet, J (2019). A
regression-based methodology to improve estimation of
inertial sensor errors using Allan variance data. Navigation
66:251-263.
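Examples
--------
A minimal sketch, assuming ``imu`` is an instance with regularly sampled
gyroscope data (not executed here):

>>> coefs, adevs = imu.allan_coefs("angular_velocity",
...                                taus="octave")  # doctest: +SKIP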
"""
adevs_errs = self._allan_deviation(sensor, taus)
taus = adevs_errs.index.to_numpy()
adevs = adevs_errs.xs("allan_dev", level=1, axis=1).to_numpy()
coefs_l = []
fitted_l = []
for adevs_i in adevs.T:
coefs_i, adevs_fitted = allan_coefs(taus, adevs_i)
# Parse output for dataframe
coefs_l.append(pd.Series(coefs_i))
fitted_l.append(adevs_fitted)
keys = [sensor + "_" + i for i in list("xyz")]
coefs_all = pd.concat(coefs_l, keys=keys, axis=1)
fitted_all = pd.DataFrame(np.column_stack(fitted_l), columns=keys,
index=taus)
fitted_all.columns = (pd.MultiIndex
.from_tuples([(c, "fitted")
for c in fitted_all]))
adevs = (pd.concat([adevs_errs, fitted_all], axis=1)
.sort_index(axis=1))
return (coefs_all, adevs)
def compute_orientation(self, method="Madgwick", **kwargs):
"""Compute the orientation of IMU tri-axial signals
The method must be one of the following estimators implemented in
Python module :mod:`ahrs.filters`:
- ``AngularRate``: Attitude from angular rate
- ``AQUA``: Algebraic quaternion algorithm
- ``Complementary``: Complementary filter
- ``Davenport``: Davenport's q-method
- ``EKF``: Extended Kalman filter
- ``FAAM``: Fast accelerometer-magnetometer combination
- ``FLAE``: Fast linear attitude estimator
- ``Fourati``: Fourati's nonlinear attitude estimation
- ``FQA``: Factored quaternion algorithm
- ``Madgwick``: Madgwick orientation filter
- ``Mahony``: Mahony orientation filter
- ``OLEQ``: Optimal linear estimator quaternion
- ``QUEST``
- ``ROLEQ``: Recursive optimal linear estimator of quaternion
- ``SAAM``: Super-fast attitude from accelerometer and magnetometer
- ``Tilt``: Attitude from gravity
The estimated quaternions are stored in the ``quats`` attribute.
Parameters
----------
method : str, optional
Name of the filtering method to use.
**kwargs : optional keyword arguments
Arguments passed to filtering method.
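Examples
--------
A minimal sketch, assuming ``imu`` is an :class:`IMUBase` instance with
accelerometer, gyroscope, and magnetometer data (not executed here):

>>> imu.compute_orientation(method="Madgwick")  # doctest: +SKIP
>>> imu.quats.shape  # doctest: +SKIP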
"""
orienter_cls = getattr(filters, method)
orienter = orienter_cls(acc=self.acceleration,
gyr=self.angular_velocity,
mag=self.magnetic_density,
Dt=self.sampling_interval,
**kwargs)
self.quats = orienter.Q
def dead_reckon(self, g=constants.g, Wn=1.0, k=1.0):
"""Calculate position assuming orientation is already known
Integrate dynamic acceleration in the body frame to calculate
velocity and position. If the IMU instance has a depth signal, it
is used in the integration instead of acceleration in the vertical
dimension.
Parameters
----------
g : float, optional
Assume gravity (:math:`m / s^2`) is equal to this value.
Default to standard gravity.
Wn : float, optional
Cutoff frequency for second-order Butterworth lowpass filter.
k : float, optional
Scalar to apply to scale lowpass-filtered dynamic acceleration.
This scaling has the effect of making position estimates
realistic for dead-reckoning tracking purposes.
Returns
-------
vel, pos : numpy.ndarray
Velocity and position 2D arrays.
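Examples
--------
A minimal sketch, assuming ``imu.compute_orientation()`` has already
populated the ``quats`` attribute (not executed here):

>>> vel, pos = imu.dead_reckon(Wn=0.5, k=1.0)  # doctest: +SKIP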
"""
# From the quaternions and the measured acceleration, derive the
# dynamic acceleration (d^2x/dt^2). The sampling frequency retrieved
# below is assumed to be common to all sensors.
fs = self.acceleration.attrs["sampling_rate"]
# Shift quaternions to scalar last to match convention
quats = np.roll(self.quats, -1, axis=1)
g_v = rotate_vector(np.array([0, 0, g]), quats, inverse=True)
acc_sensor = self.acceleration - g_v
acc_space = rotate_vector(acc_sensor, quats, inverse=False)
# Low-pass Butterworth filter design
b, a = signal.butter(2, Wn, btype="lowpass", output="ba", fs=fs)
acc_space_f = signal.filtfilt(b, a, acc_space, axis=0)
# Position and Velocity through integration, assuming 0-velocity at t=0
vel = integrate.cumulative_trapezoid(acc_space_f / k, dx=1.0 / fs,
initial=0, axis=0)
# Use depth derivative (on FLU) for the vertical dimension
if self.has_depth:
pos_z = self.depth
zdiff = np.append([0], np.diff(pos_z))
vel[:, -1] = -zdiff
pos = np.nan * np.ones_like(acc_space)
pos[:, -1] = pos_z
pos[:, :2] = (integrate
.cumulative_trapezoid(vel[:, :2], dx=1.0 / fs,
axis=0, initial=0))
else:
pos = integrate.cumulative_trapezoid(vel, dx=1.0 / fs,
axis=0, initial=0)
return vel, pos
def _get_acceleration(self):
# Acceleration name is the first
return self.imu[self.imu_var_names[0]]
acceleration = property(_get_acceleration)
"""Return acceleration array
Returns
-------
xarray.DataArray
"""
def _get_angular_velocity(self):
# Angular velocity name is the second
return self.imu[self.imu_var_names[1]]
angular_velocity = property(_get_angular_velocity)
"""Return angular velocity array
Returns
-------
xarray.DataArray
"""
def _get_magnetic_density(self):
# Magnetic density name is the last one
return self.imu[self.imu_var_names[-1]]
magnetic_density = property(_get_magnetic_density)
"""Return magnetic_density array
Returns
-------
xarray.DataArray
"""
def _get_depth(self):
return getattr(self.imu, self.depth_name)
depth = property(_get_depth)
"""Return depth array
Returns
-------
xarray.DataArray
"""
def _get_sampling_interval(self):
# Retrieve sampling rate from one DataArray
sampling_rate = self.acceleration.attrs["sampling_rate"]
sampling_rate_units = (self.acceleration
.attrs["sampling_rate_units"])
if sampling_rate_units.lower() == "hz":
itvl = 1.0 / sampling_rate
else:
itvl = sampling_rate
return itvl
sampling_interval = property(_get_sampling_interval)
"""Return sampling interval
Assuming all `DataArray`s have the same interval, the sampling interval
is retrieved from the acceleration `DataArray`.
Returns
-------
float
Warnings
--------
The sampling rate is retrieved from the attribute named `sampling_rate`
in the NetCDF file, which is assumed to be in Hz units.
""" | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/imu.py | imu.py | import numpy as np
import logging
import re
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.signal as signal
import xarray as xr
from skdiveMove.tdrsource import _load_dataset
from .imu import (IMUBase,
_ACCEL_NAME, _OMEGA_NAME, _MAGNT_NAME, _DEPTH_NAME)
_TRIAXIAL_VARS = [_ACCEL_NAME, _OMEGA_NAME, _MAGNT_NAME]
_MONOAXIAL_VARS = [_DEPTH_NAME, "light_levels"]
_AXIS_NAMES = list("xyz")
logger = logging.getLogger(__name__)
# Add the null handler when imported as a library; code using this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class IMUcalibrate(IMUBase):
r"""Calibration framework for IMU measurements
Measurements from most IMU sensors are influenced by temperature, among
other artifacts. The IMUcalibrate class implements the following
procedure to remove the effects of temperature from IMU signals:
- For each axis, fit a piecewise or simple linear regression of
measured (lowpass-filtered) data against temperature.
- Compute predicted signal from the model.
- Select a reference temperature :math:`T_{\alpha}` to standardize all
measurements at.
- The standardized measurement (:math:`x_\sigma`) at :math:`T_{\alpha}`
is calculated as:
.. math::
:label: 5
x_\sigma = x - (\hat{x} - \hat{x}_{\alpha})
where :math:`\hat{x}` is the value predicted from the model at the
measured temperature, and :math:`\hat{x}_{\alpha}` is the predicted
value at :math:`T_{\alpha}`.
The models fit to signals from a *motionless* (i.e. experimental) IMU
device in the first step can subsequently be used to remove or minimize
temperature effects from an IMU device measuring motions of interest,
provided the temperature is within the range observed in experiments.
In addition to attributes in :class:`IMUBase`, ``IMUcalibrate`` adds
the attributes listed below.
Attributes
----------
periods : list
List of slices with the beginning and ending timestamps defining
periods in ``x_calib`` where valid calibration data are available.
Periods are assumed to be ordered chronologically.
models_l : list
List of dictionaries as long as there are periods, with each
element corresponding to a sensor, in turn containing another
dictionary with each element corresponding to each sensor axis.
axis_order : list
List of characters specifying which axis ``x``, ``y``, or ``z`` was
pointing in the same direction as gravity in each period in
``periods``.
Examples
--------
Construct IMUcalibrate from NetCDF file with samples of IMU signals and
a list with beginning and ending timestamps for experimental periods:
>>> import pkg_resources as pkg_rsrc
>>> import os.path as osp
>>> import skdiveMove.imutools as imutools
>>> icdf = (pkg_rsrc
... .resource_filename("skdiveMove",
... osp.join("tests", "data",
... "cats_temperature_calib.nc")))
>>> pers = [slice("2021-09-20T09:00:00", "2021-09-21T10:33:00"),
... slice("2021-09-21T10:40:00", "2021-09-22T11:55:00"),
... slice("2021-09-22T12:14:00", "2021-09-23T11:19:00")]
>>> imucal = (imutools.IMUcalibrate
... .read_netcdf(icdf, periods=pers,
... axis_order=list("zxy"),
... time_name="timestamp_utc"))
>>> print(imucal) # doctest: +ELLIPSIS
IMU -- Class IMUcalibrate object
Source File None
IMU: <xarray.Dataset>
Dimensions: (timestamp_utc: 268081, axis: 3)
Coordinates:
* axis (axis) object 'x' 'y' 'z'
* timestamp_utc (timestamp_utc) datetime64[ns] ...
Data variables:
acceleration (timestamp_utc, axis) float64 ...
angular_velocity (timestamp_utc, axis) float64 ...
magnetic_density (timestamp_utc, axis) float64 ...
depth (timestamp_utc) float64 ...
temperature (timestamp_utc) float64 ...
Attributes:...
history: Resampled from 20 Hz to 1 Hz
Periods:
0:['2021-09-20T09:00:00', '2021-09-21T10:33:00']
1:['2021-09-21T10:40:00', '2021-09-22T11:55:00']
2:['2021-09-22T12:14:00', '2021-09-23T11:19:00']
Plot signals from a given period:
>>> fig, axs, axs_temp = imucal.plot_experiment(0, var="acceleration")
Build temperature models for a given variable and chosen
:math:`T_{\alpha}`, without low-pass filtering the input signals:
>>> fs = 1.0
>>> acc_cal = imucal.build_tmodels("acceleration", T_alpha=8,
... use_axis_order=True,
... win_len=int(2 * 60 * fs) - 1)
Plot model of IMU variable against temperature:
>>> fig, axs = imucal.plot_var_model("acceleration",
... use_axis_order=True)
Notes
-----
This class redefines :meth:`IMUBase.read_netcdf`.
"""
def __init__(self, x_calib, periods, axis_order=list("xyz"),
**kwargs):
"""Set up attributes required for calibration
Parameters
----------
x_calib : xarray.Dataset
Dataset with temperature and tri-axial data from *motionless*
IMU experiments. Data are assumed to be in FLU coordinate
frame.
periods : list
List of slices with the beginning and ending timestamps
defining periods in `x_calib` where valid calibration data are
available. Periods are assumed to be ordered chronologically.
axis_order : list
List of characters specifying which axis ``x``, ``y``, or ``z``
was pointing in the same direction as gravity in each period in
``periods``.
**kwargs : optional keyword arguments
Arguments passed to the `IMUBase.__init__` for instantiation.
"""
super(IMUcalibrate, self).__init__(x_calib, **kwargs)
self.periods = periods
models_l = []
for period in periods:
models_1d = {i: dict() for i in _MONOAXIAL_VARS}
models_2d = dict.fromkeys(_TRIAXIAL_VARS)
for k in models_2d:
models_2d[k] = dict.fromkeys(axis_order)
models_l.append(dict(**models_1d, **models_2d))
self.models_l = models_l
self.axis_order = axis_order
# Private attribute collecting DataArrays with standardized data
self._stdda_l = []
@classmethod
def read_netcdf(cls, imu_nc, load_dataset_kwargs=dict(), **kwargs):
"""Create IMUcalibrate from NetCDF file and list of slices
This method redefines :meth:`IMUBase.read_netcdf`.
Parameters
----------
imu_nc : str
Path to NetCDF file.
load_dataset_kwargs : dict, optional
Dictionary of optional keyword arguments passed to
:func:`xarray.load_dataset`.
**kwargs : optional keyword arguments
Additional arguments passed to :meth:`IMUcalibrate.__init__`
method, except ``has_depth`` or ``imu_filename``. The input
``Dataset`` is assumed to have a depth ``DataArray``.
Returns
-------
out : IMUcalibrate
Class matches the caller.
"""
imu = _load_dataset(imu_nc, **load_dataset_kwargs)
ocls = cls(imu, **kwargs)
return ocls
def __str__(self):
super_str = super(IMUcalibrate, self).__str__()
pers_ends = []
for per in self.periods:
pers_ends.append([per.start, per.stop])
msg = ("\n".join("{}:{}".format(i, per)
for i, per in enumerate(pers_ends)))
return super_str + "\nPeriods:\n{}".format(msg)
def savgol_filter(self, var, period_idx, win_len, polyorder=1):
"""Apply Savitzky-Golay filter on tri-axial IMU signals
Parameters
----------
var : str
Name of the variable in ``x`` with tri-axial signals.
period_idx : int
Index of period to plot (zero-based).
win_len : int
Window length for the low pass filter.
polyorder : int, optional
Polynomial order to use.
Returns
-------
xarray.DataArray
Array with filtered signals, with the same coordinates,
dimensions, and updated attributes.
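Examples
--------
A minimal sketch, assuming ``imucal`` was set up as in the class
docstring with data sampled at 1 Hz (not executed here):

>>> acc_sg = imucal.savgol_filter("acceleration", period_idx=0,
...                               win_len=119)  # doctest: +SKIP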
"""
darray = self.subset_imu(period_idx)[var]
var_df = darray.to_dataframe().unstack()
var_sg = signal.savgol_filter(var_df, window_length=win_len,
polyorder=polyorder, axis=0)
new_history = (("{}: Savitzky-Golay filter: win_len={}, "
"polyorder={}\n")
.format(pd.to_datetime("today")
.strftime("%Y-%m-%d"), win_len, polyorder))
darray_new = xr.DataArray(var_sg, coords=darray.coords,
dims=darray.dims, name=darray.name,
attrs=darray.attrs)
darray_new.attrs["history"] = (darray_new.attrs["history"] +
new_history)
return darray_new
def build_tmodels(self, var, T_alpha=None, T_brk=None,
use_axis_order=False, filter_sig=True, **kwargs):
r"""Build temperature models for experimental tri-axial IMU sensor signals
Perform thermal compensation on *motionless* tri-axial IMU sensor
data. A simple approach is used for the compensation:
- For each axis, build a piecewise or simple linear regression of
measured data against temperature. If a breakpoint is known,
as per manufacturer specifications or experimentation, use
piecewise regression.
- Compute predicted signal from the model.
- Select a reference temperature :math:`T_{\alpha}` to
standardize all measurements at.
- The standardized measurement at :math:`T_{\alpha}` is
calculated as :math:`x - (\hat{x} - x_{T_{\alpha}})`, where
:math:`\hat{x}` is the value predicted from the model at the
measured temperature, and :math:`x_{T_{\alpha}}` is the
predicted value at :math:`T_{\alpha}`.
Parameters
----------
var : str
Name of the variable in `x` with tri-axial data.
T_alpha : float, optional
Reference temperature at which all measurements will be
adjusted to. Defaults to the mean temperature for each period,
rounded to the nearest integer.
T_brk : float, optional
Temperature change point separating data to be fit differently.
If provided, a piecewise regression model is fit; by default, a
simple linear model is fit.
use_axis_order : bool, optional
Whether to use axis order from the instance. If True, only one
sensor axis per period is considered to have valid calibration
data for the correction. Otherwise, all three axes for each
period are used in the correction.
filter_sig : bool, optional
Whether to low-pass filter the measured signal before fitting the
thermal correction. By default, a Savitzky-Golay filter is applied
to the signal, both for characterizing the temperature relationship
and for calculating the standardized signal.
**kwargs : optional keyword arguments
Arguments passed to `savgol_filter` (e.g. ``win_len`` and
``polyorder``).
Returns
-------
list
List of tuples as long as there are periods, with tuple elements:
- Dictionary with regression model objects for each sensor
axis.
- DataFrame with hierarchical column index with sensor axis
label at the first level. The following columns are in the
second level:
- temperature
- var_name
- var_name_pred
- var_name_temp_refC
- var_name_adj
Notes
-----
A new DataArray with signal standardized at :math:`T_{\alpha}` is
added to the instance Dataset. These signals correspond to the
lowpass-filtered form of the input used to build the models.
See Also
--------
apply_model
"""
# Iterate through periods
per_l = [] # output list as long as periods
for idx in range(len(self.periods)):
per = self.subset_imu(idx)
# Subset the requested variable, smoothing if necessary
if filter_sig:
per_var = self.savgol_filter(var, idx, **kwargs)
else:
per_var = per[var]
per_temp = per["temperature"]
var_df = xr.merge([per_var, per_temp]).to_dataframe()
if T_alpha is None:
t_alpha = np.rint(per_temp.mean().to_numpy().item())
logger.info("Period {} T_alpha set to {:.2f}"
.format(idx, t_alpha))
else:
t_alpha = T_alpha
odata_l = []
models_d = self.models_l[idx]
if use_axis_order:
axis_names = [self.axis_order[idx]]
elif len(per_var.dims) > 1:
axis_names = per_var[per_var.dims[-1]].to_numpy()
else:
axis_names = [per_var.dims[0]]
std_colname = "{}_std".format(var)
pred_colname = "{}_pred".format(var)
for i, axis in enumerate(axis_names): # do all axes
if isinstance(var_df.index, pd.MultiIndex):
data_axis = var_df.xs(axis, level="axis").copy()
else:
data_axis = var_df.copy()
if T_brk is not None:
temp0 = (data_axis["temperature"]
.where(data_axis["temperature"] < T_brk, 0))
data_axis.loc[:, "temp0"] = temp0
temp1 = (data_axis["temperature"]
.where(data_axis["temperature"] > T_brk, 0))
data_axis.loc[:, "temp1"] = temp1
fmla = "{} ~ temperature + temp0 + temp1".format(var)
else:
fmla = "{} ~ temperature".format(var)
model_fit = smf.ols(formula=fmla, data=data_axis).fit()
models_d[var][axis] = model_fit
data_axis.loc[:, pred_colname] = model_fit.fittedvalues
# Data at reference temperature
ref_colname = "{}_{}C".format(var, t_alpha)
if T_brk is not None:
if t_alpha < T_brk:
pred = model_fit.predict(exog=dict(
temperature=t_alpha,
temp0=t_alpha, temp1=0)).to_numpy().item()
data_axis[ref_colname] = pred
else:
pred = model_fit.predict(exog=dict(
temperature=t_alpha,
temp0=0, temp1=t_alpha)).to_numpy().item()
data_axis[ref_colname] = pred
data_axis.drop(["temp0", "temp1"], axis=1, inplace=True)
else:
pred = model_fit.predict(exog=dict(
temperature=t_alpha)).to_numpy().item()
data_axis.loc[:, ref_colname] = pred
logger.info("Predicted {} ({}, rounded) at {:.2f}: {:.3f}"
.format(var, axis, t_alpha, pred))
data_axis[std_colname] = (data_axis[var] -
(data_axis[pred_colname] -
data_axis[ref_colname]))
odata_l.append(data_axis)
# Update instance models_l attribute
self.models_l[idx][var][axis] = model_fit
if var in _MONOAXIAL_VARS:
odata = pd.concat(odata_l)
std_data = xr.DataArray(odata.loc[:, std_colname],
name=std_colname)
else:
odata = pd.concat(odata_l, axis=1,
keys=axis_names[:i + 1],
names=["axis", "variable"])
std_data = xr.DataArray(odata.xs(std_colname,
axis=1, level=1),
name=std_colname)
per_l.append((models_d, odata))
std_data.attrs = per_var.attrs
new_description = ("{} standardized at {}C"
.format(std_data.attrs["description"],
t_alpha))
std_data.attrs["description"] = new_description
new_history = ("{}: temperature_model: temperature models\n"
.format(pd.to_datetime("today")
.strftime("%Y-%m-%d")))
std_data.attrs["history"] = (std_data.attrs["history"] +
new_history)
# Update instance _std_da_l attribute with DataArray having an
# additional dimension for the period index
std_data = std_data.expand_dims(period=[idx])
self._stdda_l.append(std_data)
return per_l
def plot_experiment(self, period_idx, var, units_label=None, **kwargs):
"""Plot experimental IMU
Parameters
----------
period_idx : int
Index of period to plot (zero-based).
var : str
Name of the variable with tri-axial data.
units_label : str, optional
Label for the units of the chosen variable. Defaults to the
"units_label" attribute available in the DataArray.
**kwargs : optional keyword arguments
Arguments passed to :func:`~matplotlib.pyplot.subplots`
(e.g. ``figsize``).
Returns
-------
fig : matplotlib.Figure
axs : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with IMU signal plots.
axs_temp : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with temperature plots.
See Also
--------
plot_var_model
plot_standardized
"""
per_da = self.subset_imu(period_idx)
per_var = per_da[var]
per_temp = per_da["temperature"]
def _plot(var, temp, ax):
"""Plot variable and temperature"""
ax_temp = ax.twinx()
var.plot.line(ax=ax, label="measured", color="k",
linewidth=0.5)
temp.plot.line(ax=ax_temp, label="temperature", color="r",
linewidth=0.5, alpha=0.5)
ax.set_title("")
ax.set_xlabel("")
# Adjust ylim to exclude outliers
ax.set_ylim(var.quantile(1e-5).to_numpy().item(),
var.quantile(1 - 1e-5).to_numpy().item())
# Axis locators and formatters
dlocator = mdates.AutoDateLocator(minticks=3, maxticks=7)
dformatter = mdates.ConciseDateFormatter(dlocator)
ax.xaxis.set_major_locator(dlocator)
ax.xaxis.set_major_formatter(dformatter)
ax.xaxis.set_tick_params(rotation=0)
return ax_temp
if units_label is None:
units_label = per_var.attrs["units_label"]
ylabel_pre = "{} [{}]".format(per_var.attrs["full_name"],
units_label)
temp_label = "{} [{}]".format(per_temp.attrs["full_name"],
per_temp.attrs["units_label"])
ndims = len(per_var.dims)
axs_temp = []
if ndims == 1:
fig, axs = plt.subplots(**kwargs)
ax_temp = _plot(per_var, per_temp, axs)
axs.set_xlabel("")
axs.set_title("")
axs.set_ylabel(ylabel_pre)
ax_temp.set_ylabel(temp_label)
axs_temp.append(ax_temp)
else:
fig, axs = plt.subplots(3, 1, sharex=True, **kwargs)
ax_x, ax_y, ax_z = axs
axis_names = per_var[per_var.dims[-1]].to_numpy()
for i, axis in enumerate(axis_names):
ymeasured = per_var.sel(axis=axis)
ax_temp = _plot(ymeasured, per_temp, axs[i])
axs[i].set_title("")
axs[i].set_xlabel("")
axs[i].set_ylabel("{} {}".format(ylabel_pre, axis))
if i == 1:
ax_temp.set_ylabel(temp_label)
else:
ax_temp.set_ylabel("")
axs_temp.append(ax_temp)
ax_z.set_xlabel("")
return fig, axs, axs_temp
def plot_var_model(self, var, use_axis_order=True, units_label=None,
axs=None, **kwargs):
"""Plot IMU variable against temperature and fitted model
A multi-panel plot of the selected variable against temperature
from all periods.
Parameters
----------
var : str
IMU variable to plot.
use_axis_order : bool
Whether to use axis order from the instance. If True, only one
sensor axis per period is considered to have valid calibration
data for the correction. Otherwise, all three axes for each
period are used in the correction. Ignored for uniaxial
variables.
units_label : str
Label for the units of the chosen variable. Defaults to the
"units_label" attribute available in the DataArray.
axs : array_like, optional
Array of Axes instances to plot in.
**kwargs : optional keyword arguments
Arguments passed to :func:`~matplotlib.pyplot.subplots`
(e.g. ``figsize``).
Returns
-------
fig : matplotlib.Figure
axs : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with IMU signal plots.
See Also
--------
plot_experiment
plot_standardized
"""
def _plot_signal(x, y, idx, model_fit, ax):
ax.plot(x, y, ".", markersize=2, alpha=0.03,
label="Period {}".format(idx))
# Adjust ylim to exclude outliers
ax.set_ylim(np.quantile(y, 1e-3), np.quantile(y, 1 - 1e-3))
# Linear model
xpred = np.linspace(x.min(), x.max())
ypreds = (model_fit
.get_prediction(exog=dict(Intercept=1,
temperature=xpred))
.summary_frame())
ypred_0 = ypreds["mean"]
ypred_l = ypreds["obs_ci_lower"]
ypred_u = ypreds["obs_ci_upper"]
ax.plot(xpred, ypred_0, color="k", alpha=0.5)
ax.plot(xpred, ypred_l, color="k", linestyle="dashed",
linewidth=1, alpha=0.5)
ax.plot(xpred, ypred_u, color="k", linestyle="dashed",
linewidth=1, alpha=0.5)
per0 = self.subset_imu(0)
if units_label is None:
units_label = per0[var].attrs["units_label"]
xlabel = "{} [{}]".format(per0["temperature"].attrs["full_name"],
per0["temperature"].attrs["units_label"])
ylabel_pre = "{} [{}]".format(per0[var].attrs["full_name"],
units_label)
nperiods = len(self.periods)
if axs is not None:
fig = plt.gcf()
if var in _MONOAXIAL_VARS:
if axs is None:
fig, axs = plt.subplots(1, nperiods, **kwargs)
for per_i in range(nperiods):
peri = self.subset_imu(per_i)
per_var = peri[var]
per_temp = peri["temperature"]
xdata = per_temp.to_numpy()
ydata = per_var.to_numpy()
# Linear model
model_fit = self.get_model(var, period=per_i,
axis=per_var.dims[0])
ax_i = axs[per_i]
_plot_signal(x=xdata, y=ydata, idx=per_i,
model_fit=model_fit, ax=ax_i)
ax_i.set_xlabel(xlabel)
axs[0].set_ylabel(ylabel_pre)
elif use_axis_order:
if axs is None:
fig, axs = plt.subplots(3, 1, **kwargs)
axs[-1].set_xlabel(xlabel)
for i, axis in enumerate(_AXIS_NAMES):
idx = self.axis_order.index(axis)
peri = self.subset_imu(idx)
xdata = peri["temperature"].to_numpy()
ydata = peri[var].sel(axis=axis).to_numpy()
# Linear model
model_fit = self.get_model(var, period=idx, axis=axis)
ax_i = axs[i]
_plot_signal(xdata, y=ydata, idx=idx,
model_fit=model_fit, ax=ax_i)
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
ax_i.legend(loc=9, bbox_to_anchor=(0.5, 1),
frameon=False, borderaxespad=0)
else:
if axs is None:
fig, axs = plt.subplots(3, nperiods, **kwargs)
for vert_i in range(nperiods):
peri = self.subset_imu(vert_i)
xdata = peri["temperature"].to_numpy()
axs_xyz = axs[:, vert_i]
for i, axis in enumerate(_AXIS_NAMES):
ydata = (peri[var].sel(axis=axis).to_numpy())
# Linear model
model_fit = self.get_model(var, period=vert_i,
axis=axis)
ax_i = axs_xyz[i]
_plot_signal(xdata, y=ydata, idx=vert_i,
model_fit=model_fit, ax=ax_i)
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
axs_xyz[0].set_title("Period {}".format(vert_i))
axs_xyz[-1].set_xlabel(xlabel)
return fig, axs
def plot_standardized(self, var, use_axis_order=True, units_label=None,
ref_val=None, axs=None, **kwargs):
r"""Plot IMU measured and standardized variable along with temperature
A multi-panel time series plot of the selected variable, measured
and standardized, for all periods.
Parameters
----------
var : str
IMU variable to plot.
use_axis_order : bool, optional
Whether to use axis order from the instance. If True, only one
sensor axis per period is considered to have valid calibration
data for the correction. Otherwise, all three axes for each
period are used in the correction.
units_label : str, optional
Label for the units of the chosen variable. Defaults to the
"units_label" attribute available in the DataArray.
ref_val : float
Reference value for the chosen variable (e.g. gravity, for
acceleration). If provided, a horizontal line is included in
the plot for reference.
axs : array_like, optional
Array of Axes instances to plot in.
**kwargs : optional keyword arguments
Arguments passed to :func:`~matplotlib.pyplot.subplots`
(e.g. ``figsize``).
Returns
-------
fig : matplotlib.Figure
axs : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with IMU signal plots.
axs_temp : array_like
Array of :class:`~matplotlib.axes.Axes` instances in ``fig``
with temperature plots.
See Also
--------
plot_experiment
plot_var_model
"""
def _plot_signal(ymeasured, ystd, temp, ax, neg_ref=False):
ax_temp = ax.twinx()
(ymeasured.plot.line(ax=ax, label="measured", color="k",
linewidth=0.5))
(ystd.plot.line(ax=ax, label="standardized", color="b",
linewidth=0.5, alpha=0.5))
temp.plot.line(ax=ax_temp, label="temperature", color="r",
linewidth=0.5, alpha=0.5)
txt_desc = ystd.attrs["description"]
t_alpha_match = re.search(r'[-+]?\d+\.\d+', txt_desc)
ax_temp.axhline(float(txt_desc[t_alpha_match.start():
t_alpha_match.end()]),
linestyle="dashed", linewidth=1,
color="r", label=r"$T_\alpha$")
q0 = ymeasured.quantile(1e-5).to_numpy().item()
q1 = ymeasured.quantile(1 - 11e-5).to_numpy().item()
if ref_val is not None:
# Assumption of FLU with axes pointing against field
if neg_ref:
ref_i = -ref_val
else:
ref_i = ref_val
ax.axhline(ref_i, linestyle="dashdot", color="m",
linewidth=1, label="reference")
ylim0 = np.minimum(q0, ref_i)
ylim1 = np.maximum(q1, ref_i)
else:
ylim0 = q0
ylim1 = q1
ax.set_title("")
ax.set_xlabel("")
# Adjust ylim to exclude outliers
ax.set_ylim(ylim0, ylim1)
# Axis locators and formatters
dlocator = mdates.AutoDateLocator(minticks=3, maxticks=7)
dformatter = mdates.ConciseDateFormatter(dlocator)
ax.xaxis.set_major_locator(dlocator)
ax.xaxis.set_major_formatter(dformatter)
ax.xaxis.set_tick_params(rotation=0)
return ax_temp
per0 = self.subset_imu(0)
if units_label is None:
units_label = per0[var].attrs["units_label"]
ylabel_pre = "{} [{}]".format(per0[var].attrs["full_name"],
units_label)
var_std = var + "_std"
nperiods = len(self.periods)
if axs is not None:
fig = plt.gcf()
std_ds = xr.merge(self._stdda_l)
if var in _MONOAXIAL_VARS:
if axs is None:
fig, axs = plt.subplots(1, nperiods, **kwargs)
axs_temp = np.empty_like(axs)
for per_i in range(nperiods):
peri = self.subset_imu(per_i)
per_var = peri[var]
per_std = std_ds.loc[dict(period=per_i)][var_std]
per_temp = peri["temperature"]
ax_i = axs[per_i]
ax_temp = _plot_signal(per_var, ystd=per_std,
temp=per_temp, ax=ax_i)
ax_i.set_ylabel(ylabel_pre)
axs_temp[per_i] = ax_temp
# legend at center top panel
axs[1].legend(loc=9, bbox_to_anchor=(0.5, 1.15), ncol=3,
frameon=False, borderaxespad=0)
# Temperature legend at the bottom
axs_temp[1].legend(loc=9, bbox_to_anchor=(0.5, -0.23), ncol=2,
frameon=False, borderaxespad=0)
elif use_axis_order:
if axs is None:
fig, axs = plt.subplots(3, 1, sharex=False, **kwargs)
axs_temp = np.empty_like(axs)
for i, axis in enumerate(_AXIS_NAMES):
idx = self.axis_order.index(axis)
peri = self.subset_imu(idx)
per_var = peri[var].sel(axis=axis, drop=True)
per_std = (std_ds.loc[dict(period=idx)][var_std]
.sel(axis=axis, drop=True))
per_temp = peri["temperature"]
ax_i = axs[i]
if axis == "x":
neg_ref = True
else:
neg_ref = False
ax_temp = _plot_signal(per_var, ystd=per_std,
temp=per_temp, ax=ax_i,
neg_ref=neg_ref)
ax_i.set_xlabel("Period {}".format(idx))
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
axs_temp[i] = ax_temp
# legend at top panel
axs[0].legend(loc=9, bbox_to_anchor=(0.5, 1.15), ncol=3,
frameon=False, borderaxespad=0)
# Temperature legend at the bottom
axs_temp[i].legend(loc=9, bbox_to_anchor=(0.5, -0.23), ncol=2,
frameon=False, borderaxespad=0)
else:
if axs is None:
fig, axs = plt.subplots(3, nperiods, **kwargs)
axs_temp = np.empty_like(axs)
for vert_i in range(nperiods):
axs_xyz = axs[:, vert_i]
for i, axis in enumerate(_AXIS_NAMES):
peri = self.subset_imu(vert_i)
per_var = peri[var].sel(axis=axis, drop=True)
per_std = (std_ds.loc[dict(period=vert_i)][var_std]
.sel(axis=axis, drop=True))
per_temp = peri["temperature"]
ax_i = axs_xyz[i]
ax_temp = _plot_signal(per_var, ystd=per_std,
temp=per_temp, ax=ax_i)
axs_temp[i, vert_i] = ax_temp
if vert_i == 0:
ax_i.set_ylabel("{} {}".format(ylabel_pre, axis))
else:
ax_i.set_ylabel("")
axs_xyz[0].set_title("Period {}".format(vert_i))
# legend at bottom panel
leg0 = axs[-1, 1].legend(loc=9, bbox_to_anchor=(0.5, -0.23),
ncol=3, frameon=False, borderaxespad=0)
# Temperature legend at bottom panel
leg1 = axs_temp[-1, 1].legend(loc=9, bbox_to_anchor=(0.5, -0.37),
ncol=2, frameon=False,
borderaxespad=0)
axs[-1, 1].add_artist(leg0)
axs_temp[-1, 1].add_artist(leg1)
return fig, axs, axs_temp
def get_model(self, var, period, axis=None):
"""Retrieve linear model for a given IMU sensor axis signal
Parameters
----------
var : str
Name of the variable to calculate offset for.
period : int
Period containing calibration model to use.
axis : str, optional
Name of the sensor axis the signal comes from, if `var` is
tri-axial; ignored otherwise.
Returns
-------
RegressionResultsWrapper
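Examples
--------
Retrieve the model fit for the ``z`` accelerometer axis from the first
period, assuming ``imucal`` was set up and ``build_tmodels`` was run as
in the class docstring (not executed here):

>>> fit_z = imucal.get_model("acceleration", period=0, axis="z")  # doctest: +SKIP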
"""
if var in _MONOAXIAL_VARS:
model_d = self.models_l[period][var]
model_fit = [*model_d.values()][0]
else:
model_fit = self.models_l[period][var][axis]
return model_fit
def get_offset(self, var, period, T_alpha, ref_val, axis=None):
"""Calculate signal ofset at given temperature from calibration model
Parameters
----------
var : str
Name of the variable to calculate offset for.
period : int
Period (zero-based) containing calibration model to use.
T_alpha : float
Temperature at which to compute offset.
ref_val : float
Reference value for the chosen variable (e.g. gravity, for
acceleration).
axis : str, optional
Name of the sensor axis the signal comes from, if ``var`` is
tri-axial; ignored otherwise.
Returns
-------
float
Notes
-----
For obtaining offset and gain of magnetometer signals, the
ellipsoid method from the ``ellipsoid`` module yields far more
accurate results, as it allows for the simultaneous
three-dimensional estimation of the offset.
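Examples
--------
A sketch of the offset of the ``x`` accelerometer axis relative to
standard gravity, using the period in which ``x`` pointed along gravity
in the class docstring example (not executed here):

>>> off_x = imucal.get_offset("acceleration", period=1, T_alpha=8,
...                           ref_val=9.80665, axis="x")  # doctest: +SKIP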
"""
if var in _MONOAXIAL_VARS:
model_fit = self.get_model(var, period=period)
else:
model_fit = self.get_model(var, period=period, axis=axis)
ypred = (model_fit.predict(exog=dict(temperature=T_alpha))
.to_numpy().item())
logger.info("Predicted {} ({}, rounded) at T_alpha: {:.3f}"
.format(var, axis, ypred))
offset = ypred - ref_val
return offset
def apply_model(self, var, dataset, T_alpha=None, ref_vals=None,
use_axis_order=True, model_idx=None):
"""Apply fitted temperature compensation model to Dataset
The selected models for tri-axial sensor data are applied to input
Dataset, standardizing signals at :math:`T_{\alpha}`, optionally
subtracting the offset at :math:`T_{\alpha}`.
Parameters
----------
var : str
Name of the variable with tri-axial data.
dataset : xarray.Dataset
Dataset with temperature and tri-axial data from motionless IMU.
T_alpha : float, optional
Reference temperature at which all measurements will be
adjusted to. Default is the mean temperature in the input
dataset.
ref_vals : list, optional
Sequence of three floats with target values to compare against
the signal from each sensor axis. If provided, the offset of
each signal at :math:`T_{\alpha}` is computed and subtracted from
the temperature-standardized signal. The order should be the
same as in the `axis_order` attribute if `use_axis_order` is
True, or ``x``, ``y``, ``z`` otherwise.
use_axis_order : bool, optional
Whether to use axis order from the instance. If True, retrieve
model to apply using instance's ``axis_order`` attribute.
Otherwise, use the models defined by ``model_idx`` argument.
Ignored if `var` is monoaxial.
model_idx : list or int, optional
Sequence of three integers identifying the period (zero-based)
from which to retrieve the models for ``x``, ``y``, and ``z``
sensor axes, in that order. If ``var`` is monoaxial, an integer
specifying the period for the model to use. Ignored if
``use_axis_order`` is True.
Returns
-------
xarray.DataArray
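Examples
--------
A sketch applying the fitted acceleration models to a hypothetical
field-deployment dataset ``field_ds`` (not executed here):

>>> acc_std = imucal.apply_model("acceleration", field_ds, T_alpha=8,
...                              use_axis_order=True)  # doctest: +SKIP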
"""
temp_obs = dataset["temperature"]
darray = dataset[var]
if T_alpha is None:
T_alpha = temp_obs.mean().item()
logger.info("T_alpha set to {:.2f}".format(T_alpha))
def _standardize_array(darray, model_fit, period_idx, axis=None):
x_hat = (model_fit
.get_prediction(exog=dict(Intercept=1,
temperature=temp_obs))
.predicted_mean)
x_alpha = (model_fit
.get_prediction(exog=dict(Intercept=1,
temperature=T_alpha))
.predicted_mean)
x_sigma = darray - (x_hat - x_alpha)
if ref_vals is not None:
off = self.get_offset(var, axis=axis, period=period_idx,
T_alpha=T_alpha,
ref_val=ref_vals[period_idx])
x_sigma -= off
return x_sigma
darray_l = []
if var in _MONOAXIAL_VARS:
model_fit = self.get_model(var, period=model_idx)
x_sigma = _standardize_array(darray, model_fit=model_fit,
period_idx=model_idx)
darray_l.append(x_sigma)
elif use_axis_order:
for i, axis in enumerate(_AXIS_NAMES):
idx = self.axis_order.index(axis)
model_fit = self.get_model(var, period=idx, axis=axis)
x_i = darray.sel(axis=axis)
x_sigma = _standardize_array(x_i, model_fit=model_fit,
period_idx=idx, axis=axis)
darray_l.append(x_sigma)
else:
for i, axis in enumerate(_AXIS_NAMES):
model_fit = self.get_model(var, period=model_idx[i],
axis=axis)
x_i = darray.sel(axis=axis)
x_sigma = _standardize_array(x_i, model_fit=model_fit,
period_idx=model_idx[i],
axis=axis)
darray_l.append(x_sigma)
if len(darray_l) > 1:
darray_new = xr.concat(darray_l, dim="axis").transpose()
else:
darray_new = darray_l[0]
darray_new.attrs = darray.attrs
new_history = ("{}: Applied temperature model at: T={}\n"
.format(pd.to_datetime("today")
.strftime("%Y-%m-%d"), T_alpha))
darray_new.attrs["history"] = (darray_new.attrs["history"] +
new_history)
return darray_new
def subset_imu(self, period_idx):
"""Subset IMU dataset given a period index
The dataset is subset using the slice corresponding to the period
index.
Parameters
----------
period_idx : int
Index of the experiment period to subset.
Returns
-------
xarray.Dataset
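
        Examples
        --------
        A minimal sketch, assuming ``imucal`` is an :class:`IMUcalibrate`
        instance such as the one built in the class-level example:

        >>> per0 = imucal.subset_imu(0)  # doctest: +SKIP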
"""
time_name = self.time_name
        return self.imu.loc[{time_name: self.periods[period_idx]}]

scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/imucalibrate.py | imucalibrate.py
import numpy as np
# Types of ellipsoid accepted fits
_ELLIPSOID_FTYPES = ["rxyz", "xyz", "xy", "xz", "yz", "sxyz"]
def fit_ellipsoid(vectors, f="rxyz"):
"""Fit a (non) rotated ellipsoid or sphere to 3D vector data
Parameters
----------
    vectors : (N,3) array
        Array of measured x, y, z vector components.
    f : str
String indicating the model to fit (one of 'rxyz', 'xyz', 'xy',
'xz', 'yz', or 'sxyz'):
rxyz : rotated ellipsoid (any axes)
xyz : non-rotated ellipsoid
xy : radius x=y
xz : radius x=z
yz : radius y=z
sxyz : radius x=y=z sphere
Returns
-------
    otuple : tuple
        Tuple with offset, gain, and rotation matrix, in that order.
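
    Examples
    --------
    A minimal sketch, assuming ``vectors`` is an (N,3) array of raw
    magnetometer measurements; the fit is then re-applied to place the
    measurements on a unit sphere:

    >>> ofs, gain, rotM = fit_ellipsoid(vectors, f="rxyz")  # doctest: +SKIP
    >>> vectors_cal = apply_ellipsoid(vectors, offset=ofs, gain=gain,
    ...                               rotM=rotM, ref_r=1.0)  # doctest: +SKIP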
"""
if f not in _ELLIPSOID_FTYPES:
raise ValueError("f must be one of: {}"
.format(_ELLIPSOID_FTYPES))
x = vectors[:, 0, np.newaxis]
y = vectors[:, 1, np.newaxis]
z = vectors[:, 2, np.newaxis]
if f == "rxyz":
D = np.hstack((x ** 2, y ** 2, z ** 2,
2 * x * y, 2 * x * z, 2 * y * z,
2 * x, 2 * y, 2 * z))
elif f == "xyz":
D = np.hstack((x ** 2, y ** 2, z ** 2,
2 * x, 2 * y, 2 * z))
elif f == "xy":
D = np.hstack((x ** 2 + y ** 2, z ** 2,
2 * x, 2 * y, 2 * z))
elif f == "xz":
D = np.hstack((x ** 2 + z ** 2, y ** 2,
2 * x, 2 * y, 2 * z))
elif f == "yz":
D = np.hstack((y ** 2 + z ** 2, x ** 2,
2 * x, 2 * y, 2 * z))
else: # sxyz
D = np.hstack((x ** 2 + y ** 2 + z ** 2,
2 * x, 2 * y, 2 * z))
v = np.linalg.lstsq(D, np.ones(D.shape[0]), rcond=None)[0]
if f == "rxyz":
A = np.array([[v[0], v[3], v[4], v[6]],
[v[3], v[1], v[5], v[7]],
[v[4], v[5], v[2], v[8]],
[v[6], v[7], v[8], -1]])
ofs = np.linalg.lstsq(-A[:3, :3], v[[6, 7, 8]], rcond=None)[0]
Tmtx = np.eye(4)
Tmtx[3, :3] = ofs
AT = Tmtx @ A @ Tmtx.T # ellipsoid translated to 0, 0, 0
ev, rotM = np.linalg.eig(AT[:3, :3] / -AT[3, 3])
rotM = np.fliplr(rotM)
ev = np.flip(ev)
gain = np.sqrt(1.0 / ev)
else:
if f == "xyz":
v = np.array([v[0], v[1], v[2], 0, 0, 0, v[3], v[4], v[5]])
elif f == "xy":
v = np.array([v[0], v[0], v[1], 0, 0, 0, v[2], v[3], v[4]])
elif f == "xz":
v = np.array([v[0], v[1], v[0], 0, 0, 0, v[2], v[3], v[4]])
elif f == "yz":
v = np.array([v[1], v[0], v[0], 0, 0, 0, v[2], v[3], v[4]])
else:
v = np.array([v[0], v[0], v[0], 0, 0, 0, v[1], v[2], v[3]])
ofs = -(v[6:] / v[:3])
rotM = np.eye(3)
g = 1 + (v[6] ** 2 / v[0] + v[7] ** 2 / v[1] + v[8] ** 2 / v[2])
gain = (np.sqrt(g / v[:3]))
return (ofs, gain, rotM)
def _refine_ellipsoid_fit(gain, rotM):
"""Refine ellipsoid fit"""
# m = 0
# rm = 0
# cm = 0
pass
def apply_ellipsoid(vectors, offset, gain, rotM, ref_r):
"""Apply ellipsoid fit to vector array"""
vectors_new = vectors.copy() - offset
vectors_new = vectors_new @ rotM
# Scale to sphere
vectors_new = vectors_new / gain * ref_r
    return vectors_new

scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/ellipsoid.py | ellipsoid.py
import logging
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from skdiveMove.helpers import rle_key
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def nlsLL(x, coefs):
r"""Generalized log-likelihood for Random Poisson mixtures
This is a generalized form taking any number of Poisson processes.
Parameters
----------
x : array_like
Independent data array described by the function
    coefs : array_like
        2-D array with coefficients ('a', :math:`\lambda`) in rows for each
        process of the model in columns.
Returns
-------
out : array_like
Same shape as `x` with the evaluated log-likelihood.
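
    Examples
    --------
    A minimal sketch for a hypothetical two-process mixture, with the `a`
    coefficients in the first row and the :math:`\lambda` rates in the
    second:

    >>> import numpy as np
    >>> import pandas as pd
    >>> coefs = pd.DataFrame({"fast": [0.7, 0.1], "slow": [0.3, 0.01]},
    ...                      index=["a", "lambda"])
    >>> ll = nlsLL(np.array([1.0, 10.0, 100.0]), coefs)  # doctest: +SKIP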
"""
def calc_term(params):
return params[0] * params[1] * np.exp(-params[1] * x)
terms = np.apply_along_axis(calc_term, 0, coefs)
terms_sum = terms.sum(1)
if np.any(terms_sum <= 0):
logger.warning("Negative values at: {}".format(coefs))
return np.log(terms_sum)
def calc_p(coefs):
r"""Calculate `p` (proportion) parameter from `a` coefficients
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
Returns
-------
    p : list
        Proportion parameters implied in `coefs`.
    lambdas : pandas.Series
        A series with the :math:`\lambda` parameters from `coefs`.
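
    Examples
    --------
    A minimal sketch with hypothetical two-process coefficients:

    >>> import pandas as pd
    >>> coefs = pd.DataFrame({"fast": [0.7, 0.1], "slow": [0.3, 0.01]},
    ...                      index=["a", "lambda"])
    >>> p, lambdas = calc_p(coefs)  # doctest: +SKIP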
"""
ncoefs = coefs.shape[1]
coef_arr = np.arange(ncoefs)
pairs = [(i, i + 1) for i in coef_arr[:-1]]
p_ll = [] # build mixing ratios
for pair in pairs:
procn1 = coefs.columns[pair[0]] # name of process 1
procn2 = coefs.columns[pair[1]] # name of process 2
a1 = coefs.loc["a", procn1]
a2 = coefs.loc["a", procn2]
p_i = a1 / (a1 + a2)
p_ll.append(p_i)
return (p_ll, coefs.loc["lambda"])
def ecdf(x, p, lambdas):
r"""Estimated cumulative frequency for Poisson mixture models
ECDF for two- or three-process mixture models.
Parameters
----------
    x : array_like
        Independent data array described by the model with parameters `p`
        and `lambdas`.
p : list
List with mixing parameters of the model.
lambdas : pandas.Series
Series with the density parameters (:math:`\lambda`) of the
model. Its length must be length(p) + 1.
Returns
-------
out : array_like
Same shape as `x` with the evaluated function.
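
    Examples
    --------
    A minimal sketch for a hypothetical two-process model with mixing
    proportion 0.7 and rates 0.1 and 0.01:

    >>> import numpy as np
    >>> import pandas as pd
    >>> p = [0.7]
    >>> lambdas = pd.Series([0.1, 0.01])
    >>> cdf = ecdf(np.array([1.0, 10.0, 100.0]), p, lambdas)  # doctest: +SKIP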
"""
ncoefs = lambdas.size
# We assume at least two processes
p0 = p[0]
lda0 = lambdas.iloc[0]
term0 = 1 - p0 * np.exp(-lda0 * x)
if ncoefs == 2:
lda1 = lambdas.iloc[1]
term1 = (1 - p0) * np.exp(-lda1 * x)
cdf = term0 - term1
elif ncoefs == 3:
p1 = p[1]
lda1 = lambdas.iloc[1]
term1 = p1 * (1 - p0) * np.exp(-lda1 * x)
lda2 = lambdas.iloc[2]
term2 = (1 - p0) * (1 - p1) * np.exp(-lda2 * x)
cdf = term0 - term1 - term2
else:
msg = "Only mixtures of <= 3 processes are implemented"
raise KeyError(msg)
return cdf
def label_bouts(x, bec, as_diff=False):
"""Classify data into bouts based on bout ending criteria
Parameters
----------
x : pandas.Series
Series with data to classify according to `bec`.
bec : array_like
Array with bout-ending criteria. It is assumed to be sorted.
as_diff : bool, optional
Whether to apply `diff` on `x` so it matches `bec`'s scale.
Returns
-------
out : numpy.ndarray
Integer array with the same shape as `x`.
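
    Examples
    --------
    A minimal sketch labelling a series of (hypothetical) postdive
    durations given a single bout-ending criterion of 60 s:

    >>> import numpy as np
    >>> import pandas as pd
    >>> postdives = pd.Series([2.0, 5.0, 110.0, 3.0, 4.0, 95.0])
    >>> labels = label_bouts(postdives, np.array([60.0]))  # doctest: +SKIP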
"""
if as_diff:
xx = x.diff().fillna(0)
else:
xx = x.copy()
xx_min = np.array(xx.min())
xx_max = np.array(xx.max())
brks = np.append(np.append(xx_min, bec), xx_max)
xx_cat = pd.cut(xx, bins=brks, include_lowest=True)
xx_bouts = rle_key(xx_cat)
return xx_bouts
def _plot_bec(bec_x, bec_y, ax, xytext, horizontalalignment="left"):
"""Plot bout-ending criteria on `Axes`
Private helper function only for convenience here.
Parameters
----------
bec_x : numpy.ndarray, shape (n,)
x coordinate for bout-ending criteria.
bec_y : numpy.ndarray, shape (n,)
y coordinate for bout-ending criteria.
ax : matplotlib.Axes
An Axes instance to use as target.
xytext : 2-tuple
Argument passed to `matplotlib.annotate`; interpreted with
textcoords="offset points".
horizontalalignment : str
Argument passed to `matplotlib.annotate`.
"""
ylims = ax.get_ylim()
ax.vlines(bec_x, ylims[0], bec_y, linestyle="--")
ax.scatter(bec_x, bec_y, c="r", marker="v")
# Annotations
fmtstr = "bec_{0} = {1:.3f}"
if bec_x.size == 1:
bec_x = bec_x.item()
ax.annotate(fmtstr.format(0, bec_x),
(bec_x, bec_y), xytext=xytext,
textcoords="offset points",
horizontalalignment=horizontalalignment)
else:
for i, bec_i in enumerate(bec_x):
ax.annotate(fmtstr.format(i, bec_i),
(bec_i, bec_y[i]), xytext=xytext,
textcoords="offset points",
horizontalalignment=horizontalalignment)
class Bouts(metaclass=ABCMeta):
"""Abstract base class for models of log-transformed frequencies
    `Bouts` is an abstract base class that builds the histogram of
    log-transformed frequencies and sets up bout identification
    procedures. Subclasses must implement the `fit` and `bec` methods,
    or re-use the default NLS implementations defined here.
Attributes
----------
x : array_like
1D array with input data.
method : str
Method used for calculating the histogram.
lnfreq : pandas.DataFrame
DataFrame with the centers of histogram bins, and corresponding
log-frequencies of `x`.
"""
def __init__(self, x, bw, method="standard"):
"""Histogram of log transformed frequencies of `x`
Parameters
----------
x : array_like
1D array with data where bouts will be identified based on
`method`.
bw : float
Bin width for the histogram
method : {"standard", "seq_diff"}, optional
            Method to use for calculating the frequencies: "standard"
            simply uses `x`, whereas "seq_diff" uses the absolute
            sequential differences of `x`.
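
        Examples
        --------
        A minimal sketch, assuming ``BoutsCls`` is a concrete subclass of
        :class:`Bouts` (i.e. one implementing :meth:`fit` and :meth:`bec`)
        and ``postdives`` is a 1-D array of postdive durations:

        >>> bouts = BoutsCls(postdives, bw=5, method="standard")  # doctest: +SKIP
        >>> print(bouts)  # doctest: +SKIP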
"""
self.x = x
self.method = method
if method == "standard":
upper = x.max()
brks = np.arange(x.min(), upper, bw)
if brks[-1] < upper:
brks = np.append(brks, brks[-1] + bw)
h, edges = np.histogram(x, bins=brks)
elif method == "seq_diff":
x_diff = np.abs(np.diff(x))
upper = x_diff.max()
brks = np.arange(0, upper, bw)
if brks[-1] < upper:
brks = np.append(brks, brks[-1] + bw)
h, edges = np.histogram(x_diff, bins=brks)
ctrs = edges[:-1] + np.diff(edges) / 2
ok = h > 0
ok_at = np.where(ok)[0] + 1 # 1-based indices
freq_adj = h[ok] / np.diff(np.insert(ok_at, 0, 0))
self.lnfreq = pd.DataFrame({"x": ctrs[ok],
"lnfreq": np.log(freq_adj)})
def __str__(self):
method = self.method
lnfreq = self.lnfreq
objcls = ("Class {} object\n".format(self.__class__.__name__))
meth_str = "{0:<20} {1}\n".format("histogram method: ", method)
lnfreq_str = ("{0:<20}\n{1}"
.format("log-frequency histogram:",
lnfreq.describe()))
return objcls + meth_str + lnfreq_str
def init_pars(self, x_break, plot=True, ax=None, **kwargs):
"""Find starting values for mixtures of random Poisson processes
Starting values are calculated using the "broken stick" method.
Parameters
----------
        x_break : array_like
            One- or two-element array with values determining the break(s)
            for the broken stick model, such that x < x_break[0] is the
            first process, x_break[0] <= x < x_break[1] is the second
            process, and x >= x_break[1] is the third one.
plot : bool, optional
Whether to plot the broken stick model.
ax : matplotlib.Axes, optional
An Axes instance to use as target. Default is to create one.
**kwargs : optional keyword arguments
Passed to plotting function.
Returns
-------
out : pandas.DataFrame
DataFrame with coefficients for each process.
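
        Examples
        --------
        A minimal sketch, assuming ``bouts`` is an instance of a concrete
        :class:`Bouts` subclass built from postdive durations, with a
        single, hypothetical break at 60 s:

        >>> start_pars = bouts.init_pars([60], plot=False)  # doctest: +SKIP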
"""
nproc = len(x_break)
if (nproc > 2):
msg = "x_break must be length <= 2"
raise IndexError(msg)
lnfreq = self.lnfreq
ctrs = lnfreq["x"]
xmin = ctrs.min()
xmax = ctrs.max()
xbins = [xmin]
xbins.extend(x_break)
xbins.extend([xmax])
procf = pd.cut(ctrs, bins=xbins, right=True,
include_lowest=True)
lnfreq_grp = lnfreq.groupby(procf)
coefs_ll = []
for name, grp in lnfreq_grp:
fit = smf.ols("lnfreq ~ x", data=grp).fit()
coefs_ll.append(fit.params.rename(name))
coefs = pd.concat(coefs_ll, axis=1)
def calculate_pars(p):
"""Poisson process parameters from linear model
"""
lda = -p["x"]
a = np.exp(p["Intercept"]) / lda
return pd.Series({"a": a, "lambda": lda})
pars = coefs.apply(calculate_pars)
if plot:
if ax is None:
ax = plt.gca()
freq_min = lnfreq["lnfreq"].min()
freq_max = lnfreq["lnfreq"].max()
for name, grp in lnfreq_grp:
ax.scatter(x="x", y="lnfreq", data=grp, label=name)
# Plot current "stick"
coef_i = coefs[name]
y_stick = coef_i["Intercept"] + ctrs * coef_i["x"]
# Limit the "stick" line to min/max of data
ok = (y_stick >= freq_min) & (y_stick <= freq_max)
ax.plot(ctrs[ok], y_stick[ok], linestyle="--")
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
y_pred = nlsLL(x_pred, pars)
ax.plot(x_pred, y_pred, alpha=0.5, label="model")
ax.legend(loc="upper right")
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return pars
@abstractmethod
def fit(self, start, **kwargs):
"""Fit Poisson mixture model to log frequencies
Default is non-linear least squares method.
Parameters
----------
start : pandas.DataFrame
DataFrame with coefficients for each process in columns.
**kwargs : optional keyword arguments
Passed to `scipy.optimize.curve_fit`.
Returns
-------
coefs : pandas.DataFrame
Coefficients of the model.
pcov : 2D array
Covariance of coefs.
"""
lnfreq = self.lnfreq
xdata = lnfreq["x"]
ydata = lnfreq["lnfreq"]
def _nlsLL(x, *args):
"""Wrapper to nlsLL to allow for array argument"""
# Pass in original shape, damn it! Note order="F" needed
coefs = np.array(args).reshape(start.shape, order="F")
return nlsLL(x, coefs)
# Rearrange starting values into a 1D array (needs to be flat)
init_flat = start.to_numpy().T.reshape((start.size,))
popt, pcov = curve_fit(_nlsLL, xdata, ydata,
p0=init_flat, **kwargs)
# Reshape coefs back into init shape
coefs = pd.DataFrame(popt.reshape(start.shape, order="F"),
columns=start.columns, index=start.index)
return (coefs, pcov)
@abstractmethod
def bec(self, coefs):
"""Calculate bout ending criteria from model coefficients
Implementing default as from NLS method.
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
Returns
-------
        out : numpy.ndarray, shape (n,)
            1-D array with the BECs implied by `coefs`. Its length is
            ``coefs.shape[1] - 1``, i.e. one BEC per pair of adjacent
            processes.
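
        Notes
        -----
        For each pair of adjacent processes in `coefs`, the BEC is
        computed as implemented below:

        .. math::

           BEC = \frac{1}{\lambda_1 - \lambda_2}
                 \ln\left(\frac{a_1 \lambda_1}{a_2 \lambda_2}\right)

        where subscripts 1 and 2 denote the first and second process of
        the pair, respectively.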
"""
# Find bec's per process by pairing columns
ncoefs = coefs.shape[1]
coef_arr = np.arange(ncoefs)
pairs = [(i, i + 1) for i in coef_arr[:-1]]
becs = []
for pair in pairs:
procn1 = coefs.columns[pair[0]] # name of process 1
procn2 = coefs.columns[pair[1]] # name of process 2
a1 = coefs.loc["a", procn1]
lambda1 = coefs.loc["lambda", procn1]
a2 = coefs.loc["a", procn2]
lambda2 = coefs.loc["lambda", procn2]
bec = (np.log((a1 * lambda1) / (a2 * lambda2)) /
(lambda1 - lambda2))
becs.append(bec)
return np.array(becs)
def plot_fit(self, coefs, ax=None):
"""Plot log frequency histogram and fitted model
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
ax : matplotlib.Axes instance
An Axes instance to use as target.
Returns
-------
ax : `matplotlib.Axes`
"""
lnfreq = self.lnfreq
ctrs = lnfreq["x"]
xmin = ctrs.min()
xmax = ctrs.max()
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
y_pred = nlsLL(x_pred, coefs)
if ax is None:
ax = plt.gca()
# Plot data
ax.scatter(x="x", y="lnfreq", data=lnfreq,
alpha=0.5, label="histogram")
# Plot predicted
ax.plot(x_pred, y_pred, alpha=0.5, label="model")
        # Plot BECs (note this plots all BECs in bec_x)
bec_x = self.bec(coefs) # need an array for nlsLL
bec_y = nlsLL(bec_x, coefs)
_plot_bec(bec_x, bec_y, ax=ax, xytext=(5, 5))
ax.legend(loc=8, bbox_to_anchor=(0.5, 1), frameon=False,
borderaxespad=0.1, ncol=2)
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return ax
def _plot_ecdf(x_pred_expm1, y_pred, ax):
"""Plot Empirical Frequency Distribution
Plot the ECDF at predicted x and corresponding y locations.
Parameters
----------
    x_pred_expm1 : numpy.ndarray, shape (n,)
        Values of the variable at which to plot the ECDF.
y_pred : numpy.ndarray, shape (n,)
Values of the ECDF at `x_pred`.
ax : matplotlib.axes.Axes
An Axes instance to use as target.
"""
    pass

scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/bouts.py | bouts.py
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from skdiveMove.helpers import rle_key
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def nlsLL(x, coefs):
r"""Generalized log-likelihood for Random Poisson mixtures
This is a generalized form taking any number of Poisson processes.
Parameters
----------
x : array_like
Independent data array described by the function
coefs : array_like
2-D array with coefficients ('a', :math:'\lambda') in rows for each
process of the model in columns.
Returns
-------
out : array_like
Same shape as `x` with the evaluated log-likelihood.
"""
def calc_term(params):
return params[0] * params[1] * np.exp(-params[1] * x)
terms = np.apply_along_axis(calc_term, 0, coefs)
terms_sum = terms.sum(1)
if np.any(terms_sum <= 0):
logger.warning("Negative values at: {}".format(coefs))
return np.log(terms_sum)
def calc_p(coefs):
r"""Calculate `p` (proportion) parameter from `a` coefficients
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
Returns
-------
p : list
Proportion parameters implied in `coef`.
lambdas : pandas.Series
A series with with the :math:`\lambda` parameters from `coef`.
"""
ncoefs = coefs.shape[1]
coef_arr = np.arange(ncoefs)
pairs = [(i, i + 1) for i in coef_arr[:-1]]
p_ll = [] # build mixing ratios
for pair in pairs:
procn1 = coefs.columns[pair[0]] # name of process 1
procn2 = coefs.columns[pair[1]] # name of process 2
a1 = coefs.loc["a", procn1]
a2 = coefs.loc["a", procn2]
p_i = a1 / (a1 + a2)
p_ll.append(p_i)
return (p_ll, coefs.loc["lambda"])
def ecdf(x, p, lambdas):
r"""Estimated cumulative frequency for Poisson mixture models
ECDF for two- or three-process mixture models.
Parameters
----------
x : array_like
Independent data array described by model with parameters `p`,
:math:`\lambda_f`, and :math:`\lambda_s`.
p : list
List with mixing parameters of the model.
lambdas : pandas.Series
Series with the density parameters (:math:`\lambda`) of the
model. Its length must be length(p) + 1.
Returns
-------
out : array_like
Same shape as `x` with the evaluated function.
"""
ncoefs = lambdas.size
# We assume at least two processes
p0 = p[0]
lda0 = lambdas.iloc[0]
term0 = 1 - p0 * np.exp(-lda0 * x)
if ncoefs == 2:
lda1 = lambdas.iloc[1]
term1 = (1 - p0) * np.exp(-lda1 * x)
cdf = term0 - term1
elif ncoefs == 3:
p1 = p[1]
lda1 = lambdas.iloc[1]
term1 = p1 * (1 - p0) * np.exp(-lda1 * x)
lda2 = lambdas.iloc[2]
term2 = (1 - p0) * (1 - p1) * np.exp(-lda2 * x)
cdf = term0 - term1 - term2
else:
msg = "Only mixtures of <= 3 processes are implemented"
raise KeyError(msg)
return cdf
def label_bouts(x, bec, as_diff=False):
"""Classify data into bouts based on bout ending criteria
Parameters
----------
x : pandas.Series
Series with data to classify according to `bec`.
bec : array_like
Array with bout-ending criteria. It is assumed to be sorted.
as_diff : bool, optional
Whether to apply `diff` on `x` so it matches `bec`'s scale.
Returns
-------
out : numpy.ndarray
Integer array with the same shape as `x`.
"""
if as_diff:
xx = x.diff().fillna(0)
else:
xx = x.copy()
xx_min = np.array(xx.min())
xx_max = np.array(xx.max())
brks = np.append(np.append(xx_min, bec), xx_max)
xx_cat = pd.cut(xx, bins=brks, include_lowest=True)
xx_bouts = rle_key(xx_cat)
return xx_bouts
def _plot_bec(bec_x, bec_y, ax, xytext, horizontalalignment="left"):
"""Plot bout-ending criteria on `Axes`
Private helper function only for convenience here.
Parameters
----------
bec_x : numpy.ndarray, shape (n,)
x coordinate for bout-ending criteria.
bec_y : numpy.ndarray, shape (n,)
y coordinate for bout-ending criteria.
ax : matplotlib.Axes
An Axes instance to use as target.
xytext : 2-tuple
Argument passed to `matplotlib.annotate`; interpreted with
textcoords="offset points".
horizontalalignment : str
Argument passed to `matplotlib.annotate`.
"""
ylims = ax.get_ylim()
ax.vlines(bec_x, ylims[0], bec_y, linestyle="--")
ax.scatter(bec_x, bec_y, c="r", marker="v")
# Annotations
fmtstr = "bec_{0} = {1:.3f}"
if bec_x.size == 1:
bec_x = bec_x.item()
ax.annotate(fmtstr.format(0, bec_x),
(bec_x, bec_y), xytext=xytext,
textcoords="offset points",
horizontalalignment=horizontalalignment)
else:
for i, bec_i in enumerate(bec_x):
ax.annotate(fmtstr.format(i, bec_i),
(bec_i, bec_y[i]), xytext=xytext,
textcoords="offset points",
horizontalalignment=horizontalalignment)
class Bouts(metaclass=ABCMeta):
"""Abstract base class for models of log-transformed frequencies
This is a base class for other classes to build on, and do the model
fitting. `Bouts` is an abstract base class to set up bout
identification procedures. Subclasses must implement `fit` and `bec`
methods, or re-use the default NLS methods in `Bouts`.
Attributes
----------
x : array_like
1D array with input data.
method : str
Method used for calculating the histogram.
lnfreq : pandas.DataFrame
DataFrame with the centers of histogram bins, and corresponding
log-frequencies of `x`.
"""
def __init__(self, x, bw, method="standard"):
"""Histogram of log transformed frequencies of `x`
Parameters
----------
x : array_like
1D array with data where bouts will be identified based on
`method`.
bw : float
Bin width for the histogram
method : {"standard", "seq_diff"}, optional
Method to use for calculating the frequencies: "standard"
simply uses `x`, which "seq_diff" uses the sequential
differences method.
**kwargs : optional keywords
Passed to histogram
"""
self.x = x
self.method = method
if method == "standard":
upper = x.max()
brks = np.arange(x.min(), upper, bw)
if brks[-1] < upper:
brks = np.append(brks, brks[-1] + bw)
h, edges = np.histogram(x, bins=brks)
elif method == "seq_diff":
x_diff = np.abs(np.diff(x))
upper = x_diff.max()
brks = np.arange(0, upper, bw)
if brks[-1] < upper:
brks = np.append(brks, brks[-1] + bw)
h, edges = np.histogram(x_diff, bins=brks)
ctrs = edges[:-1] + np.diff(edges) / 2
ok = h > 0
ok_at = np.where(ok)[0] + 1 # 1-based indices
freq_adj = h[ok] / np.diff(np.insert(ok_at, 0, 0))
self.lnfreq = pd.DataFrame({"x": ctrs[ok],
"lnfreq": np.log(freq_adj)})
def __str__(self):
method = self.method
lnfreq = self.lnfreq
objcls = ("Class {} object\n".format(self.__class__.__name__))
meth_str = "{0:<20} {1}\n".format("histogram method: ", method)
lnfreq_str = ("{0:<20}\n{1}"
.format("log-frequency histogram:",
lnfreq.describe()))
return objcls + meth_str + lnfreq_str
def init_pars(self, x_break, plot=True, ax=None, **kwargs):
"""Find starting values for mixtures of random Poisson processes
Starting values are calculated using the "broken stick" method.
Parameters
----------
x_break : array_like
One- or two-element array with values determining the break(s)
for broken stick model, such that x < x_break[0] is first
process, x >= x_break[1] & x < x_break[2] is second process,
and x >= x_break[2] is third one.
plot : bool, optional
Whether to plot the broken stick model.
ax : matplotlib.Axes, optional
An Axes instance to use as target. Default is to create one.
**kwargs : optional keyword arguments
Passed to plotting function.
Returns
-------
out : pandas.DataFrame
DataFrame with coefficients for each process.
"""
nproc = len(x_break)
if (nproc > 2):
msg = "x_break must be length <= 2"
raise IndexError(msg)
lnfreq = self.lnfreq
ctrs = lnfreq["x"]
xmin = ctrs.min()
xmax = ctrs.max()
xbins = [xmin]
xbins.extend(x_break)
xbins.extend([xmax])
procf = pd.cut(ctrs, bins=xbins, right=True,
include_lowest=True)
lnfreq_grp = lnfreq.groupby(procf)
coefs_ll = []
for name, grp in lnfreq_grp:
fit = smf.ols("lnfreq ~ x", data=grp).fit()
coefs_ll.append(fit.params.rename(name))
coefs = pd.concat(coefs_ll, axis=1)
def calculate_pars(p):
"""Poisson process parameters from linear model
"""
lda = -p["x"]
a = np.exp(p["Intercept"]) / lda
return pd.Series({"a": a, "lambda": lda})
pars = coefs.apply(calculate_pars)
if plot:
if ax is None:
ax = plt.gca()
freq_min = lnfreq["lnfreq"].min()
freq_max = lnfreq["lnfreq"].max()
for name, grp in lnfreq_grp:
ax.scatter(x="x", y="lnfreq", data=grp, label=name)
# Plot current "stick"
coef_i = coefs[name]
y_stick = coef_i["Intercept"] + ctrs * coef_i["x"]
# Limit the "stick" line to min/max of data
ok = (y_stick >= freq_min) & (y_stick <= freq_max)
ax.plot(ctrs[ok], y_stick[ok], linestyle="--")
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
y_pred = nlsLL(x_pred, pars)
ax.plot(x_pred, y_pred, alpha=0.5, label="model")
ax.legend(loc="upper right")
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return pars
@abstractmethod
def fit(self, start, **kwargs):
"""Fit Poisson mixture model to log frequencies
Default is non-linear least squares method.
Parameters
----------
start : pandas.DataFrame
DataFrame with coefficients for each process in columns.
**kwargs : optional keyword arguments
Passed to `scipy.optimize.curve_fit`.
Returns
-------
coefs : pandas.DataFrame
Coefficients of the model.
pcov : 2D array
Covariance of coefs.
"""
lnfreq = self.lnfreq
xdata = lnfreq["x"]
ydata = lnfreq["lnfreq"]
def _nlsLL(x, *args):
"""Wrapper to nlsLL to allow for array argument"""
            # Reshape the flat argument vector back into the original
            # coefficient shape; order="F" matches the column-wise
            # flattening of the starting values below
coefs = np.array(args).reshape(start.shape, order="F")
return nlsLL(x, coefs)
# Rearrange starting values into a 1D array (needs to be flat)
init_flat = start.to_numpy().T.reshape((start.size,))
popt, pcov = curve_fit(_nlsLL, xdata, ydata,
p0=init_flat, **kwargs)
# Reshape coefs back into init shape
coefs = pd.DataFrame(popt.reshape(start.shape, order="F"),
columns=start.columns, index=start.index)
return (coefs, pcov)
@abstractmethod
def bec(self, coefs):
"""Calculate bout ending criteria from model coefficients
        The default implementation uses the NLS method.
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
Returns
-------
out : numpy.ndarray, shape (n,)
            1-D array with BECs implied by `coefs`. Length is
            coefs.shape[1] - 1 (one BEC per transition between processes).
"""
# Find bec's per process by pairing columns
ncoefs = coefs.shape[1]
coef_arr = np.arange(ncoefs)
pairs = [(i, i + 1) for i in coef_arr[:-1]]
becs = []
for pair in pairs:
procn1 = coefs.columns[pair[0]] # name of process 1
procn2 = coefs.columns[pair[1]] # name of process 2
a1 = coefs.loc["a", procn1]
lambda1 = coefs.loc["lambda", procn1]
a2 = coefs.loc["a", procn2]
lambda2 = coefs.loc["lambda", procn2]
bec = (np.log((a1 * lambda1) / (a2 * lambda2)) /
(lambda1 - lambda2))
becs.append(bec)
return np.array(becs)
def plot_fit(self, coefs, ax=None):
"""Plot log frequency histogram and fitted model
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns, and indexed by
parameter names "a" and "lambda".
ax : matplotlib.Axes instance
An Axes instance to use as target.
Returns
-------
ax : `matplotlib.Axes`
"""
lnfreq = self.lnfreq
ctrs = lnfreq["x"]
xmin = ctrs.min()
xmax = ctrs.max()
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
y_pred = nlsLL(x_pred, coefs)
if ax is None:
ax = plt.gca()
# Plot data
ax.scatter(x="x", y="lnfreq", data=lnfreq,
alpha=0.5, label="histogram")
# Plot predicted
ax.plot(x_pred, y_pred, alpha=0.5, label="model")
# Plot BEC (note this plots all BECs in becx)
bec_x = self.bec(coefs) # need an array for nlsLL
bec_y = nlsLL(bec_x, coefs)
_plot_bec(bec_x, bec_y, ax=ax, xytext=(5, 5))
ax.legend(loc=8, bbox_to_anchor=(0.5, 1), frameon=False,
borderaxespad=0.1, ncol=2)
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return ax
def _plot_ecdf(x_pred_expm1, y_pred, ax):
"""Plot Empirical Frequency Distribution
Plot the ECDF at predicted x and corresponding y locations.
Parameters
----------
x_pred : numpy.ndarray, shape (n,)
Values of the variable at which to plot the ECDF.
y_pred : numpy.ndarray, shape (n,)
Values of the ECDF at `x_pred`.
ax : matplotlib.axes.Axes
An Axes instance to use as target.
"""
pass | 0.929424 | 0.57678 |
import logging
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from scipy.special import logit, expit
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from . import bouts
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def mleLL(x, p, lambdas):
r"""Random Poisson processes function
The current implementation takes two or three random Poisson processes.
Parameters
----------
x : array_like
Independent data array described by model with parameters `p`,
:math:`\lambda_f`, and :math:`\lambda_s`.
p : list
List with mixing parameters of the model.
lambdas : array_like
1-D Array with the density parameters (:math:`\lambda`) of the
model. Its length must be length(p) + 1.
Returns
-------
out : array_like
Same shape as `x` with the evaluated function.
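    Examples
    --------
    A minimal sketch evaluating the two-process mixture at a few points;
    the mixing proportion and rates below are illustrative assumptions:
    >>> import numpy as np
    >>> x = np.array([1.0, 10.0, 100.0])
    >>> mleLL(x, [0.7], np.array([0.05, 0.005]))  # doctest: +ELLIPSIS
    array([...])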
"""
logmsg = "p={0}, lambdas={1}".format(p, lambdas)
logger.info(logmsg)
nproc = lambdas.size
# We assume at least two processes
p0 = p[0]
lda0 = lambdas[0]
term0 = p0 * lda0 * np.exp(-lda0 * x)
if nproc == 2:
lda1 = lambdas[1]
term1 = (1 - p0) * lda1 * np.exp(-lda1 * x)
res = term0 + term1
    else:  # 3 processes; a maximum of 3 is enforced in negMLEll
p1 = p[1]
lda1 = lambdas[1]
term1 = p1 * (1 - p0) * lda1 * np.exp(-lda1 * x)
lda2 = lambdas[2]
term2 = (1 - p1) * (1 - p0) * lda2 * np.exp(-lda2 * x)
res = term0 + term1 + term2
return np.log(res)
class BoutsMLE(bouts.Bouts):
r"""Maximum Likelihood estimation for models of Poisson process mixtures
Methods for modelling log-frequency data as a mixture of Poisson
processes via maximum likelihood estimation [2]_, [3]_. Mixtures of
two or three Poisson processes are supported.
Even in these relatively simple cases, it is very important to provide
good starting values for the parameters.
One useful strategy to get good starting parameter values is to proceed
in 4 steps. First, fit a broken stick model to the log frequencies of
binned data (see :meth:`~Bouts.init_pars`), to obtain estimates of 4
parameters in a 2-process model [1]_, or 6 in a 3-process model.
Second, calculate parameter(s) :math:`p` from the :math:`\alpha`
parameters obtained by fitting the broken stick model, to get tentative
initial values as in [2]_. Third, obtain MLE estimates for these
parameters, but using a reparameterized version of the -log L2
function. Lastly, obtain the final MLE estimates for the three
parameters by using the estimates from step 3, un-transformed back to
their original scales, maximizing the original parameterization of the
-log L2 function.
:meth:`~Bouts.init_pars` can be used to perform step 1. Calculation of
the mixing parameters :math:`p` in step 2 is trivial from these
estimates. Method :meth:`negMLEll` calculates the negative
log-likelihood for a reparameterized version of the -log L2 function
given by [1]_, so can be used for step 3. This uses a logit
transformation of the mixing parameter :math:`p`, and log
transformations for density parameters :math:`\lambda`. Method
:meth:`negMLEll` is used again to compute the -log L2 function
corresponding to the un-transformed model for step 4.
The :meth:`fit` method performs the main job of maximizing the -log L2
functions, and is essentially a wrapper around
:func:`~scipy.optimize.minimize`. It only takes the -log L2 function,
a `DataFrame` of starting values, and the variable to be modelled, all
of which are passed to :func:`~scipy.optimize.minimize` for
optimization. Additionally, any other arguments are also passed to
:func:`~scipy.optimize.minimize`, hence great control is provided for
fitting any of the -log L2 functions.
In practice, step 3 does not pose major problems using the
reparameterized -log L2 function, but it might be useful to use method
'L-BFGS-B' with appropriate lower and upper bounds. Step 4 can be a
bit more problematic, because the parameters are usually on very
different scales and there can be multiple minima. Therefore, it is
almost always the rule to use method 'L-BFGS-B', again bounding the
parameter search, as well as other settings for controlling the
optimization.
    References
    ----------
    .. [1] Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour
       into bouts. Animal Behaviour 39, 63-69.
.. [2] Langton, S.; Collett, D. and Sibly, R. (1995) Splitting
behaviour into bouts; a maximum likelihood approach. Behaviour 132,
9-10.
.. [3] Luque, S.P. and Guinet, C. (2007) A maximum likelihood approach
for identifying dive bouts improves accuracy, precision, and
objectivity. Behaviour, 144, 1315-1332.
Examples
--------
See :doc:`demo_simulbouts` for a detailed example.
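    A minimal sketch with simulated data, adapted from this module's
    ``__main__`` example; the mixture parameters, bin width, break point,
    and optimization bounds below are illustrative choices:
    >>> import numpy as np
    >>> from skdiveMove.tests import random_mixexp
    >>> rng = np.random.default_rng(123)
    >>> x = random_mixexp(1000, p=0.7, lda=np.array([0.05, 0.005]),
    ...                   rng=rng)
    >>> xbouts = BoutsMLE(x, bw=5)
    >>> init_pars = xbouts.init_pars([80], plot=False)
    >>> opts1 = dict(method="L-BFGS-B",
    ...              bounds=((-2, None), (-5, None), (-10, None)))
    >>> opts2 = dict(method="L-BFGS-B",
    ...              bounds=((1e-8, None), (1e-8, None), (1e-8, None)))
    >>> fit1, fit2 = xbouts.fit(init_pars, fit1_opts=opts1, fit2_opts=opts2)
    >>> becs = xbouts.bec(fit2)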
"""
def negMLEll(self, params, x, istransformed=True):
r"""Log likelihood function of parameters given observed data
Parameters
----------
params : array_like
1-D array with parameters to fit. Currently must be either
3-length, with mixing parameter :math:`p`, density parameter
:math:`\lambda_f` and :math:`\lambda_s`, in that order, or
            5-length, with :math:`p_f`, :math:`p_{fs}`, :math:`\lambda_f`,
:math:`\lambda_m`, :math:`\lambda_s`.
x : array_like
Independent data array described by model with parameters
`params`.
istransformed : bool
Whether `params` are transformed and need to be un-transformed
to calculate the likelihood.
Returns
-------
        out : float
            Negative log-likelihood of `params` given the data `x`.
"""
if len(params) == 3:
            # Need list `p` for mleLL
p = [params[0]]
lambdas = params[1:]
elif len(params) == 5:
p = params[:2]
lambdas = params[2:]
else:
msg = "Only mixtures of <= 3 processes are implemented"
raise KeyError(msg)
if istransformed:
p = expit(p)
lambdas = np.exp(lambdas)
ll = -sum(mleLL(x, p, lambdas))
logger.info("LL={}".format(ll))
return ll
def fit(self, start, fit1_opts=None, fit2_opts=None):
"""Maximum likelihood estimation of log frequencies
Parameters
----------
start : pandas.DataFrame
DataFrame with starting values for coefficients of each process
in columns. These can come from the "broken stick" method as
in :meth:`Bouts.init_pars`, and will be transformed to minimize
the first log likelihood function.
fit1_opts, fit2_opts : dict
Dictionaries with keywords to be passed to
:func:`scipy.optimize.minimize`, for the first and second fits.
Returns
-------
fit1, fit2 : scipy.optimize.OptimizeResult
Objects with the optimization result from the first and second
fit, having a `x` attribute with coefficients of the solution.
Notes
-----
Current implementation handles mixtures of two Poisson processes.
"""
# Calculate `p`
p0, lambda0 = bouts.calc_p(start)
# transform parameters for first fit
lambda0 = np.log(lambda0)
x0 = np.array([*logit(p0), *lambda0])
logger.info("Starting first fit")
if fit1_opts:
fit1 = minimize(self.negMLEll, x0=x0, args=(self.x,),
**fit1_opts)
else:
fit1 = minimize(self.negMLEll, x0=x0, args=(self.x,))
coef0 = fit1.x
start2 = [expit(coef0[0]), *np.exp(coef0[1:])]
logger.info("Starting second fit")
if fit2_opts:
fit2 = minimize(self.negMLEll, x0=start2,
args=(self.x, False), **fit2_opts)
else:
fit2 = minimize(self.negMLEll, x0=start2,
args=(self.x, False))
logger.info("N iter fit 1: {0}, fit 2: {1}"
.format(fit1.nit, fit2.nit))
return (fit1, fit2)
def bec(self, fit):
"""Calculate bout ending criteria from model coefficients
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
Returns
-------
out : numpy.ndarray
Notes
-----
        The returned array holds one BEC per transition between processes:
        a single value for a two-process mixture, or two values for a
        three-process mixture.
"""
coefs = fit.x
if len(coefs) == 3:
p_hat = coefs[0]
lda1_hat = coefs[1]
lda2_hat = coefs[2]
bec = (np.log((p_hat * lda1_hat) /
((1 - p_hat) * lda2_hat)) /
(lda1_hat - lda2_hat))
elif len(coefs) == 5:
p0_hat, p1_hat = coefs[:2]
lda0_hat, lda1_hat, lda2_hat = coefs[2:]
bec0 = (np.log((p0_hat * lda0_hat) /
((1 - p0_hat) * lda1_hat)) /
(lda0_hat - lda1_hat))
bec1 = (np.log((p1_hat * lda1_hat) /
((1 - p1_hat) * lda2_hat)) /
(lda1_hat - lda2_hat))
bec = [bec0, bec1]
return np.array(bec)
def plot_fit(self, fit, ax=None):
"""Plot log frequency histogram and fitted model
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
Returns
-------
ax :
:class:`~matplotlib.axes.Axes` instances.
"""
# Method is redefined from Bouts
x = self.x
coefs = fit.x
if len(coefs) == 3:
p_hat = [coefs[0]]
lda_hat = coefs[1:]
elif len(coefs) == 5:
p_hat = coefs[:2]
lda_hat = coefs[2:]
xmin = x.min()
xmax = x.max()
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
        # Evaluate the fitted mixture on a regular grid for plotting
y_pred = mleLL(x_pred, p_hat, lda_hat)
if ax is None:
ax = plt.gca()
# Data rug plot
ax.plot(x, np.ones_like(x) * y_pred.max(), "|",
color="k", label="observed")
# Plot predicted
ax.plot(x_pred, y_pred, label="model")
# Plot BEC
bec_x = self.bec(fit)
bec_y = mleLL(bec_x, p_hat, lda_hat)
bouts._plot_bec(bec_x, bec_y, ax=ax, xytext=(5, 5))
ax.legend(loc=8, bbox_to_anchor=(0.5, 1), frameon=False,
borderaxespad=0.1, ncol=2)
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return ax
def plot_ecdf(self, fit, ax=None, **kwargs):
"""Plot observed and modelled empirical cumulative frequencies
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.gca`.
Returns
-------
ax :
:class:`~matplotlib.axes.Axes` instances.
"""
x = self.x
xx = np.log1p(x)
x_ecdf = ECDF(xx)
x_pred = np.linspace(0, xx.max(), num=101)
x_pred_expm1 = np.expm1(x_pred)
y_pred = x_ecdf(x_pred)
if ax is None:
ax = plt.gca(**kwargs)
# Plot ECDF of data
ax.step(x_pred_expm1, y_pred, label="observed")
ax.set_xscale("log")
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xlim(np.exp(xx).min(), np.exp(xx).max())
# Plot estimated CDF
coefs = fit.x
if len(coefs) == 3:
p_hat = [coefs[0]] # list to bouts.ecdf()
lda_hat = pd.Series(coefs[1:], name="lambda")
elif len(coefs) == 5:
p_hat = coefs[:2]
lda_hat = pd.Series(coefs[2:], name="lambda")
y_mod = bouts.ecdf(x_pred_expm1, p_hat, lda_hat)
ax.plot(x_pred_expm1, y_mod, label="model")
# Add a little offset to ylim for visibility
yoffset = (0.05, 1.05)
ax.set_ylim(*yoffset) # add some spacing
# Plot BEC
bec_x = self.bec(fit)
bec_y = bouts.ecdf(bec_x, p=p_hat, lambdas=lda_hat)
bouts._plot_bec(bec_x, bec_y=bec_y, ax=ax, xytext=(-5, 5),
horizontalalignment="right")
ax.legend(loc="upper left")
ax.set_xlabel("x")
ax.set_ylabel("ECDF [x]")
return ax
if __name__ == '__main__':
# Set up info level logging
logging.basicConfig(level=logging.INFO)
from skdiveMove.tests import diveMove2skd
tdrX = diveMove2skd()
pars = {"offset_zoc": 3,
"dry_thr": 70,
"wet_thr": 3610,
"dive_thr": 3,
"dive_model": "unimodal",
"smooth_par": 0.1,
"knot_factor": 20,
"descent_crit_q": 0.01,
"ascent_crit_q": 0}
tdrX.calibrate(zoc_method="offset", offset=pars["offset_zoc"],
                   dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"],
dive_thr=pars["dive_thr"],
dive_model=pars["dive_model"],
smooth_par=pars["smooth_par"],
knot_factor=pars["knot_factor"],
descent_crit_q=pars["descent_crit_q"],
ascent_crit_q=pars["ascent_crit_q"])
stats = tdrX.dive_stats()
stamps = tdrX.stamp_dives(ignore_z=True)
stats_tab = pd.concat((stamps, stats), axis=1)
# 2=4 here
postdives = stats_tab["postdive_dur"][stats_tab["phase_id"] == 4]
postdives_diff = postdives.dt.total_seconds().diff()[1:].abs()
# Remove isolated dives
postdives_diff = postdives_diff[postdives_diff < 2000]
# Set up instance
bouts_postdive = BoutsMLE(postdives_diff, 0.1)
# Get init parameters from broken stick model
bout_init_pars = bouts_postdive.init_pars([50], plot=False)
    # Bounds for the transformed parameters (first fit) and the
    # original-scale parameters (second fit)
p_bnd = (-2, None)
lda1_bnd = (-5, None)
lda2_bnd = (-10, None)
bd1 = (p_bnd, lda1_bnd, lda2_bnd)
p_bnd = (1e-8, None)
lda1_bnd = (1e-8, None)
lda2_bnd = (1e-8, None)
bd2 = (p_bnd, lda1_bnd, lda2_bnd)
fit1, fit2 = bouts_postdive.fit(bout_init_pars,
fit1_opts=dict(method="L-BFGS-B",
bounds=bd1),
fit2_opts=dict(method="L-BFGS-B",
bounds=bd2))
# BEC
becx = bouts_postdive.bec(fit2)
ax = bouts_postdive.plot_fit(fit2)
bouts_postdive.plot_ecdf(fit2) | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/boutsmle.py | boutsmle.py | import logging
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from scipy.special import logit, expit
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from . import bouts
logger = logging.getLogger(__name__)
# Add the null handler if importing as library; whatever using this library
# should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def mleLL(x, p, lambdas):
r"""Random Poisson processes function
The current implementation takes two or three random Poisson processes.
Parameters
----------
x : array_like
Independent data array described by model with parameters `p`,
:math:`\lambda_f`, and :math:`\lambda_s`.
p : list
List with mixing parameters of the model.
lambdas : array_like
1-D Array with the density parameters (:math:`\lambda`) of the
model. Its length must be length(p) + 1.
Returns
-------
out : array_like
Same shape as `x` with the evaluated function.
"""
logmsg = "p={0}, lambdas={1}".format(p, lambdas)
logger.info(logmsg)
nproc = lambdas.size
# We assume at least two processes
p0 = p[0]
lda0 = lambdas[0]
term0 = p0 * lda0 * np.exp(-lda0 * x)
if nproc == 2:
lda1 = lambdas[1]
term1 = (1 - p0) * lda1 * np.exp(-lda1 * x)
res = term0 + term1
    else:  # 3 processes; a maximum of 3 is enforced in negMLEll
p1 = p[1]
lda1 = lambdas[1]
term1 = p1 * (1 - p0) * lda1 * np.exp(-lda1 * x)
lda2 = lambdas[2]
term2 = (1 - p1) * (1 - p0) * lda2 * np.exp(-lda2 * x)
res = term0 + term1 + term2
return np.log(res)
class BoutsMLE(bouts.Bouts):
r"""Maximum Likelihood estimation for models of Poisson process mixtures
Methods for modelling log-frequency data as a mixture of Poisson
processes via maximum likelihood estimation [2]_, [3]_. Mixtures of
two or three Poisson processes are supported.
Even in these relatively simple cases, it is very important to provide
good starting values for the parameters.
One useful strategy to get good starting parameter values is to proceed
in 4 steps. First, fit a broken stick model to the log frequencies of
binned data (see :meth:`~Bouts.init_pars`), to obtain estimates of 4
parameters in a 2-process model [1]_, or 6 in a 3-process model.
Second, calculate parameter(s) :math:`p` from the :math:`\alpha`
parameters obtained by fitting the broken stick model, to get tentative
initial values as in [2]_. Third, obtain MLE estimates for these
parameters, but using a reparameterized version of the -log L2
function. Lastly, obtain the final MLE estimates for the three
parameters by using the estimates from step 3, un-transformed back to
their original scales, maximizing the original parameterization of the
-log L2 function.
:meth:`~Bouts.init_pars` can be used to perform step 1. Calculation of
the mixing parameters :math:`p` in step 2 is trivial from these
estimates. Method :meth:`negMLEll` calculates the negative
log-likelihood for a reparameterized version of the -log L2 function
given by [1]_, so can be used for step 3. This uses a logit
transformation of the mixing parameter :math:`p`, and log
transformations for density parameters :math:`\lambda`. Method
:meth:`negMLEll` is used again to compute the -log L2 function
corresponding to the un-transformed model for step 4.
The :meth:`fit` method performs the main job of maximizing the -log L2
functions, and is essentially a wrapper around
:func:`~scipy.optimize.minimize`. It only takes the -log L2 function,
a `DataFrame` of starting values, and the variable to be modelled, all
of which are passed to :func:`~scipy.optimize.minimize` for
optimization. Additionally, any other arguments are also passed to
:func:`~scipy.optimize.minimize`, hence great control is provided for
fitting any of the -log L2 functions.
In practice, step 3 does not pose major problems using the
reparameterized -log L2 function, but it might be useful to use method
'L-BFGS-B' with appropriate lower and upper bounds. Step 4 can be a
bit more problematic, because the parameters are usually on very
different scales and there can be multiple minima. Therefore, it is
almost always the rule to use method 'L-BFGS-B', again bounding the
parameter search, as well as other settings for controlling the
optimization.
    References
    ----------
    .. [1] Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour
       into bouts. Animal Behaviour 39, 63-69.
.. [2] Langton, S.; Collett, D. and Sibly, R. (1995) Splitting
behaviour into bouts; a maximum likelihood approach. Behaviour 132,
9-10.
.. [3] Luque, S.P. and Guinet, C. (2007) A maximum likelihood approach
for identifying dive bouts improves accuracy, precision, and
objectivity. Behaviour, 144, 1315-1332.
Examples
--------
See :doc:`demo_simulbouts` for a detailed example.
"""
def negMLEll(self, params, x, istransformed=True):
r"""Log likelihood function of parameters given observed data
Parameters
----------
params : array_like
1-D array with parameters to fit. Currently must be either
3-length, with mixing parameter :math:`p`, density parameter
:math:`\lambda_f` and :math:`\lambda_s`, in that order, or
            5-length, with :math:`p_f`, :math:`p_{fs}`, :math:`\lambda_f`,
:math:`\lambda_m`, :math:`\lambda_s`.
x : array_like
Independent data array described by model with parameters
`params`.
istransformed : bool
Whether `params` are transformed and need to be un-transformed
to calculate the likelihood.
Returns
-------
        out : float
            Negative log-likelihood of `params` given the data `x`.
"""
if len(params) == 3:
            # Need list `p` for mleLL
p = [params[0]]
lambdas = params[1:]
elif len(params) == 5:
p = params[:2]
lambdas = params[2:]
else:
msg = "Only mixtures of <= 3 processes are implemented"
raise KeyError(msg)
if istransformed:
p = expit(p)
lambdas = np.exp(lambdas)
ll = -sum(mleLL(x, p, lambdas))
logger.info("LL={}".format(ll))
return ll
def fit(self, start, fit1_opts=None, fit2_opts=None):
"""Maximum likelihood estimation of log frequencies
Parameters
----------
start : pandas.DataFrame
DataFrame with starting values for coefficients of each process
in columns. These can come from the "broken stick" method as
in :meth:`Bouts.init_pars`, and will be transformed to minimize
the first log likelihood function.
fit1_opts, fit2_opts : dict
Dictionaries with keywords to be passed to
:func:`scipy.optimize.minimize`, for the first and second fits.
Returns
-------
fit1, fit2 : scipy.optimize.OptimizeResult
Objects with the optimization result from the first and second
fit, having a `x` attribute with coefficients of the solution.
Notes
-----
Current implementation handles mixtures of two Poisson processes.
"""
# Calculate `p`
p0, lambda0 = bouts.calc_p(start)
# transform parameters for first fit
lambda0 = np.log(lambda0)
x0 = np.array([*logit(p0), *lambda0])
logger.info("Starting first fit")
if fit1_opts:
fit1 = minimize(self.negMLEll, x0=x0, args=(self.x,),
**fit1_opts)
else:
fit1 = minimize(self.negMLEll, x0=x0, args=(self.x,))
coef0 = fit1.x
start2 = [expit(coef0[0]), *np.exp(coef0[1:])]
logger.info("Starting second fit")
if fit2_opts:
fit2 = minimize(self.negMLEll, x0=start2,
args=(self.x, False), **fit2_opts)
else:
fit2 = minimize(self.negMLEll, x0=start2,
args=(self.x, False))
logger.info("N iter fit 1: {0}, fit 2: {1}"
.format(fit1.nit, fit2.nit))
return (fit1, fit2)
def bec(self, fit):
"""Calculate bout ending criteria from model coefficients
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
Returns
-------
out : numpy.ndarray
Notes
-----
        The returned array holds one BEC per transition between processes:
        a single value for a two-process mixture, or two values for a
        three-process mixture.
"""
coefs = fit.x
if len(coefs) == 3:
p_hat = coefs[0]
lda1_hat = coefs[1]
lda2_hat = coefs[2]
bec = (np.log((p_hat * lda1_hat) /
((1 - p_hat) * lda2_hat)) /
(lda1_hat - lda2_hat))
elif len(coefs) == 5:
p0_hat, p1_hat = coefs[:2]
lda0_hat, lda1_hat, lda2_hat = coefs[2:]
bec0 = (np.log((p0_hat * lda0_hat) /
((1 - p0_hat) * lda1_hat)) /
(lda0_hat - lda1_hat))
bec1 = (np.log((p1_hat * lda1_hat) /
((1 - p1_hat) * lda2_hat)) /
(lda1_hat - lda2_hat))
bec = [bec0, bec1]
return np.array(bec)
def plot_fit(self, fit, ax=None):
"""Plot log frequency histogram and fitted model
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
Returns
-------
ax :
:class:`~matplotlib.axes.Axes` instances.
"""
# Method is redefined from Bouts
x = self.x
coefs = fit.x
if len(coefs) == 3:
p_hat = [coefs[0]]
lda_hat = coefs[1:]
elif len(coefs) == 5:
p_hat = coefs[:2]
lda_hat = coefs[2:]
xmin = x.min()
xmax = x.max()
x_pred = np.linspace(xmin, xmax, num=101) # matches R's curve
        # Evaluate the fitted mixture on a regular grid for plotting
y_pred = mleLL(x_pred, p_hat, lda_hat)
if ax is None:
ax = plt.gca()
# Data rug plot
ax.plot(x, np.ones_like(x) * y_pred.max(), "|",
color="k", label="observed")
# Plot predicted
ax.plot(x_pred, y_pred, label="model")
# Plot BEC
bec_x = self.bec(fit)
bec_y = mleLL(bec_x, p_hat, lda_hat)
bouts._plot_bec(bec_x, bec_y, ax=ax, xytext=(5, 5))
ax.legend(loc=8, bbox_to_anchor=(0.5, 1), frameon=False,
borderaxespad=0.1, ncol=2)
ax.set_xlabel("x")
ax.set_ylabel("log frequency")
return ax
def plot_ecdf(self, fit, ax=None, **kwargs):
"""Plot observed and modelled empirical cumulative frequencies
Parameters
----------
fit : scipy.optimize.OptimizeResult
Object with the optimization result, having a `x` attribute
with coefficients of the solution.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.gca`.
Returns
-------
ax :
:class:`~matplotlib.axes.Axes` instances.
"""
x = self.x
xx = np.log1p(x)
x_ecdf = ECDF(xx)
x_pred = np.linspace(0, xx.max(), num=101)
x_pred_expm1 = np.expm1(x_pred)
y_pred = x_ecdf(x_pred)
if ax is None:
ax = plt.gca(**kwargs)
# Plot ECDF of data
ax.step(x_pred_expm1, y_pred, label="observed")
ax.set_xscale("log")
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xlim(np.exp(xx).min(), np.exp(xx).max())
# Plot estimated CDF
coefs = fit.x
if len(coefs) == 3:
p_hat = [coefs[0]] # list to bouts.ecdf()
lda_hat = pd.Series(coefs[1:], name="lambda")
elif len(coefs) == 5:
p_hat = coefs[:2]
lda_hat = pd.Series(coefs[2:], name="lambda")
y_mod = bouts.ecdf(x_pred_expm1, p_hat, lda_hat)
ax.plot(x_pred_expm1, y_mod, label="model")
# Add a little offset to ylim for visibility
yoffset = (0.05, 1.05)
ax.set_ylim(*yoffset) # add some spacing
# Plot BEC
bec_x = self.bec(fit)
bec_y = bouts.ecdf(bec_x, p=p_hat, lambdas=lda_hat)
bouts._plot_bec(bec_x, bec_y=bec_y, ax=ax, xytext=(-5, 5),
horizontalalignment="right")
ax.legend(loc="upper left")
ax.set_xlabel("x")
ax.set_ylabel("ECDF [x]")
return ax
if __name__ == '__main__':
# Set up info level logging
logging.basicConfig(level=logging.INFO)
from skdiveMove.tests import diveMove2skd
tdrX = diveMove2skd()
pars = {"offset_zoc": 3,
"dry_thr": 70,
"wet_thr": 3610,
"dive_thr": 3,
"dive_model": "unimodal",
"smooth_par": 0.1,
"knot_factor": 20,
"descent_crit_q": 0.01,
"ascent_crit_q": 0}
tdrX.calibrate(zoc_method="offset", offset=pars["offset_zoc"],
                   dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"],
dive_thr=pars["dive_thr"],
dive_model=pars["dive_model"],
smooth_par=pars["smooth_par"],
knot_factor=pars["knot_factor"],
descent_crit_q=pars["descent_crit_q"],
ascent_crit_q=pars["ascent_crit_q"])
stats = tdrX.dive_stats()
stamps = tdrX.stamp_dives(ignore_z=True)
stats_tab = pd.concat((stamps, stats), axis=1)
# 2=4 here
postdives = stats_tab["postdive_dur"][stats_tab["phase_id"] == 4]
postdives_diff = postdives.dt.total_seconds().diff()[1:].abs()
# Remove isolated dives
postdives_diff = postdives_diff[postdives_diff < 2000]
# Set up instance
bouts_postdive = BoutsMLE(postdives_diff, 0.1)
# Get init parameters from broken stick model
bout_init_pars = bouts_postdive.init_pars([50], plot=False)
    # Bounds for the transformed parameters (first fit) and the
    # original-scale parameters (second fit)
p_bnd = (-2, None)
lda1_bnd = (-5, None)
lda2_bnd = (-10, None)
bd1 = (p_bnd, lda1_bnd, lda2_bnd)
p_bnd = (1e-8, None)
lda1_bnd = (1e-8, None)
lda2_bnd = (1e-8, None)
bd2 = (p_bnd, lda1_bnd, lda2_bnd)
fit1, fit2 = bouts_postdive.fit(bout_init_pars,
fit1_opts=dict(method="L-BFGS-B",
bounds=bd1),
fit2_opts=dict(method="L-BFGS-B",
bounds=bd2))
# BEC
becx = bouts_postdive.bec(fit2)
ax = bouts_postdive.plot_fit(fit2)
bouts_postdive.plot_ecdf(fit2) | 0.92412 | 0.617916 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from statsmodels.distributions.empirical_distribution import ECDF
from . import bouts
class BoutsNLS(bouts.Bouts):
"""Nonlinear Least Squares fitting for models of Poisson process mixtures
Methods for modelling log-frequency data as a mixture of Poisson
processes via nonlinear least squares [1]_.
References
----------
.. [1] Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour
       into bouts. Animal Behaviour 39, 63-69.
Examples
--------
    Draw 1000 samples from a mixture where the first process occurs with
    probability :math:`p = 0.7` and the second process occurs with the
    remaining probability.
>>> from skdiveMove.tests import random_mixexp
>>> rng = np.random.default_rng(123)
>>> x2 = random_mixexp(1000, p=0.7, lda=np.array([0.05, 0.005]),
... rng=rng)
>>> xbouts2 = BoutsNLS(x2, bw=5)
>>> init_pars = xbouts2.init_pars([80], plot=False)
Fit the model and retrieve coefficients:
>>> coefs, pcov = xbouts2.fit(init_pars)
>>> print(np.round(coefs, 4))
(2.519, 80.0] (80.0, 1297.52]
a 3648.8547 1103.4423
lambda 0.0388 0.0032
Calculate bout-ending criterion (returns array):
>>> print(np.round(xbouts2.bec(coefs), 4))
[103.8648]
Plot observed and predicted data:
>>> xbouts2.plot_fit(coefs) # doctest: +ELLIPSIS
<AxesSubplot:...>
Plot ECDF:
>>> xbouts2.plot_ecdf(coefs) # doctest: +ELLIPSIS
<AxesSubplot:...>
"""
def fit(self, start, **kwargs):
"""Fit non-linear least squares model to log frequencies
        The abstract superclass :class:`bouts.Bouts` implements this method.
Parameters
----------
start : pandas.DataFrame
DataFrame with coefficients for each process in columns.
**kwargs : optional keyword arguments
Passed to `scipy.optimize.curve_fit`.
Returns
-------
coefs : pandas.DataFrame
Coefficients of the model.
pcov : 2D array
Covariance of coefs.
"""
return bouts.Bouts.fit(self, start, **kwargs)
def bec(self, coefs):
"""Calculate bout ending criteria from model coefficients
        The abstract superclass :class:`bouts.Bouts` implements this method.
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns.
Returns
-------
out : numpy.ndarray, shape (n,)
            1-D array with BECs implied by `coefs`. Length is
            coefs.shape[1] - 1 (one BEC per transition between processes).
"""
        # The abstract superclass implements this method
return bouts.Bouts.bec(self, coefs)
def plot_ecdf(self, coefs, ax=None, **kwargs):
"""Plot observed and modelled empirical cumulative frequencies
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.gca`.
Returns
-------
ax :
:class:`~matplotlib.axes.Axes` instances.
"""
x = self.x
xx = np.log1p(x)
x_ecdf = ECDF(xx)
x_pred = np.linspace(0, xx.max(), num=101)
x_pred_expm1 = np.expm1(x_pred)
y_pred = x_ecdf(x_pred)
if ax is None:
ax = plt.gca(**kwargs)
# Plot ECDF of data
ax.step(x_pred_expm1, y_pred, label="observed")
ax.set_xscale("log")
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xlim(np.exp(xx).min(), np.exp(xx).max())
# Plot estimated CDF
p, lambdas = bouts.calc_p(coefs)
y_mod = bouts.ecdf(x_pred_expm1, p, lambdas)
ax.plot(x_pred_expm1, y_mod, label="model")
# Add a little offset to ylim for visibility
yoffset = (0.05, 1.05)
ax.set_ylim(*yoffset) # add some spacing
# Plot BEC
bec_x = self.bec(coefs)
bec_y = bouts.ecdf(bec_x, p=p, lambdas=lambdas)
bouts._plot_bec(bec_x, bec_y=bec_y, ax=ax, xytext=(-5, 5),
horizontalalignment="right")
ax.legend(loc="upper left")
ax.set_xlabel("x")
ax.set_ylabel("ECDF [x]")
return ax
if __name__ == '__main__':
from skdiveMove.tests import diveMove2skd
import pandas as pd
tdrX = diveMove2skd()
pars = {"offset_zoc": 3,
"dry_thr": 70,
"wet_thr": 3610,
"dive_thr": 3,
"dive_model": "unimodal",
"smooth_par": 0.1,
"knot_factor": 20,
"descent_crit_q": 0.01,
"ascent_crit_q": 0}
tdrX.calibrate(zoc_method="offset", offset=pars["offset_zoc"],
                   dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"],
dive_thr=pars["dive_thr"],
dive_model=pars["dive_model"],
smooth_par=pars["smooth_par"],
knot_factor=pars["knot_factor"],
descent_crit_q=pars["descent_crit_q"],
ascent_crit_q=pars["ascent_crit_q"])
stats = tdrX.dive_stats()
stamps = tdrX.stamp_dives(ignore_z=True)
stats_tab = pd.concat((stamps, stats), axis=1)
# 2=4 here
postdives = stats_tab["postdive_dur"][stats_tab["phase_id"] == 4]
postdives_diff = postdives.dt.total_seconds().diff()[1:].abs()
# Remove isolated dives
postdives_diff = postdives_diff[postdives_diff < 2000]
# Set up instance
bouts_postdive = BoutsNLS(postdives_diff, 0.1)
# Get init parameters
bout_init_pars = bouts_postdive.init_pars([50], plot=False)
nls_coefs, _ = bouts_postdive.fit(bout_init_pars)
# BEC
bouts_postdive.bec(nls_coefs)
bouts_postdive.plot_fit(nls_coefs)
# ECDF
fig1, ax1 = bouts_postdive.plot_ecdf(nls_coefs)
# Try 3 processes
# Get init parameters
bout_init_pars = bouts_postdive.init_pars([50, 550], plot=False)
nls_coefs, _ = bouts_postdive.fit(bout_init_pars)
# BEC
bouts_postdive.bec(nls_coefs)
bouts_postdive.plot_fit(nls_coefs)
# ECDF
fig2, ax2 = bouts_postdive.plot_ecdf(nls_coefs) | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/boutsnls.py | boutsnls.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from statsmodels.distributions.empirical_distribution import ECDF
from . import bouts
class BoutsNLS(bouts.Bouts):
"""Nonlinear Least Squares fitting for models of Poisson process mixtures
Methods for modelling log-frequency data as a mixture of Poisson
processes via nonlinear least squares [1]_.
References
----------
.. [1] Sibly, R.; Nott, H. and Fletcher, D. (1990) Splitting behaviour
       into bouts. Animal Behaviour 39, 63-69.
Examples
--------
    Draw 1000 samples from a mixture where the first process occurs with
    probability :math:`p = 0.7` and the second process occurs with the
    remaining probability.
>>> from skdiveMove.tests import random_mixexp
>>> rng = np.random.default_rng(123)
>>> x2 = random_mixexp(1000, p=0.7, lda=np.array([0.05, 0.005]),
... rng=rng)
>>> xbouts2 = BoutsNLS(x2, bw=5)
>>> init_pars = xbouts2.init_pars([80], plot=False)
Fit the model and retrieve coefficients:
>>> coefs, pcov = xbouts2.fit(init_pars)
>>> print(np.round(coefs, 4))
(2.519, 80.0] (80.0, 1297.52]
a 3648.8547 1103.4423
lambda 0.0388 0.0032
Calculate bout-ending criterion (returns array):
>>> print(np.round(xbouts2.bec(coefs), 4))
[103.8648]
Plot observed and predicted data:
>>> xbouts2.plot_fit(coefs) # doctest: +ELLIPSIS
<AxesSubplot:...>
Plot ECDF:
>>> xbouts2.plot_ecdf(coefs) # doctest: +ELLIPSIS
<AxesSubplot:...>
"""
def fit(self, start, **kwargs):
"""Fit non-linear least squares model to log frequencies
        The abstract superclass :class:`bouts.Bouts` implements this method.
Parameters
----------
start : pandas.DataFrame
DataFrame with coefficients for each process in columns.
**kwargs : optional keyword arguments
Passed to `scipy.optimize.curve_fit`.
Returns
-------
coefs : pandas.DataFrame
Coefficients of the model.
pcov : 2D array
Covariance of coefs.
"""
return bouts.Bouts.fit(self, start, **kwargs)
def bec(self, coefs):
"""Calculate bout ending criteria from model coefficients
        The abstract superclass :class:`bouts.Bouts` implements this method.
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns.
Returns
-------
out : numpy.ndarray, shape (n,)
            1-D array with BECs implied by `coefs`. Length is
            coefs.shape[1] - 1 (one BEC per transition between processes).
"""
        # The abstract superclass implements this method
return bouts.Bouts.bec(self, coefs)
def plot_ecdf(self, coefs, ax=None, **kwargs):
"""Plot observed and modelled empirical cumulative frequencies
Parameters
----------
coefs : pandas.DataFrame
DataFrame with model coefficients in columns.
ax : matplotlib.axes.Axes instance
An Axes instance to use as target.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.gca`.
Returns
-------
ax :
:class:`~matplotlib.axes.Axes` instances.
"""
x = self.x
xx = np.log1p(x)
x_ecdf = ECDF(xx)
x_pred = np.linspace(0, xx.max(), num=101)
x_pred_expm1 = np.expm1(x_pred)
y_pred = x_ecdf(x_pred)
if ax is None:
ax = plt.gca(**kwargs)
# Plot ECDF of data
ax.step(x_pred_expm1, y_pred, label="observed")
ax.set_xscale("log")
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xlim(np.exp(xx).min(), np.exp(xx).max())
# Plot estimated CDF
p, lambdas = bouts.calc_p(coefs)
y_mod = bouts.ecdf(x_pred_expm1, p, lambdas)
ax.plot(x_pred_expm1, y_mod, label="model")
# Add a little offset to ylim for visibility
yoffset = (0.05, 1.05)
ax.set_ylim(*yoffset) # add some spacing
# Plot BEC
bec_x = self.bec(coefs)
bec_y = bouts.ecdf(bec_x, p=p, lambdas=lambdas)
bouts._plot_bec(bec_x, bec_y=bec_y, ax=ax, xytext=(-5, 5),
horizontalalignment="right")
ax.legend(loc="upper left")
ax.set_xlabel("x")
ax.set_ylabel("ECDF [x]")
return ax
if __name__ == '__main__':
from skdiveMove.tests import diveMove2skd
import pandas as pd
tdrX = diveMove2skd()
pars = {"offset_zoc": 3,
"dry_thr": 70,
"wet_thr": 3610,
"dive_thr": 3,
"dive_model": "unimodal",
"smooth_par": 0.1,
"knot_factor": 20,
"descent_crit_q": 0.01,
"ascent_crit_q": 0}
tdrX.calibrate(zoc_method="offset", offset=pars["offset_zoc"],
                   dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"],
dive_thr=pars["dive_thr"],
dive_model=pars["dive_model"],
smooth_par=pars["smooth_par"],
knot_factor=pars["knot_factor"],
descent_crit_q=pars["descent_crit_q"],
ascent_crit_q=pars["ascent_crit_q"])
stats = tdrX.dive_stats()
stamps = tdrX.stamp_dives(ignore_z=True)
stats_tab = pd.concat((stamps, stats), axis=1)
# 2=4 here
postdives = stats_tab["postdive_dur"][stats_tab["phase_id"] == 4]
postdives_diff = postdives.dt.total_seconds().diff()[1:].abs()
# Remove isolated dives
postdives_diff = postdives_diff[postdives_diff < 2000]
# Set up instance
bouts_postdive = BoutsNLS(postdives_diff, 0.1)
# Get init parameters
bout_init_pars = bouts_postdive.init_pars([50], plot=False)
nls_coefs, _ = bouts_postdive.fit(bout_init_pars)
# BEC
bouts_postdive.bec(nls_coefs)
bouts_postdive.plot_fit(nls_coefs)
# ECDF
fig1, ax1 = bouts_postdive.plot_ecdf(nls_coefs)
# Try 3 processes
# Get init parameters
bout_init_pars = bouts_postdive.init_pars([50, 550], plot=False)
nls_coefs, _ = bouts_postdive.fit(bout_init_pars)
# BEC
bouts_postdive.bec(nls_coefs)
bouts_postdive.plot_fit(nls_coefs)
# ECDF
fig2, ax2 = bouts_postdive.plot_ecdf(nls_coefs) | 0.936677 | 0.774328 |
r"""Tools and classes for the identification of behavioural bouts
A histogram of log-transformed frequencies of `x` with a chosen bin width
and upper limit forms the basis for models. Histogram bins following empty
ones have their frequencies averaged over the number of previous empty bins
plus one. Models attempt to discern the number of random Poisson
processes, and their parameters, generating the underlying distribution of
log-transformed frequencies.
The abstract class :class:`Bouts` provides basic methods.
Abstract class & methods summary
--------------------------------
.. autosummary::
Bouts
Bouts.init_pars
Bouts.fit
Bouts.bec
Bouts.plot_fit
Nonlinear least squares models
------------------------------
Currently, the model describing the histogram as it is built is implemented
in the :class:`BoutsNLS` class. For the case of a mixture of two Poisson
processes, this class would set up the model:
.. math::
:label: 1
y = log[N_f \lambda_f e^{-\lambda_f t} +
N_s \lambda_s e^{-\lambda_s t}]
where :math:`N_f` and :math:`N_s` are the number of events belonging to
process :math:`f` and :math:`s`, respectively; and :math:`\lambda_f` and
:math:`\lambda_s` are the probabilities of an event occurring in each
process. Mixtures of more processes can also be added to the model.
The bout-ending criterion (BEC) corresponding to equation :eq:`1` is:
.. math::
:label: 2
BEC = \frac{1}{\lambda_f - \lambda_s}
log \frac{N_f \lambda_f}{N_s \lambda_s}
Note that there is one BEC per transition between Poisson processes.
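For instance, with illustrative values :math:`N_f = 700`,
:math:`\lambda_f = 0.05`, :math:`N_s = 300` and :math:`\lambda_s = 0.005`,
equation :eq:`2` gives :math:`BEC = \log(35 / 1.5) / 0.045 \approx 70`.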
The methods of this subclass are provided by the abstract superclass
:class:`Bouts`, except for those listed below, which the subclass defines.
Methods summary
---------------
.. autosummary::
BoutsNLS.plot_ecdf
Maximum likelihood models
-------------------------
This is the preferred approach to modelling mixtures of random Poisson
processes, as it does not rely on the subjective construction of a
histogram. The histogram is only used to generate reasonable starting
values, but the underlying paramters of the model are obtained via maximum
likelihood, so it is more robust.
For the case of a mixture of two processes, as above, the log likelihood of
all the :math:`N_t` in a mixture can be expressed as:
.. math::
:label: 3
log\ L_2 = \sum_{i=1}^{N_t} log[p \lambda_f e^{-\lambda_f t_i} +
(1-p) \lambda_s e^{-\lambda_s t_i}]
where :math:`p` is a mixing parameter indicating the proportion of fast to
slow process events in the sampled population.
The BEC in this case can be estimated as:
.. math::
:label: 4
BEC = \frac{1}{\lambda_f - \lambda_s}
log \frac{p\lambda_f}{(1-p)\lambda_s}
The subclass :class:`BoutsMLE` offers the framework for these models.
Class & methods summary
-----------------------
.. autosummary::
BoutsMLE.negMLEll
BoutsMLE.fit
BoutsMLE.bec
BoutsMLE.plot_fit
BoutsMLE.plot_ecdf
API
---
"""
from .bouts import Bouts, label_bouts
from .boutsnls import BoutsNLS
from .boutsmle import BoutsMLE
from skdiveMove.tests import random_mixexp
__all__ = ["Bouts", "BoutsNLS", "BoutsMLE", "label_bouts",
"random_mixexp"] | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/bouts/__init__.py | __init__.py | r"""Tools and classes for the identification of behavioural bouts
A histogram of log-transformed frequencies of `x` with a chosen bin width
and upper limit forms the basis for models. Histogram bins following empty
ones have their frequencies averaged over the number of previous empty bins
plus one. Models attempt to discern the number of random Poisson
processes, and their parameters, generating the underlying distribution of
log-transformed frequencies.
The abstract class :class:`Bouts` provides basic methods.
Abstract class & methods summary
--------------------------------
.. autosummary::
Bouts
Bouts.init_pars
Bouts.fit
Bouts.bec
Bouts.plot_fit
Nonlinear least squares models
------------------------------
Currently, the model describing the histogram as it is built is implemented
in the :class:`BoutsNLS` class. For the case of a mixture of two Poisson
processes, this class would set up the model:
.. math::
:label: 1
y = log[N_f \lambda_f e^{-\lambda_f t} +
N_s \lambda_s e^{-\lambda_s t}]
where :math:`N_f` and :math:`N_s` are the number of events belonging to
process :math:`f` and :math:`s`, respectively; and :math:`\lambda_f` and
:math:`\lambda_s` are the probabilities of an event occurring in each
process. Mixtures of more processes can also be added to the model.
The bout-ending criterion (BEC) corresponding to equation :eq:`1` is:
.. math::
:label: 2
BEC = \frac{1}{\lambda_f - \lambda_s}
log \frac{N_f \lambda_f}{N_s \lambda_s}
Note that there is one BEC per transition between Poisson processes.
The methods of this subclass are provided by the abstract superclass
:class:`Bouts`, except for those listed below, which the subclass defines.
Methods summary
---------------
.. autosummary::
BoutsNLS.plot_ecdf
Maximum likelihood models
-------------------------
This is the preferred approach to modelling mixtures of random Poisson
processes, as it does not rely on the subjective construction of a
histogram. The histogram is only used to generate reasonable starting
values, but the underlying paramters of the model are obtained via maximum
likelihood, so it is more robust.
For the case of a mixture of two processes, as above, the log likelihood of
all the :math:`N_t` in a mixture can be expressed as:
.. math::
:label: 3
log\ L_2 = \sum_{i=1}^{N_t} log[p \lambda_f e^{-\lambda_f t_i} +
(1-p) \lambda_s e^{-\lambda_s t_i}]
where :math:`p` is a mixing parameter indicating the proportion of fast to
slow process events in the sampled population.
The BEC in this case can be estimated as:
.. math::
:label: 4
BEC = \frac{1}{\lambda_f - \lambda_s}
log \frac{p\lambda_f}{(1-p)\lambda_s}
The subclass :class:`BoutsMLE` offers the framework for these models.
Class & methods summary
-----------------------
.. autosummary::
BoutsMLE.negMLEll
BoutsMLE.fit
BoutsMLE.bec
BoutsMLE.plot_fit
BoutsMLE.plot_ecdf
API
---
"""
from .bouts import Bouts, label_bouts
from .boutsnls import BoutsNLS
from .boutsmle import BoutsMLE
from skdiveMove.tests import random_mixexp
__all__ = ["Bouts", "BoutsNLS", "BoutsMLE", "label_bouts",
"random_mixexp"] | 0.916801 | 0.969584 |
# scikit-downscale
Statistical downscaling and postprocessing models for climate and weather model simulations.
[![CI](https://github.com/jhamman/scikit-downscale/workflows/CI/badge.svg)](https://github.com/jhamman/scikit-downscale/actions?query=workflow%3ACI+branch%3Amain+) [![Documentation Status](https://readthedocs.org/projects/scikit-downscale/badge/?version=latest)](https://scikit-downscale.readthedocs.io/en/latest/?badge=latest) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/pangeo-data/scikit-downscale/HEAD)
[![](https://img.shields.io/pypi/v/scikit-downscale.svg)](https://pypi.org/project/scikit-downscale/)
![Conda (channel only)](https://img.shields.io/conda/vn/conda-forge/scikit-downscale)
| scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/README.md | README.md | # scikit-downscale
Statistical downscaling and postprocessing models for climate and weather model simulations.
[![CI](https://github.com/jhamman/scikit-downscale/workflows/CI/badge.svg)](https://github.com/jhamman/scikit-downscale/actions?query=workflow%3ACI+branch%3Amain+) [![Documentation Status](https://readthedocs.org/projects/scikit-downscale/badge/?version=latest)](https://scikit-downscale.readthedocs.io/en/latest/?badge=latest) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/pangeo-data/scikit-downscale/HEAD)
[![](https://img.shields.io/pypi/v/scikit-downscale.svg)](https://pypi.org/project/scikit-downscale/)
![Conda (channel only)](https://img.shields.io/conda/vn/conda-forge/scikit-downscale)
| 0.624523 | 0.51562 |
.. scikit-downscale documentation master file, created by
sphinx-quickstart on Wed Oct 9 13:59:33 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Scikit-downscale: toolkit for statistical downscaling
=====================================================
Scikit-downscale is a toolkit for statistical downscaling using Scikit-Learn_.
It is meant to support the development of new and existing downscaling
methods in a common framework. It implements Scikit-learn's `fit`/`predict` API
facilitating the development of a wide range of statistical downscaling models.
Utilities and a high-level API built on Xarray_ and Dask_ support both
point-wise and global downscaling applications.
.. _Xarray: http://xarray.pydata.org
.. _Scikit-Learn: https://scikit-learn.org
.. _Dask: https://dask.org
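A sketch of the intended point-wise usage with xarray data follows (adapted
from the roadmap; it assumes the high-level wrapper exposes the same
`fit`/`predict` convention as the pointwise models):
.. code-block:: Python
    from skdownscale.pointwise_models import BcsdTemperature
    from skdownscale.pointwise_models import PointWiseDownscaler
    model = PointWiseDownscaler(BcsdTemperature(), dim='time')
    model.fit(da_train, da_obs)  # xarray.DataArray inputs
    downscaled = model.predict(da_predict)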
Under Active Development
~~~~~~~~~~~~~~~~~~~~~~~~
Scikit-downscale is under active development. We are looking for additional
contributors to help fill out the list of downscaling methods supported here.
We are also looking to find collaborators interested in using deep learning
to build global downscaling tools. Get in touch with us on our
`GitHub page <https://github.com/jhamman/scikit-downscale>`_.
.. toctree::
:maxdepth: 2
:caption: Contents:
roadmap
api
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/docs/index.rst | index.rst | .. scikit-downscale documentation master file, created by
sphinx-quickstart on Wed Oct 9 13:59:33 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Scikit-downscale: toolkit for statistical downscaling
=====================================================
Scikit-downscale is a toolkit for statistical downscaling using Scikit-Learn_.
It is meant to support the development of new and existing downscaling
methods in a common framework. It implements Scikit-learn's `fit`/`predict` API
facilitating the development of a wide range of statistical downscaling models.
Utilities and a high-level API built on Xarray_ and Dask_ support both
point-wise and global downscaling applications.
.. _Xarray: http://xarray.pydata.org
.. _Scikit-Learn: https://scikit-learn.org
.. _Dask: https://dask.org
Under Active Development
~~~~~~~~~~~~~~~~~~~~~~~~
Scikit-downscale is under active development. We are looking for additional
contributors to help fill out the list of downscaling methods supported here.
We are also looking to find collaborators interested in using deep learning
to build global downscaling tools. Get in touch with us on our
`GitHub page <https://github.com/jhamman/scikit-downscale>`_.
.. toctree::
:maxdepth: 2
:caption: Contents:
roadmap
api
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 0.82379 | 0.348396 |
.. _roadmap:
Development Roadmap
===================
Author: Joe Hamman
Date: September 15, 2019
Background and scope
--------------------
Scikit-downscale is a toolkit for statistical downscaling using Xarray. It is
meant to support the development of new and existing downscaling methods in a
common framework. It implements a fit/predict API that accepts Xarray objects,
similar to Python's Scikit-Learn, for building a range of downscaling models.
For example, implementing a BCSD workflow may look something like this:
.. code-block:: Python
from skdownscale.pointwise_models import PointWiseDownscaler
from skdownscale.models.bcsd import BCSDTemperature, bcsd_disaggregator
# da_temp_train: xarray.DataArray (monthly)
# da_temp_obs: xarray.DataArray (monthly)
# da_temp_obs_daily: xarray.DataArray (daily)
# da_temp_predict: xarray.DataArray (monthly)
# create a model
bcsd_model = PointWiseDownscaler(BCSDTemperature(), dim='time')
# train the model
bcsd_model.train(da_temp_train, da_temp_obs)
# predict with the model (downscaled_temp: xr.DataArray)
downscaled_temp = bcsd_model.predict(da_temp_predict)
# disaggregate the downscaled data (final: xr.DataArray)
final = bcsd_disaggregator(downscaled_temp, da_temp_obs_daily)
We are currently envisioning the project having three components (described
in the components section below). While we haven't started work on the deep
learning models component, this is certainly a central motivation to this
package and I am looking forward to starting on this work soon.
Principles
----------
- Open - aim to take the sausage making out of downscaling; open-source
  methods, comparable, extensible
- Scalable - plug into existing frameworks (e.g. dask/pangeo) to scale up,
  allow for use at single points to scale down
- Portable - unopinionated when it comes to compute platform
- Tested - rigorously tested, both the computational and the scientific
  implementation
Components
----------
1. `pointwise_models`: a collection of linear models that are intended to be
applied point-by-point. These may be sklearn Pipelines or custom sklearn-like
models (e.g. BCSDTemperature).
2. `global_models`: (not implemented) concept space for deep learning-based
models.
3. `metrics`: (not implemented) concept space for a benchmarking suite
Models
------
Scikit-downscale should provide a collection of a common set of downscaling
models and the building blocks to construct new models. As a starter, I intend
to implement the following models:
Pointwise models
~~~~~~~~~~~~~~~~
1. BCSD_[Temperature, Precipitation]: Wood et al 2002
2. ARRM: Stoner et al 2012
3. Delta Method
4. Hybrid Delta Method
5. GARD: https://github.com/NCAR/GARD
6. ?
Other methods, like LOCA, MACA, BCCA, etc, should also be possible.
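A single pointwise model can also be used on its own with pandas objects,
following the usual scikit-learn pattern (a sketch adapted from the example
notebook; the variable names are placeholders):
.. code-block:: Python
    from skdownscale.pointwise_models import BcsdTemperature
    # X_train, y_train, X_predict: pandas objects indexed by time
    model = BcsdTemperature()
    model.fit(X_train, y_train)
    downscaled = model.predict(X_predict)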
Global models
~~~~~~~~~~~~~
This category of methods is really what is motivating the development of this
package. We've seen some early work from TJ Vandal in this area but there is
more work to be done. For now, I'll just jot down what a possible API might
look like:
.. code-block:: Python
from skdownscale.global_models import GlobalDownscaler
from skdownscale.global_models.deepsd import DeepSD
# ...
# create a model
model = GlobalDownscaler(DeepSD())
# train the model
model.train(da_temp_train, da_temp_obs)
# predict with the model (downscaled_temp: xr.DataArray)
downscaled_temp = model.predict(da_temp_predict)
Dependencies
------------
- Core: Xarray, Pandas, Dask, Scikit-learn, Numpy, Scipy
- Optional: Statsmodels, Keras, PyTorch, Tensorflow, etc.
Related projects
----------------
- FUDGE: https://github.com/NOAA-GFDL/FUDGE
- GARD: https://github.com/NCAR/GARD
- DeepSD: https://github.com/tjvandal/deepsd
| scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/docs/roadmap.rst | roadmap.rst | .. _roadmap:
Development Roadmap
===================
Author: Joe Hamman
Date: September 15, 2019
Background and scope
--------------------
Scikit-downscale is a toolkit for statistical downscaling using Xarray. It is
meant to support the development of new and existing downscaling methods in a
common framework. It implements a fit/predict API that accepts Xarray objects,
similar to Python's Scikit-Learn, for building a range of downscaling models.
For example, implementing a BCSD workflow may look something like this:
.. code-block:: Python
from skdownscale.pointwise_models import PointWiseDownscaler
from skdownscale.models.bcsd import BCSDTemperature, bcsd_disaggregator
# da_temp_train: xarray.DataArray (monthly)
# da_temp_obs: xarray.DataArray (monthly)
# da_temp_obs_daily: xarray.DataArray (daily)
# da_temp_predict: xarray.DataArray (monthly)
# create a model
bcsd_model = PointWiseDownscaler(BCSDTemperature(), dim='time')
# train the model
bcsd_model.train(da_temp_train, da_temp_obs)
# predict with the model (downscaled_temp: xr.DataArray)
downscaled_temp = bcsd_model.predict(da_temp_predict)
# disaggregate the downscaled data (final: xr.DataArray)
final = bcsd_disaggregator(downscaled_temp, da_temp_obs_daily)
We are currently envisioning the project having three components (described
in the components section below). While we haven't started work on the deep
learning models component, this is certainly a central motivation to this
package and I am looking forward to starting on this work soon.
Principles
----------
- Open - aim to take the sausage making out of downscaling; open-source
  methods, comparable, extensible
- Scalable - plug into existing frameworks (e.g. dask/pangeo) to scale up,
  allow for use at single points to scale down
- Portable - unopinionated when it comes to compute platform
- Tested - rigorously tested, both the computational and the scientific
  implementation
Components
----------
1. `pointwise_models`: a collection of linear models that are intended to be
applied point-by-point. These may be sklearn Pipelines or custom sklearn-like
models (e.g. BCSDTemperature).
2. `global_models`: (not implemented) concept space for deep learning-based
models.
3. `metrics`: (not implemented) concept space for a benchmarking suite
Models
------
Scikit-downscale should provide a collection of a common set of downscaling
models and the building blocks to construct new models. As a starter, I intend
to implement the following models:
Pointwise models
~~~~~~~~~~~~~~~~
1. BCSD_[Temperature, Precipitation]: Wood et al 2002
2. ARRM: Stoner et al 2012
3. Delta Method
4. Hybrid Delta Method
5. GARD: https://github.com/NCAR/GARD
6. ?
Other methods, like LOCA, MACA, BCCA, etc, should also be possible.
Global models
~~~~~~~~~~~~~
This category of methods is really what is motivating the development of this
package. We've seen some early work from TJ Vandal in this area but there is
more work to be done. For now, I'll just jot down what a possible API might
look like:
.. code-block:: Python
from skdownscale.global_models import GlobalDownscaler
from skdownscale.global_models.deepsd import DeepSD
# ...
# create a model
model = GlobalDownscaler(DeepSD())
# train the model
model.train(da_temp_train, da_temp_obs)
# predict with the model (downscaled_temp: xr.DataArray)
downscaled_temp = model.predict(da_temp_predict)
Dependencies
------------
- Core: Xarray, Pandas, Dask, Scikit-learn, Numpy, Scipy
- Optional: Statsmodels, Keras, PyTorch, Tensorflow, etc.
Related projects
----------------
- FUDGE: https://github.com/NOAA-GFDL/FUDGE
- GARD: https://github.com/NCAR/GARD
- DeepSD: https://github.com/tjvandal/deepsd
| 0.900846 | 0.672424 |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import xarray as xr
from skdownscale.pointwise_models import BcsdPrecipitation, BcsdTemperature
# utilities for plotting cdfs
def plot_cdf(ax=None, **kwargs):
if ax:
plt.sca(ax)
else:
ax = plt.gca()
for label, X in kwargs.items():
vals = np.sort(X, axis=0)
pp = scipy.stats.mstats.plotting_positions(vals)
ax.plot(pp, vals, label=label)
ax.legend()
return ax
def plot_cdf_by_month(ax=None, **kwargs):
fig, axes = plt.subplots(4, 3, sharex=True, sharey=False, figsize=(12, 8))
for label, X in kwargs.items():
for month, ax in zip(range(1, 13), axes.flat):
vals = np.sort(X[X.index.month == month], axis=0)
pp = scipy.stats.mstats.plotting_positions(vals)
ax.plot(pp, vals, label=label)
ax.set_title(month)
ax.legend()
return ax
# open a small dataset for training
training = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="training")
training
# open a small dataset of observations (targets)
targets = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="targets")
targets
# extract 1 point of training data for precipitation and temperature
X_temp = training.isel(point=0).to_dataframe()[["T2max"]].resample("MS").mean() - 273.15  # Kelvin to Celsius
X_pcp = training.isel(point=0).to_dataframe()[["PREC_TOT"]].resample("MS").sum() * 24
display(X_temp.head(), X_pcp.head())
# extract 1 point of target data for precipitation and temperature
y_temp = targets.isel(point=0).to_dataframe()[["Tmax"]].resample("MS").mean()
y_pcp = targets.isel(point=0).to_dataframe()[["Prec"]].resample("MS").sum()
display(y_temp.head(), y_pcp.head())
# Fit/predict the BCSD Temperature model
bcsd_temp = BcsdTemperature()
bcsd_temp.fit(X_temp, y_temp)
out = bcsd_temp.predict(X_temp) + X_temp
plot_cdf(X=X_temp, y=y_temp, out=out)
out.plot()
plot_cdf_by_month(X=X_temp, y=y_temp, out=out)
# Fit/predict the BCSD Precipitation model
bcsd_pcp = BcsdPrecipitation()
bcsd_pcp.fit(X_pcp, y_pcp)
out = bcsd_pcp.predict(X_pcp) * X_pcp
plot_cdf(X=X_pcp, y=y_pcp, out=out)
plot_cdf_by_month(X=X_pcp, y=y_pcp, out=out)
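# --- Added sketch (not part of the original notebook) ---
# A quick sanity check: compare monthly-mean precipitation of the corrected output
# against the target. This assumes `out` comes back aligned with X_pcp's monthly index.
corrected = pd.Series(np.asarray(out).squeeze(), index=X_pcp.index)
target = y_pcp.iloc[:, 0]
check = pd.DataFrame({"corrected": corrected.groupby(corrected.index.month).mean(),
                      "target": target.groupby(target.index.month).mean()})
display(check)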
```
| scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/examples/bcsd_example.ipynb | bcsd_example.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import xarray as xr
from skdownscale.pointwise_models import BcsdPrecipitation, BcsdTemperature
# utilities for plotting cdfs
def plot_cdf(ax=None, **kwargs):
if ax:
plt.sca(ax)
else:
ax = plt.gca()
for label, X in kwargs.items():
vals = np.sort(X, axis=0)
pp = scipy.stats.mstats.plotting_positions(vals)
ax.plot(pp, vals, label=label)
ax.legend()
return ax
def plot_cdf_by_month(ax=None, **kwargs):
fig, axes = plt.subplots(4, 3, sharex=True, sharey=False, figsize=(12, 8))
for label, X in kwargs.items():
for month, ax in zip(range(1, 13), axes.flat):
vals = np.sort(X[X.index.month == month], axis=0)
pp = scipy.stats.mstats.plotting_positions(vals)
ax.plot(pp, vals, label=label)
ax.set_title(month)
ax.legend()
return ax
# open a small dataset for training
training = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="training")
training
# open a small dataset of observations (targets)
targets = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="targets")
targets
# extract 1 point of training data for precipitation and temperature
X_temp = training.isel(point=0).to_dataframe()[["T2max"]].resample("MS").mean() - 273.15  # Kelvin to Celsius
X_pcp = training.isel(point=0).to_dataframe()[["PREC_TOT"]].resample("MS").sum() * 24
display(X_temp.head(), X_pcp.head())
# extract 1 point of target data for precipitation and temperature
y_temp = targets.isel(point=0).to_dataframe()[["Tmax"]].resample("MS").mean()
y_pcp = targets.isel(point=0).to_dataframe()[["Prec"]].resample("MS").sum()
display(y_temp.head(), y_pcp.head())
# Fit/predict the BCSD Temperature model
bcsd_temp = BcsdTemperature()
bcsd_temp.fit(X_temp, y_temp)
out = bcsd_temp.predict(X_temp) + X_temp
plot_cdf(X=X_temp, y=y_temp, out=out)
out.plot()
plot_cdf_by_month(X=X_temp, y=y_temp, out=out)
# Fit/predict the BCSD Precipitation model
bcsd_pcp = BcsdPrecipitation()
bcsd_pcp.fit(X_pcp, y_pcp)
out = bcsd_pcp.predict(X_pcp) * X_pcp
plot_cdf(X=X_pcp, y=y_pcp, out=out)
plot_cdf_by_month(X=X_pcp, y=y_pcp, out=out) | 0.525612 | 0.648209 |
import numpy as np
import pandas as pd
import probscale
import scipy
import seaborn as sns
import xarray as xr
from matplotlib import pyplot as plt
def get_sample_data(kind):
if kind == 'training':
data = xr.open_zarr('../data/downscale_test_data.zarr.zip', group=kind)
# extract 1 point of training data for precipitation and temperature
df = (
data.isel(point=0)
.to_dataframe()[['T2max', 'PREC_TOT']]
.rename(columns={'T2max': 'tmax', 'PREC_TOT': 'pcp'})
)
df['tmax'] -= 273.15  # Kelvin to Celsius
df['pcp'] *= 24
return df.resample('1d').first()
elif kind == 'targets':
data = xr.open_zarr('../data/downscale_test_data.zarr.zip', group=kind)
# extract 1 point of target data for precipitation and temperature
return (
data.isel(point=0)
.to_dataframe()[['Tmax', 'Prec']]
.rename(columns={'Tmax': 'tmax', 'Prec': 'pcp'})
)
elif kind == 'wind-hist':
return (
xr.open_dataset(
'../data/uas/uas.hist.CanESM2.CRCM5-UQAM.day.NAM-44i.raw.Colorado.19801990.nc'
)['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
elif kind == 'wind-obs':
return (
xr.open_dataset('../data/uas/uas.gridMET.NAM-44i.Colorado.19801990.nc')['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
elif kind == 'wind-rcp':
return (
xr.open_dataset(
'../data/uas/uas.rcp85.CanESM2.CRCM5-UQAM.day.NAM-44i.raw.Colorado.19902000.nc'
)['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
else:
raise ValueError(kind)
def prob_plots(x, y, y_hat, shape=(2, 2), figsize=(8, 8)):
fig, axes = plt.subplots(*shape, sharex=True, sharey=True, figsize=figsize)
scatter_kws = dict(label='', marker=None, linestyle='-')
common_opts = dict(plottype='qq', problabel='', datalabel='')
for ax, (label, series) in zip(axes.flat, y_hat.items()):
scatter_kws['label'] = 'original'
fig = probscale.probplot(x, ax=ax, scatter_kws=scatter_kws, **common_opts)
scatter_kws['label'] = 'target'
fig = probscale.probplot(y, ax=ax, scatter_kws=scatter_kws, **common_opts)
scatter_kws['label'] = 'corrected'
fig = probscale.probplot(series, ax=ax, scatter_kws=scatter_kws, **common_opts)
ax.set_title(label)
ax.legend()
[ax.set_xlabel('Standard Normal Quantiles') for ax in axes[-1]]
[ax.set_ylabel('Temperature [C]') for ax in axes[:, 0]]
[fig.delaxes(ax) for ax in axes.flat[len(y_hat.keys()) :]]
fig.tight_layout()
return fig
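# Hedged usage sketch (hypothetical series names; not part of the original module):
#
#   y_hat = {'BCSD': bcsd_out, 'GARD': gard_out}  # corrected series keyed by label
#   fig = prob_plots(x_raw, y_target, y_hat)      # default 2x2 grid; unused panels are removed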
def zscore_ds_plot(training, target, future, corrected):
labels = ['training', 'future', 'target', 'corrected']
colors = {k: c for (k, c) in zip(labels, sns.color_palette('Set2', n_colors=4))}
alpha = 0.5
time_target = pd.date_range('1980-01-01', '1989-12-31', freq='D')
time_training = time_target[~((time_target.month == 2) & (time_target.day == 29))]
time_future = pd.date_range('1990-01-01', '1999-12-31', freq='D')
time_future = time_future[~((time_future.month == 2) & (time_future.day == 29))]
plt.figure(figsize=(8, 4))
plt.plot(time_training, training.uas, label='training', alpha=alpha, c=colors['training'])
plt.plot(time_target, target.uas, label='target', alpha=alpha, c=colors['target'])
plt.plot(time_future, future.uas, label='future', alpha=alpha, c=colors['future'])
plt.plot(
time_future,
corrected.uas,
label='corrected',
alpha=alpha,
c=colors['corrected'],
)
plt.xlabel('Time')
plt.ylabel('Eastward Near-Surface Wind (m s-1)')
plt.legend()
return
def zscore_correction_plot(zscore):
training_mean = zscore.fit_stats_dict_['X_mean']
training_std = zscore.fit_stats_dict_['X_std']
target_mean = zscore.fit_stats_dict_['y_mean']
target_std = zscore.fit_stats_dict_['y_std']
future_mean = zscore.predict_stats_dict_['meani']
future_mean = future_mean.groupby(future_mean.index.dayofyear).mean()
future_std = zscore.predict_stats_dict_['stdi']
future_std = future_std.groupby(future_std.index.dayofyear).mean()
corrected_mean = zscore.predict_stats_dict_['meanf']
corrected_mean = corrected_mean.groupby(corrected_mean.index.dayofyear).mean()
corrected_std = zscore.predict_stats_dict_['stdf']
corrected_std = corrected_std.groupby(corrected_std.index.dayofyear).mean()
labels = ['training', 'future', 'target', 'corrected']
colors = {k: c for (k, c) in zip(labels, sns.color_palette('Set2', n_colors=4))}
doy = 20
plt.figure()
x, y = _gaus(training_mean, training_std, doy)
plt.plot(x, y, c=colors['training'], label='training')
x, y = _gaus(target_mean, target_std, doy)
plt.plot(x, y, c=colors['target'], label='target')
x, y = _gaus(future_mean, future_std, doy)
plt.plot(x, y, c=colors['future'], label='future')
x, y = _gaus(corrected_mean, corrected_std, doy)
plt.plot(x, y, c=colors['corrected'], label='corrected')
plt.legend()
return
def _gaus(mean, std, doy):
mu = mean[doy]
sigma = std[doy]
x = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 100)
y = scipy.stats.norm.pdf(x, mu, sigma)
return x, y | scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/examples/utils.py | utils.py | import numpy as np
import pandas as pd
import probscale
import scipy
import seaborn as sns
import xarray as xr
from matplotlib import pyplot as plt
def get_sample_data(kind):
if kind == 'training':
data = xr.open_zarr('../data/downscale_test_data.zarr.zip', group=kind)
# extract 1 point of training data for precipitation and temperature
df = (
data.isel(point=0)
.to_dataframe()[['T2max', 'PREC_TOT']]
.rename(columns={'T2max': 'tmax', 'PREC_TOT': 'pcp'})
)
df['tmax'] -= 273.15  # Kelvin to Celsius
df['pcp'] *= 24
return df.resample('1d').first()
elif kind == 'targets':
data = xr.open_zarr('../data/downscale_test_data.zarr.zip', group=kind)
# extract 1 point of target data for precipitation and temperature
return (
data.isel(point=0)
.to_dataframe()[['Tmax', 'Prec']]
.rename(columns={'Tmax': 'tmax', 'Prec': 'pcp'})
)
elif kind == 'wind-hist':
return (
xr.open_dataset(
'../data/uas/uas.hist.CanESM2.CRCM5-UQAM.day.NAM-44i.raw.Colorado.19801990.nc'
)['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
elif kind == 'wind-obs':
return (
xr.open_dataset('../data/uas/uas.gridMET.NAM-44i.Colorado.19801990.nc')['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
elif kind == 'wind-rcp':
return (
xr.open_dataset(
'../data/uas/uas.rcp85.CanESM2.CRCM5-UQAM.day.NAM-44i.raw.Colorado.19902000.nc'
)['uas']
.sel(lat=40.25, lon=-109.2, method='nearest')
.squeeze()
.to_dataframe()[['uas']]
)
else:
raise ValueError(kind)
def prob_plots(x, y, y_hat, shape=(2, 2), figsize=(8, 8)):
fig, axes = plt.subplots(*shape, sharex=True, sharey=True, figsize=figsize)
scatter_kws = dict(label='', marker=None, linestyle='-')
common_opts = dict(plottype='qq', problabel='', datalabel='')
for ax, (label, series) in zip(axes.flat, y_hat.items()):
scatter_kws['label'] = 'original'
fig = probscale.probplot(x, ax=ax, scatter_kws=scatter_kws, **common_opts)
scatter_kws['label'] = 'target'
fig = probscale.probplot(y, ax=ax, scatter_kws=scatter_kws, **common_opts)
scatter_kws['label'] = 'corrected'
fig = probscale.probplot(series, ax=ax, scatter_kws=scatter_kws, **common_opts)
ax.set_title(label)
ax.legend()
[ax.set_xlabel('Standard Normal Quantiles') for ax in axes[-1]]
[ax.set_ylabel('Temperature [C]') for ax in axes[:, 0]]
[fig.delaxes(ax) for ax in axes.flat[len(y_hat.keys()) :]]
fig.tight_layout()
return fig
def zscore_ds_plot(training, target, future, corrected):
labels = ['training', 'future', 'target', 'corrected']
colors = {k: c for (k, c) in zip(labels, sns.color_palette('Set2', n_colors=4))}
alpha = 0.5
time_target = pd.date_range('1980-01-01', '1989-12-31', freq='D')
time_training = time_target[~((time_target.month == 2) & (time_target.day == 29))]
time_future = pd.date_range('1990-01-01', '1999-12-31', freq='D')
time_future = time_future[~((time_future.month == 2) & (time_future.day == 29))]
plt.figure(figsize=(8, 4))
plt.plot(time_training, training.uas, label='training', alpha=alpha, c=colors['training'])
plt.plot(time_target, target.uas, label='target', alpha=alpha, c=colors['target'])
plt.plot(time_future, future.uas, label='future', alpha=alpha, c=colors['future'])
plt.plot(
time_future,
corrected.uas,
label='corrected',
alpha=alpha,
c=colors['corrected'],
)
plt.xlabel('Time')
plt.ylabel('Eastward Near-Surface Wind (m s-1)')
plt.legend()
return
def zscore_correction_plot(zscore):
training_mean = zscore.fit_stats_dict_['X_mean']
training_std = zscore.fit_stats_dict_['X_std']
target_mean = zscore.fit_stats_dict_['y_mean']
target_std = zscore.fit_stats_dict_['y_std']
future_mean = zscore.predict_stats_dict_['meani']
future_mean = future_mean.groupby(future_mean.index.dayofyear).mean()
future_std = zscore.predict_stats_dict_['stdi']
future_std = future_std.groupby(future_std.index.dayofyear).mean()
corrected_mean = zscore.predict_stats_dict_['meanf']
corrected_mean = corrected_mean.groupby(corrected_mean.index.dayofyear).mean()
corrected_std = zscore.predict_stats_dict_['stdf']
corrected_std = corrected_std.groupby(corrected_std.index.dayofyear).mean()
labels = ['training', 'future', 'target', 'corrected']
colors = {k: c for (k, c) in zip(labels, sns.color_palette('Set2', n_colors=4))}
doy = 20
plt.figure()
x, y = _gaus(training_mean, training_std, doy)
plt.plot(x, y, c=colors['training'], label='training')
x, y = _gaus(target_mean, target_std, doy)
plt.plot(x, y, c=colors['target'], label='target')
x, y = _gaus(future_mean, future_std, doy)
plt.plot(x, y, c=colors['future'], label='future')
x, y = _gaus(corrected_mean, corrected_std, doy)
plt.plot(x, y, c=colors['corrected'], label='corrected')
plt.legend()
return
def _gaus(mean, std, doy):
mu = mean[doy]
sigma = std[doy]
x = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 100)
y = scipy.stats.norm.pdf(x, mu, sigma)
return x, y | 0.519521 | 0.40392 |
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import xarray as xr
from skdownscale.pointwise_models import AnalogRegression, PureAnalog
# open a small dataset for training
training = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="training")
training
# open a small dataset of observations (targets)
targets = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="targets")
targets
# extract 1 point of training data for precipitation and temperature
X_temp = training.isel(point=0).to_dataframe()[["T2max"]] - 273.15  # Kelvin to Celsius
X_pcp = training.isel(point=0).to_dataframe()[["PREC_TOT"]] * 24
display(X_temp.head(), X_pcp.head())
# extract 1 point of target data for precipitation and temperature
y_temp = targets.isel(point=0).to_dataframe()[["Tmax"]]
y_pcp = targets.isel(point=0).to_dataframe()[["Prec"]]
display(y_temp.head(), y_pcp.head())
# Fit/predict using the PureAnalog class
for kind in ["best_analog", "sample_analogs", "weight_analogs", "mean_analogs"]:
pure_analog = PureAnalog(kind=kind, n_analogs=10)
pure_analog.fit(X_temp[:1000], y_temp[:1000])
out = pure_analog.predict(X_temp[1000:])
plt.plot(out[:300], label=kind)
# Fit/predict using the AnalogRegression class
analog_reg = AnalogRegression(n_analogs=100)
analog_reg.fit(X_temp[:1000], y_temp[:1000])
out = analog_reg.predict(X_temp[1000:])
plt.plot(out[:300], label="AnalogRegression")
plt.legend()
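# --- Added sketch (not part of the original notebook) ---
# A rough skill measure for the AnalogRegression output: mean absolute error against
# the target series over the prediction period, assuming X_temp and y_temp share an index.
n = min(len(out), len(y_temp) - 1000)
errors = np.asarray(out).squeeze()[:n] - y_temp.iloc[1000 : 1000 + n, 0].to_numpy()
print("AnalogRegression MAE:", float(np.abs(errors).mean()))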
```
| scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/examples/gard_example.ipynb | gard_example.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import xarray as xr
from skdownscale.pointwise_models import AnalogRegression, PureAnalog
# open a small dataset for training
training = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="training")
training
# open a small dataset of observations (targets)
targets = xr.open_zarr("../data/downscale_test_data.zarr.zip", group="targets")
targets
# extract 1 point of training data for precipitation and temperature
X_temp = training.isel(point=0).to_dataframe()[["T2max"]] - 273.15  # Kelvin to Celsius
X_pcp = training.isel(point=0).to_dataframe()[["PREC_TOT"]] * 24
display(X_temp.head(), X_pcp.head())
# extract 1 point of target data for precipitation and temperature
y_temp = targets.isel(point=0).to_dataframe()[["Tmax"]]
y_pcp = targets.isel(point=0).to_dataframe()[["Prec"]]
display(y_temp.head(), y_pcp.head())
# Fit/predict using the PureAnalog class
for kind in ["best_analog", "sample_analogs", "weight_analogs", "mean_analogs"]:
pure_analog = PureAnalog(kind=kind, n_analogs=10)
pure_analog.fit(X_temp[:1000], y_temp[:1000])
out = pure_analog.predict(X_temp[1000:])
plt.plot(out[:300], label=kind)
# Fit/predict using the AnalogRegression class
analog_reg = AnalogRegression(n_analogs=100)
analog_reg.fit(X_temp[:1000], y_temp[:1000])
out = analog_reg.predict(X_temp[1000:])
plt.plot(out[:300], label="AnalogRegression")
plt.legend() | 0.483892 | 0.701432 |
import gc
import os
import click
import pandas as pd
import xarray as xr
from xsd.bcsd import bcsd, disagg
@click.command()
@click.option('--obs', type=str, help='Obs filename')
@click.option('--ref', type=str, help='Reference filename')
@click.option('--predict', type=str, help='Predict filename')
@click.option('--out_prefix', type=str, help='output_prefix')
@click.option('--kind', type=str, help='domain flag')
def main(obs, ref, predict, out_prefix, kind):
obs = obs.replace('\\', '')
ref = ref.replace('\\', '')
predict = predict.replace('\\', '')
print(obs, ref, predict)
# make out directory
dirname = os.path.dirname(out_prefix)
os.makedirs(dirname, exist_ok=True)
if kind == 'ak':
anoms, out = run_ak(obs, ref, predict)
elif kind == 'hi':
anoms, out = run_hi(obs, ref, predict)
# write output files
anoms.load().to_netcdf(os.path.join(out_prefix + 'anoms.nc'))
out.load().to_netcdf(os.path.join(out_prefix + 'out.nc'))
def run_ak(obs_fname, train_fname, predict_fname):
# Alaska
varnames = {'tmax': 'tasmax', 'tmin': 'tasmin', 'pcp': 'pr'}
chunks = None
if 'hist' in predict_fname:
predict_time_bounds = slice('1950', '2006')
else:
predict_time_bounds = slice('2006', '2099')
anoms = xr.Dataset()
out = xr.Dataset()
# get variables from the obs/training/prediction datasets
ds_obs = xr.open_mfdataset(obs_fname, chunks=chunks, concat_dim='time', data_vars='minimal')
time_bounds = slice(ds_obs.indexes['time'][0], ds_obs.indexes['time'][-1])
ds_obs.coords['xc'] = ds_obs['xc'].where(ds_obs['xc'] >= 0, ds_obs.coords['xc'] + 360)
attrs_to_delete = [
'grid_mapping',
'cell_methods',
'remap',
'FieldType',
'MemoryOrder',
'stagger',
'sr_x',
'sr_y',
]
for obs_var, gcm_var in varnames.items():
obs_keep_vars = [obs_var, 'xc', 'yc', 'xv', 'yv']
ds_obs_daily = ds_obs[obs_keep_vars]
ds_obs_daily[obs_var] = ds_obs_daily[obs_var].astype('f4')
times = pd.date_range('1980-01-01', '2017-12-31', freq='D')
ds_obs_daily = ds_obs_daily.reindex(time=times, method='nearest')
ds_obs_1var = ds_obs_daily.resample(time='MS', keep_attrs=True).mean('time').load()
for i, v in enumerate(obs_keep_vars):
ds_obs_1var[v].attrs = ds_obs[v].attrs
if i:
ds_obs_1var[v].encoding['_FillValue'] = None
for v in ds_obs_1var:
for attr in attrs_to_delete:
if attr in ds_obs_1var[v].attrs:
del ds_obs_1var[v].attrs[attr]
if 'time' in ds_obs_1var['xv'].dims:
ds_obs_1var['xv'] = ds_obs_1var['xv'].isel(time=0)
ds_obs_1var['yv'] = ds_obs_1var['yv'].isel(time=0)
print('ds_obs_1var')
ds_obs_1var.info()
da_train = (
xr.open_mfdataset(
train_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
da_predict = (
xr.open_mfdataset(
predict_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=predict_time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
anoms[obs_var] = bcsd(
ds_obs_1var,
da_train.to_dataset(name=obs_var),
da_predict.to_dataset(name=obs_var),
var=obs_var,
)
out[obs_var] = disagg(ds_obs_daily[obs_var], anoms[obs_var], var=obs_var)
out['xv'] = ds_obs_1var['xv']
out['yv'] = ds_obs_1var['yv']
anoms['xv'] = ds_obs_1var['xv']
anoms['yv'] = ds_obs_1var['yv']
gc.collect()
return anoms, out
def run_hi(obs_fname, train_fname, predict_fname):
varnames = {'tmax': 'tasmax', 'tmin': 'tasmin', 'pcp': 'pr'}
chunks = None
if 'hist' in predict_fname:
predict_time_bounds = slice('1950', '2006')
else:
predict_time_bounds = slice('2006', '2099')
anoms = xr.Dataset()
out = xr.Dataset()
# get variables from the obs/training/prediction datasets
ds_obs = xr.open_mfdataset(obs_fname, chunks=chunks, concat_dim='time', data_vars='minimal')
time_bounds = slice(ds_obs.indexes['time'][0], ds_obs.indexes['time'][-1])
for obs_var, gcm_var in varnames.items():
obs_keep_vars = [obs_var, 'lon', 'lat']
ds_obs_daily = ds_obs[obs_keep_vars]
ds_obs_daily[obs_var] = ds_obs_daily[obs_var].astype('f4')
times = pd.date_range('1990-01-01', '2014-12-31', freq='D')
ds_obs_daily = ds_obs_daily.reindex(time=times, method='nearest')
ds_obs_1var = ds_obs_daily.resample(time='MS', keep_attrs=True).mean('time').load()
for i, v in enumerate(obs_keep_vars):
ds_obs_1var[v].attrs = ds_obs[v].attrs
if i:
ds_obs_1var[v].encoding['_FillValue'] = None
print('ds_obs_1var')
ds_obs_1var.info()
da_train = (
xr.open_mfdataset(
train_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
da_predict = (
xr.open_mfdataset(
predict_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=predict_time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
anoms[obs_var] = bcsd(
ds_obs_1var,
da_train.to_dataset(name=obs_var),
da_predict.to_dataset(name=obs_var),
var=obs_var,
)
out[obs_var] = disagg(ds_obs_daily[obs_var], anoms[obs_var], var=obs_var)
gc.collect()
return anoms, out
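# Example invocation (hypothetical file paths; the '{gcm_var}' placeholder in --ref and
# --predict is filled per variable inside run_ak/run_hi):
#
#   python run_bcsd.py --obs obs_daily.nc \
#       --ref 'hist.{gcm_var}.nc' --predict 'rcp85.{gcm_var}.nc' \
#       --out_prefix ./output/ak_ --kind ak
#
# This writes ./output/ak_anoms.nc and ./output/ak_out.nc.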
if __name__ == '__main__':
main() # pylint: disable=no-value-for-parameter | scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/scripts/run_bcsd.py | run_bcsd.py |
import gc
import os
import click
import pandas as pd
import xarray as xr
from xsd.bcsd import bcsd, disagg
@click.command()
@click.option('--obs', type=str, help='Obs filename')
@click.option('--ref', type=str, help='Reference filename')
@click.option('--predict', type=str, help='Predict filename')
@click.option('--out_prefix', type=str, help='output_prefix')
@click.option('--kind', type=str, help='domain flag')
def main(obs, ref, predict, out_prefix, kind):
obs = obs.replace('\\', '')
ref = ref.replace('\\', '')
predict = predict.replace('\\', '')
print(obs, ref, predict)
# make out directory
dirname = os.path.dirname(out_prefix)
os.makedirs(dirname, exist_ok=True)
if kind == 'ak':
anoms, out = run_ak(obs, ref, predict)
elif kind == 'hi':
anoms, out = run_hi(obs, ref, predict)
# write output files
anoms.load().to_netcdf(os.path.join(out_prefix + 'anoms.nc'))
out.load().to_netcdf(os.path.join(out_prefix + 'out.nc'))
def run_ak(obs_fname, train_fname, predict_fname):
# Alaska
varnames = {'tmax': 'tasmax', 'tmin': 'tasmin', 'pcp': 'pr'}
chunks = None
if 'hist' in predict_fname:
predict_time_bounds = slice('1950', '2006')
else:
predict_time_bounds = slice('2006', '2099')
anoms = xr.Dataset()
out = xr.Dataset()
# get variables from the obs/training/prediction datasets
ds_obs = xr.open_mfdataset(obs_fname, chunks=chunks, concat_dim='time', data_vars='minimal')
time_bounds = slice(ds_obs.indexes['time'][0], ds_obs.indexes['time'][-1])
ds_obs.coords['xc'] = ds_obs['xc'].where(ds_obs['xc'] >= 0, ds_obs.coords['xc'] + 360)
attrs_to_delete = [
'grid_mapping',
'cell_methods',
'remap',
'FieldType',
'MemoryOrder',
'stagger',
'sr_x',
'sr_y',
]
for obs_var, gcm_var in varnames.items():
obs_keep_vars = [obs_var, 'xc', 'yc', 'xv', 'yv']
ds_obs_daily = ds_obs[obs_keep_vars]
ds_obs_daily[obs_var] = ds_obs_daily[obs_var].astype('f4')
times = pd.date_range('1980-01-01', '2017-12-31', freq='D')
ds_obs_daily = ds_obs_daily.reindex(time=times, method='nearest')
ds_obs_1var = ds_obs_daily.resample(time='MS', keep_attrs=True).mean('time').load()
for i, v in enumerate(obs_keep_vars):
ds_obs_1var[v].attrs = ds_obs[v].attrs
if i:
ds_obs_1var[v].encoding['_FillValue'] = None
for v in ds_obs_1var:
for attr in attrs_to_delete:
if attr in ds_obs_1var[v].attrs:
del ds_obs_1var[v].attrs[attr]
if 'time' in ds_obs_1var['xv'].dims:
ds_obs_1var['xv'] = ds_obs_1var['xv'].isel(time=0)
ds_obs_1var['yv'] = ds_obs_1var['yv'].isel(time=0)
print('ds_obs_1var')
ds_obs_1var.info()
da_train = (
xr.open_mfdataset(
train_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
da_predict = (
xr.open_mfdataset(
predict_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=predict_time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
anoms[obs_var] = bcsd(
ds_obs_1var,
da_train.to_dataset(name=obs_var),
da_predict.to_dataset(name=obs_var),
var=obs_var,
)
out[obs_var] = disagg(ds_obs_daily[obs_var], anoms[obs_var], var=obs_var)
out['xv'] = ds_obs_1var['xv']
out['yv'] = ds_obs_1var['yv']
anoms['xv'] = ds_obs_1var['xv']
anoms['yv'] = ds_obs_1var['yv']
gc.collect()
return anoms, out
def run_hi(obs_fname, train_fname, predict_fname):
varnames = {'tmax': 'tasmax', 'tmin': 'tasmin', 'pcp': 'pr'}
chunks = None
if 'hist' in predict_fname:
predict_time_bounds = slice('1950', '2006')
else:
predict_time_bounds = slice('2006', '2099')
anoms = xr.Dataset()
out = xr.Dataset()
# get variables from the obs/training/prediction datasets
ds_obs = xr.open_mfdataset(obs_fname, chunks=chunks, concat_dim='time', data_vars='minimal')
time_bounds = slice(ds_obs.indexes['time'][0], ds_obs.indexes['time'][-1])
for obs_var, gcm_var in varnames.items():
obs_keep_vars = [obs_var, 'lon', 'lat']
ds_obs_daily = ds_obs[obs_keep_vars]
ds_obs_daily[obs_var] = ds_obs_daily[obs_var].astype('f4')
times = pd.date_range('1990-01-01', '2014-12-31', freq='D')
ds_obs_daily = ds_obs_daily.reindex(time=times, method='nearest')
ds_obs_1var = ds_obs_daily.resample(time='MS', keep_attrs=True).mean('time').load()
for i, v in enumerate(obs_keep_vars):
ds_obs_1var[v].attrs = ds_obs[v].attrs
if i:
ds_obs_1var[v].encoding['_FillValue'] = None
print('ds_obs_1var')
ds_obs_1var.info()
da_train = (
xr.open_mfdataset(
train_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
da_predict = (
xr.open_mfdataset(
predict_fname.format(gcm_var=gcm_var),
chunks=chunks,
concat_dim='time',
data_vars='minimal',
)[gcm_var]
.sel(time=predict_time_bounds)
.astype('f4')
.resample(time='MS')
.mean('time')
.load()
)
anoms[obs_var] = bcsd(
ds_obs_1var,
da_train.to_dataset(name=obs_var),
da_predict.to_dataset(name=obs_var),
var=obs_var,
)
out[obs_var] = disagg(ds_obs_daily[obs_var], anoms[obs_var], var=obs_var)
gc.collect()
return anoms, out
if __name__ == '__main__':
main() # pylint: disable=no-value-for-parameter | 0.407216 | 0.125977 |
import numpy as np
import pandas as pd
from .utils import default_none_kwargs
class GroupedRegressor:
"""Grouped Regressor
Wrapper supporting fitting separate estimators to distinct groups
Parameters
----------
estimator : object
Estimator object such as derived from `BaseEstimator`. This estimator will be fit to each group
fit_grouper : object
Grouper object, such as `pd.Grouper` or `PaddedDOYGrouper` used to split data into groups during fitting.
predict_grouper : object, func, str
Grouper object, such as `pd.Grouper` used to split data into groups during prediction.
estimator_kwargs : dict
Keyword arguments to pass onto the `estimator`'s constructor.
fit_grouper_kwargs : dict
Keyword arguments to pass onto the `fit_grouper`'s constructor.
predict_grouper_kwargs : dict
Keyword arguments to pass onto the `predict_grouper`'s constructor.
"""
def __init__(
self,
estimator,
fit_grouper,
predict_grouper,
estimator_kwargs=None,
fit_grouper_kwargs=None,
predict_grouper_kwargs=None,
):
self.estimator = estimator
self.estimator_kwargs = estimator_kwargs
self.fit_grouper = fit_grouper
self.fit_grouper_kwargs = fit_grouper_kwargs
self.predict_grouper = predict_grouper
self.predict_grouper_kwargs = predict_grouper_kwargs
def fit(self, X, y, **fit_kwargs):
"""Fit the grouped regressor
Parameters
----------
X : pd.DataFrame, shape (n_samples, n_features)
Training data
y : pd.Series or pd.DataFrame, shape (n_samples, ) or (n_samples, n_targets)
Target values
**fit_kwargs
Additional keyword arguments to pass onto the estimator's fit method
Returns
-------
self : returns an instance of self.
"""
fit_grouper_kwargs = default_none_kwargs(self.fit_grouper_kwargs)
x_groups = self.fit_grouper(X.index, **fit_grouper_kwargs).groups
y_groups = self.fit_grouper(y.index, **fit_grouper_kwargs).groups
self.targets_ = list(y.keys())
estimator_kwargs = default_none_kwargs(self.estimator_kwargs)
self.estimators_ = {key: self.estimator(**estimator_kwargs) for key in x_groups}
for x_key, x_inds in x_groups.items():
y_inds = y_groups[x_key]
self.estimators_[x_key].fit(X.iloc[x_inds], y.iloc[y_inds], **fit_kwargs)
return self
def predict(self, X):
"""Predict estimator target for X
Parameters
----------
X : pd.DataFrame, shape (n_samples, n_features)
Training data
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
"""
predict_grouper_kwargs = default_none_kwargs(self.predict_grouper_kwargs)
grouper = X.groupby(self.predict_grouper, **predict_grouper_kwargs)
result = np.empty((len(X), len(self.targets_)))
for key, inds in grouper.indices.items():
result[inds, ...] = self.estimators_[key].predict(X.iloc[inds])
return result
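# A minimal usage sketch (hypothetical data; not part of the original module). It pairs
# GroupedRegressor with the PaddedDOYGrouper defined below, fitting one LinearRegression
# per day of year using a +/- 15 day window of samples around each day:
#
#   from sklearn.linear_model import LinearRegression
#
#   model = GroupedRegressor(
#       LinearRegression,
#       fit_grouper=PaddedDOYGrouper,
#       predict_grouper=lambda ts: ts.dayofyear,
#       fit_grouper_kwargs={'window': 15},
#   )
#   model.fit(X_df, y_df)    # X_df, y_df: DataFrames sharing a DatetimeIndex
#   y_hat = model.predict(X_df)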
class PaddedDOYGrouper:
"""Grouper to group an Index by day-of-year +/ pad
Parameters
----------
index : pd.DatetimeIndex
Pandas DatetimeIndex to be grouped.
window : int
Size of the padded offset for each day of year.
"""
def __init__(self, index, window):
self.index = index
self.window = window
idoy = index.dayofyear
n = idoy.max()
# day-of-year x day-of-year groups
temp_groups = np.zeros((n, n), dtype=bool)  # np.bool was removed in newer NumPy
for i in range(n):
inds = np.arange(i - self.window, i + self.window + 1)
inds[inds < 0] += n
inds[inds > n - 1] -= n
temp_groups[i, inds] = True
arr = temp_groups[idoy - 1]
self._groups = {doy: np.nonzero(arr[:, doy - 1])[0] for doy in range(1, n + 1)}
@property
def groups(self):
"""Dict {doy -> group indicies}."""
return self._groups | scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/grouping.py | grouping.py | import numpy as np
import pandas as pd
from .utils import default_none_kwargs
class GroupedRegressor:
"""Grouped Regressor
Wrapper supporting fitting separate estimators to distinct groups
Parameters
----------
estimator : object
Estimator object such as derived from `BaseEstimator`. This estimator will be fit to each group
fit_grouper : object
Grouper object, such as `pd.Grouper` or `PaddedDOYGrouper` used to split data into groups during fitting.
predict_grouper : object, func, str
Grouper object, such as `pd.Grouper` used to split data into groups during prediction.
estimator_kwargs : dict
Keyword arguments to pass onto the `estimator`'s constructor.
fit_grouper_kwargs : dict
Keyword arguments to pass onto the `fit_grouper`'s constructor.
predict_grouper_kwargs : dict
Keyword arguments to pass onto the `predict_grouper`'s constructor.
"""
def __init__(
self,
estimator,
fit_grouper,
predict_grouper,
estimator_kwargs=None,
fit_grouper_kwargs=None,
predict_grouper_kwargs=None,
):
self.estimator = estimator
self.estimator_kwargs = estimator_kwargs
self.fit_grouper = fit_grouper
self.fit_grouper_kwargs = fit_grouper_kwargs
self.predict_grouper = predict_grouper
self.predict_grouper_kwargs = predict_grouper_kwargs
def fit(self, X, y, **fit_kwargs):
"""Fit the grouped regressor
Parameters
----------
X : pd.DataFrame, shape (n_samples, n_features)
Training data
y : pd.Series or pd.DataFrame, shape (n_samples, ) or (n_samples, n_targets)
Target values
**fit_kwargs
Additional keyword arguments to pass onto the estimator's fit method
Returns
-------
self : returns an instance of self.
"""
fit_grouper_kwargs = default_none_kwargs(self.fit_grouper_kwargs)
x_groups = self.fit_grouper(X.index, **fit_grouper_kwargs).groups
y_groups = self.fit_grouper(y.index, **fit_grouper_kwargs).groups
self.targets_ = list(y.keys())
estimator_kwargs = default_none_kwargs(self.estimator_kwargs)
self.estimators_ = {key: self.estimator(**estimator_kwargs) for key in x_groups}
for x_key, x_inds in x_groups.items():
y_inds = y_groups[x_key]
self.estimators_[x_key].fit(X.iloc[x_inds], y.iloc[y_inds], **fit_kwargs)
return self
def predict(self, X):
"""Predict estimator target for X
Parameters
----------
X : pd.DataFrame, shape (n_samples, n_features)
Training data
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
"""
predict_grouper_kwargs = default_none_kwargs(self.predict_grouper_kwargs)
grouper = X.groupby(self.predict_grouper, **predict_grouper_kwargs)
result = np.empty((len(X), len(self.targets_)))
for key, inds in grouper.indices.items():
result[inds, ...] = self.estimators_[key].predict(X.iloc[inds])
return result
class PaddedDOYGrouper:
"""Grouper to group an Index by day-of-year +/ pad
Parameters
----------
index : pd.DatetimeIndex
Pandas DatetimeIndex to be grouped.
window : int
Size of the padded offset for each day of year.
"""
def __init__(self, index, window):
self.index = index
self.window = window
idoy = index.dayofyear
n = idoy.max()
# day-of-year x day-of-year groups
temp_groups = np.zeros((n, n), dtype=bool)  # np.bool was removed in newer NumPy
for i in range(n):
inds = np.arange(i - self.window, i + self.window + 1)
inds[inds < 0] += n
inds[inds > n - 1] -= n
temp_groups[i, inds] = True
arr = temp_groups[idoy - 1]
self._groups = {doy: np.nonzero(arr[:, doy - 1])[0] for doy in range(1, n + 1)}
@property
def groups(self):
"""Dict {doy -> group indicies}."""
return self._groups | 0.868771 | 0.543651 |
import warnings
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
class TimeSynchronousDownscaler(BaseEstimator):
def _check_X_y(self, X, y, **kwargs):
if isinstance(X, pd.DataFrame) and isinstance(y, pd.DataFrame):
assert X.index.equals(y.index)
check_X_y(X, y) # this may be inefficient
else:
X, y = check_X_y(X, y)
warnings.warn('X and y do not have pandas DateTimeIndexes, making one up...')
index = pd.date_range(periods=len(X), start='1950', freq='MS')
X = pd.DataFrame(X, index=index)
y = pd.DataFrame(y, index=index)
return X, y
def _check_array(self, array, **kwargs):
if isinstance(array, pd.DataFrame):
check_array(array)
else:
array = check_array(array)
warnings.warn('array does not have a pandas DateTimeIndex, making one up...')
index = pd.date_range(periods=len(array), start='1950', freq=self._timestep)
array = pd.DataFrame(array, index=index)
return array
def _validate_data(self, X, y=None, reset=True, validate_separately=False, **check_params):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,), default=None
The targets. If None, `check_array` is called on `X` and
`check_X_y` is called otherwise.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
If False, call validate_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if `y` is not None.
"""
if y is None:
if self._get_tags()['requires_y']:
raise ValueError(
f'This {self.__class__.__name__} estimator '
f'requires y to be passed, but the target y is None.'
)
X = self._check_array(X, **check_params)
out = X
else:
if validate_separately:
# We need this because some estimators validate X and y
# separately, and in general, separately calling check_array()
# on X and y isn't equivalent to just calling check_X_y()
# :(
check_X_params, check_y_params = validate_separately
X = self._check_array(X, **check_X_params)
y = self._check_array(y, **check_y_params)
else:
X, y = self._check_X_y(X, y, **check_params)
out = X, y
# TO-DO: add check_n_features attribute
if check_params.get('ensure_2d', True):
self._check_n_features(X, reset=reset)
return out | scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/base.py | base.py | import warnings
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
class TimeSynchronousDownscaler(BaseEstimator):
def _check_X_y(self, X, y, **kwargs):
if isinstance(X, pd.DataFrame) and isinstance(y, pd.DataFrame):
assert X.index.equals(y.index)
check_X_y(X, y) # this may be inefficient
else:
X, y = check_X_y(X, y)
warnings.warn('X and y do not have pandas DateTimeIndexes, making one up...')
index = pd.date_range(periods=len(X), start='1950', freq='MS')
X = pd.DataFrame(X, index=index)
y = pd.DataFrame(y, index=index)
return X, y
def _check_array(self, array, **kwargs):
if isinstance(array, pd.DataFrame):
check_array(array)
else:
array = check_array(array)
warnings.warn('array does not have a pandas DateTimeIndex, making one up...')
index = pd.date_range(periods=len(array), start='1950', freq=self._timestep)
array = pd.DataFrame(array, index=index)
return array
def _validate_data(self, X, y=None, reset=True, validate_separately=False, **check_params):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,), default=None
The targets. If None, `check_array` is called on `X` and
`check_X_y` is called otherwise.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
If False, call validate_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if `y` is not None.
"""
if y is None:
if self._get_tags()['requires_y']:
raise ValueError(
f'This {self.__class__.__name__} estimator '
f'requires y to be passed, but the target y is None.'
)
X = self._check_array(X, **check_params)
out = X
else:
if validate_separately:
# We need this because some estimators validate X and y
# separately, and in general, separately calling check_array()
# on X and y isn't equivalent to just calling check_X_y()
# :(
check_X_params, check_y_params = validate_separately
X = self._check_array(X, **check_X_params)
y = self._check_array(y, **check_y_params)
else:
X, y = self._check_X_y(X, y, **check_params)
out = X, y
# TO-DO: add check_n_features attribute
if check_params.get('ensure_2d', True):
self._check_n_features(X, reset=reset)
return out | 0.880116 | 0.561996 |
import collections
import copy
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from sklearn.linear_model import LinearRegression
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from .trend import LinearTrendTransformer
from .utils import check_max_features, default_none_kwargs
SYNTHETIC_MIN = -1e20
SYNTHETIC_MAX = 1e20
Cdf = collections.namedtuple('CDF', ['pp', 'vals'])
def plotting_positions(n, alpha=0.4, beta=0.4):
'''Returns a monotonic array of plotting positions.
Parameters
----------
n : int
Length of plotting positions to return.
alpha, beta : float
Plotting position parameters. Both default to 0.4 (Cunnane).
Returns
-------
positions : ndarray
Array of plotting positions with length `n`.
See Also
--------
scipy.stats.mstats.plotting_positions
'''
return (np.arange(1, n + 1) - alpha) / (n + 1.0 - alpha - beta)
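# Worked example (illustrative): with the default alpha = beta = 0.4 (Cunnane),
# plotting_positions(3) evaluates (np.arange(1, 4) - 0.4) / (3 + 1 - 0.8), i.e.
# approximately array([0.1875, 0.5, 0.8125]).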
class QuantileMapper(TransformerMixin, BaseEstimator):
"""Transform features using quantile mapping.
Parameters
----------
detrend : boolean, optional
If True, detrend the data before quantile mapping and add the trend
back after transforming. Default is False.
lt_kwargs : dict, optional
Dictionary of keyword arguments to pass to the LinearTrendTransformer
qt_kwargs : dict, optional
Dictionary of keyword arguments to pass to the CunnaneTransformer
Attributes
----------
x_cdf_fit_ : CunnaneTransformer
Fitted CunnaneTransformer for fit(X)
"""
_fit_attributes = ['x_cdf_fit_']
def __init__(self, detrend=False, lt_kwargs=None, qt_kwargs=None):
self.detrend = detrend
self.lt_kwargs = lt_kwargs
self.qt_kwargs = qt_kwargs
def fit(self, X, y=None):
"""Fit the quantile mapping model.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data
"""
# TO-DO: fix validate data fctn
X = self._validate_data(X)
qt_kws = default_none_kwargs(self.qt_kwargs, copy=True)
# maybe detrend the input datasets
if self.detrend:
lt_kwargs = default_none_kwargs(self.lt_kwargs)
self.x_trend_fit_ = LinearTrendTransformer(**lt_kwargs).fit(X)
x_to_cdf = self.x_trend_fit_.transform(X)
else:
x_to_cdf = X
# calculate the cdfs for X
qt = CunnaneTransformer(**qt_kws)
self.x_cdf_fit_ = qt.fit(x_to_cdf)
return self
def transform(self, X):
"""Perform the quantile mapping.
Parameters
----------
X : array_like, shape [n_samples, n_features]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Transformed data
"""
# validate input data
check_is_fitted(self)
# TO-DO: fix validate_data fctn
X = self._validate_data(X)
# maybe detrend the datasets
if self.detrend:
lt_kwargs = default_none_kwargs(self.lt_kwargs)
x_trend = LinearTrendTransformer(**lt_kwargs).fit(X)
x_to_cdf = x_trend.transform(X)
else:
x_to_cdf = X
# do the final mapping
qt_kws = default_none_kwargs(self.qt_kwargs, copy=True)
x_quantiles = CunnaneTransformer(**qt_kws).fit_transform(x_to_cdf)
x_qmapped = self.x_cdf_fit_.inverse_transform(x_quantiles)
# add the trend back
if self.detrend:
x_qmapped = x_trend.inverse_transform(x_qmapped)
# reset the baseline (remove bias)
x_qmapped -= x_trend.lr_model_.intercept_ - self.x_trend_fit_.lr_model_.intercept_
return x_qmapped
def _more_tags(self):
return {
'_xfail_checks': {
'check_estimators_dtypes': 'QuantileMapper only suppers 1 feature',
'check_fit_score_takes_y': 'QuantileMapper only suppers 1 feature',
'check_transformer_data_not_an_array': 'QuantileMapper only suppers 1 feature',
'check_estimators_fit_returns_self': 'QuantileMapper only suppers 1 feature',
'check_estimators_fit_returns_self(readonly_memmap=True)': 'QuantileMapper only suppers 1 feature',
'check_dtype_object': 'QuantileMapper only suppers 1 feature',
'check_pipeline_consistency': 'QuantileMapper only suppers 1 feature',
'check_estimators_nan_inf': 'QuantileMapper only suppers 1 feature',
'check_estimators_overwrite_params': 'QuantileMapper only suppers 1 feature',
'check_estimators_pickle': 'QuantileMapper only suppers 1 feature',
'check_fit2d_predict1d': 'QuantileMapper only suppers 1 feature',
'check_methods_subset_invariance': 'QuantileMapper only suppers 1 feature',
'check_fit2d_1sample': 'QuantileMapper only suppers 1 feature',
'check_dict_unchanged': 'QuantileMapper only suppers 1 feature',
'check_dont_overwrite_parameters': 'QuantileMapper only suppers 1 feature',
'check_fit_idempotent': 'QuantileMapper only suppers 1 feature',
'check_n_features_in': 'QuantileMapper only suppers 1 feature',
'check_estimators_empty_data_messages': 'skip due to odd sklearn string matching in unit test',
'check_fit_check_is_fitted': 'QuantileMapper only suppers 1 feature',
'check_transformer_general': 'QuantileMapper only suppers 1 feature',
'check_transformer_preserve_dtypes': 'QuantileMapper only suppers 1 feature',
'check_methods_sample_order_invariance': 'QuantileMapper only suppers 1 feature',
},
}
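# A minimal usage sketch (hypothetical single-column data; not part of the original
# module): map a simulated series onto the distribution of the series used for fitting.
#
#   mapper = QuantileMapper()
#   mapper.fit(obs_df)                    # obs_df: shape (n_samples, 1)
#   corrected = mapper.transform(sim_df)  # sim_df: shape (m_samples, 1)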
class QuantileMappingReressor(RegressorMixin, BaseEstimator):
"""Transform features using quantile mapping.
Parameters
----------
extrapolate : str, optional
How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}
n_endpoints : int
Number of endpoints to include when extrapolating the tails of the cdf
Attributes
----------
_X_cdf : Cdf
NamedTuple representing the fit's X cdf
_y_cdf : Cdf
NamedTuple representing the fit's y cdf
"""
_fit_attributes = ['_X_cdf', '_y_cdf']
def __init__(self, extrapolate=None, n_endpoints=10):
self.extrapolate = extrapolate
self.n_endpoints = n_endpoints
if self.n_endpoints < 2:
raise ValueError('Invalid number of n_endpoints, must be >= 2')
def fit(self, X, y, **kwargs):
"""Fit the quantile mapping regression model.
Parameters
----------
X : array-like, shape [n_samples, 1]
Training data.
Returns
-------
self : object
"""
X = check_array(
X, dtype='numeric', ensure_min_samples=2 * self.n_endpoints + 1, ensure_2d=True
)
y = check_array(
y, dtype='numeric', ensure_min_samples=2 * self.n_endpoints + 1, ensure_2d=False
)
X = check_max_features(X, n=1)
self._X_cdf = self._calc_extrapolated_cdf(X, sort=True, extrapolate=self.extrapolate)
self._y_cdf = self._calc_extrapolated_cdf(y, sort=True, extrapolate=self.extrapolate)
return self
def predict(self, X, **kwargs):
"""Predict regression for target X.
Parameters
----------
X : array_like, shape [n_samples, 1]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Predicted data.
"""
check_is_fitted(self, self._fit_attributes)
X = check_array(X, ensure_2d=True)
X = X[:, 0]
sort_inds = np.argsort(X)
X_cdf = self._calc_extrapolated_cdf(X[sort_inds], sort=False, extrapolate=self.extrapolate)
# Fill value for when x < xp[0] or x > xp[-1] (i.e. when X_cdf vals are out of range for self._X_cdf vals)
left = -np.inf if self.extrapolate in ['min', 'both'] else None
right = np.inf if self.extrapolate in ['max', 'both'] else None
# For all values in future X, find the corresponding percentile in historical X
X_cdf.pp[:] = np.interp(
X_cdf.vals, self._X_cdf.vals, self._X_cdf.pp, left=left, right=right
)
# Extrapolate the tails beyond 1.0 to handle "new extremes", only triggered when the new extremes are even more drastic than
# the linear extrapolation result from historical X at SYNTHETIC_MIN and SYNTHETIC_MAX
if np.isinf(X_cdf.pp).any():
lower_inds = np.nonzero(-np.inf == X_cdf.pp)[0]
upper_inds = np.nonzero(np.inf == X_cdf.pp)[0]
model = LinearRegression()
if len(lower_inds):
s = slice(lower_inds[-1] + 1, lower_inds[-1] + 1 + self.n_endpoints)
model.fit(X_cdf.pp[s].reshape(-1, 1), X_cdf.vals[s].reshape(-1, 1))
X_cdf.pp[lower_inds] = model.predict(X_cdf.vals[lower_inds].reshape(-1, 1))
if len(upper_inds):
s = slice(upper_inds[0] - self.n_endpoints, upper_inds[0])
model.fit(X_cdf.pp[s].reshape(-1, 1), X_cdf.vals[s].reshape(-1, 1))
X_cdf.pp[upper_inds] = model.predict(X_cdf.vals[upper_inds].reshape(-1, 1))
# do the full quantile mapping
y_hat = np.full_like(X, np.nan)
y_hat[sort_inds] = np.interp(X_cdf.pp, self._y_cdf.pp, self._y_cdf.vals)[1:-1]
# If extrapolate is 1to1, apply the offset between X and y to the
# tails of y_hat
if self.extrapolate == '1to1':
y_hat = self._extrapolate_1to1(X, y_hat)
return y_hat
def _extrapolate_1to1(self, X, y_hat):
X_fit_len = len(self._X_cdf.vals)
X_fit_min = self._X_cdf.vals[0]
X_fit_max = self._X_cdf.vals[-1]
y_fit_len = len(self._y_cdf.vals)
y_fit_min = self._y_cdf.vals[0]
y_fit_max = self._y_cdf.vals[-1]
# adjust values over fit max
inds = X > X_fit_max
if inds.any():
if X_fit_len == y_fit_len:
y_hat[inds] = y_fit_max + (X[inds] - X_fit_max)
elif X_fit_len > y_fit_len:
X_fit_at_y_fit_max = np.interp(self._y_cdf.pp[-1], self._X_cdf.pp, self._X_cdf.vals)
y_hat[inds] = y_fit_max + (X[inds] - X_fit_at_y_fit_max)
elif X_fit_len < y_fit_len:
y_fit_at_X_fit_max = np.interp(self._X_cdf.pp[-1], self._y_cdf.pp, self._y_cdf.vals)
y_hat[inds] = y_fit_at_X_fit_max + (X[inds] - X_fit_max)
# adjust values under fit min
inds = X < X_fit_min
if inds.any():
if X_fit_len == y_fit_len:
y_hat[inds] = y_fit_min + (X[inds] - X_fit_min)
elif X_fit_len > y_fit_len:
X_fit_at_y_fit_min = np.interp(self._y_cdf.pp[0], self._X_cdf.pp, self._X_cdf.vals)
y_hat[inds] = X_fit_min + (X[inds] - X_fit_at_y_fit_min)
elif X_fit_len < y_fit_len:
y_fit_at_X_fit_min = np.interp(self._X_cdf.pp[0], self._y_cdf.pp, self._y_cdf.vals)
y_hat[inds] = y_fit_at_X_fit_min + (X[inds] - X_fit_min)
return y_hat
def _calc_extrapolated_cdf(
self, data, sort=True, extrapolate=None, pp_min=SYNTHETIC_MIN, pp_max=SYNTHETIC_MAX
):
"""Calculate a new extrapolated cdf
The goal of this function is to create a CDF with bounds outside the [0, 1] range.
This allows for quantile mapping beyond observed data points.
Parameters
----------
data : array_like, shape [n_samples, 1]
Input data (can be unsorted)
sort : bool
If true, sort the data before building the CDF
extrapolate : str or None
How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}
pp_min, pp_max : float
Plotting position min/max values.
Returns
-------
cdf : Cdf (NamedTuple)
"""
n = len(data)
# plotting positions
pp = np.empty(n + 2)
pp[1:-1] = plotting_positions(n)
# extended data values (sorted)
if data.ndim == 2:
data = data[:, 0]
if sort:
data = np.sort(data)
vals = np.full(n + 2, np.nan)
vals[1:-1] = data
vals[0] = data[0]
vals[-1] = data[-1]
# Add endpoints to the vector of plotting positions
if extrapolate in [None, '1to1']:
pp[0] = pp[1]
pp[-1] = pp[-2]
elif extrapolate == 'both':
pp[0] = pp_min
pp[-1] = pp_max
elif extrapolate == 'max':
pp[0] = pp[1]
pp[-1] = pp_max
elif extrapolate == 'min':
pp[0] = pp_min
pp[-1] = pp[-2]
else:
raise ValueError('unknown value for extrapolate: %s' % extrapolate)
if extrapolate in ['min', 'max', 'both']:
model = LinearRegression()
# extrapolate lower end point
if extrapolate in ['min', 'both']:
s = slice(1, self.n_endpoints + 1)
# fit linear model to first n_endpoints
model.fit(pp[s].reshape(-1, 1), vals[s].reshape(-1, 1))
# calculate the data value pp[0]
vals[0] = model.predict(pp[0].reshape(-1, 1))
# extrapolate upper end point
if extrapolate in ['max', 'both']:
s = slice(-self.n_endpoints - 1, -1)
# fit linear model to last n_endpoints
model.fit(pp[s].reshape(-1, 1), vals[s].reshape(-1, 1))
# calculate the data value pp[-1]
vals[-1] = model.predict(pp[-1].reshape(-1, 1))
return Cdf(pp, vals)
def _more_tags(self):
return {
'_xfail_checks': {
'check_estimators_dtypes': 'QuantileMappingReressor only suppers 1 feature',
'check_fit_score_takes_y': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_fit_returns_self': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_fit_returns_self(readonly_memmap=True)': 'QuantileMappingReressor only suppers 1 feature',
'check_dtype_object': 'QuantileMappingReressor only suppers 1 feature',
'check_pipeline_consistency': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_nan_inf': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_overwrite_params': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_pickle': 'QuantileMappingReressor only suppers 1 feature',
'check_fit2d_predict1d': 'QuantileMappingReressor only suppers 1 feature',
'check_methods_subset_invariance': 'QuantileMappingReressor only suppers 1 feature',
'check_fit2d_1sample': 'QuantileMappingReressor only suppers 1 feature',
'check_dict_unchanged': 'QuantileMappingReressor only suppers 1 feature',
'check_dont_overwrite_parameters': 'QuantileMappingReressor only suppers 1 feature',
'check_fit_idempotent': 'QuantileMappingReressor only suppers 1 feature',
'check_n_features_in': 'QuantileMappingReressor only suppers 1 feature',
'check_estimators_empty_data_messages': 'skip due to odd sklearn string matching in unit test',
'check_regressors_train': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_train(readonly_memmap=True)': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_train(readonly_memmap=True,X_dtype=float32)': 'QuantileMappingReressor only suppers 1 feature',
'check_regressor_data_not_an_array': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_no_decision_function': 'QuantileMappingReressor only suppers 1 feature',
'check_supervised_y_2d': 'QuantileMappingReressor only suppers 1 feature',
'check_regressors_int': 'QuantileMappingReressor only suppers 1 feature',
'check_methods_sample_order_invariance': 'QuantileMappingReressor only suppers 1 feature',
'check_fit_check_is_fitted': 'QuantileMappingReressor only suppers 1 feature',
'check_requires_y_none': 'QuantileMappingReressor only suppers 1 feature',
},
}
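# A minimal usage sketch (hypothetical 1-D data; not part of the original module):
#
#   qmr = QuantileMappingReressor(extrapolate='1to1')
#   qmr.fit(X_hist, y_obs)      # X_hist: shape (n, 1); y_obs: shape (n,)
#   y_hat = qmr.predict(X_fut)  # X_fut: shape (m, 1); values outside the fit range
#                               # are handled by the '1to1' extrapolation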
class CunnaneTransformer(TransformerMixin, BaseEstimator):
"""Quantile transform using Cunnane plotting positions with optional extrapolation.
Parameters
----------
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
extrapolate : str, optional
How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}. Default is None.
n_endpoints : int
Number of endpoints to include when extrapolating the tails of the cdf. Unused if ``extrapolate`` is None. Default is 10.
Attributes
----------
cdf_ : Cdf
NamedTuple representing the fit cdf
"""
_fit_attributes = ['cdf_']
def __init__(self, *, alpha=0.4, beta=0.4, extrapolate='both', n_endpoints=10):
self.alpha = alpha
self.beta = beta
self.extrapolate = extrapolate
self.n_endpoints = n_endpoints
def fit(self, X, y=None):
"""Compute CDF and plotting positions for X.
Parameters
----------
X : array-like of shape (n_samples, 1)
The data used to compute the CDF and plotting positions. Only a
single feature is supported.
y : None
Ignored.
Returns
-------
self : object
Fitted transformer.
"""
X = check_array(X, ensure_2d=True)
if X.shape[1] > 1:
raise ValueError('CunnaneTransformer.fit() only supports a single feature')
X = X[:, 0]
self.cdf_ = Cdf(plotting_positions(len(X)), np.sort(X))
return self
def transform(self, X):
"""Perform the quantile transform.
Parameters
----------
X : array_like, shape [n_samples, 1]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Transformed data
"""
X = check_array(X, ensure_2d=True)
if X.shape[1] > 1:
raise ValueError('CunnaneTransformer.transform() only supports a single feature')
X = X[:, 0]
left = -np.inf if self.extrapolate in ['min', 'both'] else None
right = np.inf if self.extrapolate in ['max', 'both'] else None
pps = np.interp(X, self.cdf_.vals, self.cdf_.pp, left=left, right=right)
if np.isinf(pps).any():
lower_inds = np.nonzero(-np.inf == pps)[0]
upper_inds = np.nonzero(np.inf == pps)[0]
model = LinearRegression()
if len(lower_inds):
s = slice(None, self.n_endpoints)
model.fit(self.cdf_.vals[s].reshape(-1, 1), self.cdf_.pp[s].reshape(-1, 1))
pps[lower_inds] = model.predict(X[lower_inds].reshape(-1, 1)).squeeze()
if len(upper_inds):
s = slice(-self.n_endpoints, None)
model.fit(self.cdf_.vals[s].reshape(-1, 1), self.cdf_.pp[s].reshape(-1, 1))
pps[upper_inds] = model.predict(X[upper_inds].reshape(-1, 1)).squeeze()
return pps.reshape(-1, 1)
def fit_transform(self, X, y=None):
"""Fit `CunnaneTransform` to `X`, then transform `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to generate the fit CDF.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_features)
Transformed data.
"""
return self.fit(X).transform(X)
def inverse_transform(self, X):
"""Map plotting positions in `X` back to data values using the fitted CDF, extrapolating the tails according to ``extrapolate``."""
X = check_array(X, ensure_2d=True)
X = X[:, 0]
left = -np.inf if self.extrapolate in ['min', 'both'] else None
right = np.inf if self.extrapolate in ['max', 'both'] else None
vals = np.interp(X, self.cdf_.pp, self.cdf_.vals, left=left, right=right)
if np.isinf(vals).any():
lower_inds = np.nonzero(-np.inf == vals)[0]
upper_inds = np.nonzero(np.inf == vals)[0]
model = LinearRegression()
if len(lower_inds):
s = slice(None, self.n_endpoints)
model.fit(self.cdf_.pp[s].reshape(-1, 1), self.cdf_.vals[s].reshape(-1, 1))
vals[lower_inds] = model.predict(X[lower_inds].reshape(-1, 1)).squeeze()
if len(upper_inds):
s = slice(-self.n_endpoints, None)
model.fit(self.cdf_.pp[s].reshape(-1, 1), self.cdf_.vals[s].reshape(-1, 1))
vals[upper_inds] = model.predict(X[upper_inds].reshape(-1, 1)).squeeze()
return vals.reshape(-1, 1)
def _more_tags(self):
return {
'_xfail_checks': {
'check_estimators_dtypes': 'CunnaneTransformer only supports 1 feature',
'check_fit_score_takes_y': 'CunnaneTransformer only supports 1 feature',
'check_transformer_data_not_an_array': 'CunnaneTransformer only supports 1 feature',
'check_estimators_fit_returns_self': 'CunnaneTransformer only supports 1 feature',
'check_estimators_fit_returns_self(readonly_memmap=True)': 'CunnaneTransformer only supports 1 feature',
'check_dtype_object': 'CunnaneTransformer only supports 1 feature',
'check_pipeline_consistency': 'CunnaneTransformer only supports 1 feature',
'check_estimators_nan_inf': 'CunnaneTransformer only supports 1 feature',
'check_estimators_overwrite_params': 'CunnaneTransformer only supports 1 feature',
'check_estimators_pickle': 'CunnaneTransformer only supports 1 feature',
'check_fit2d_predict1d': 'CunnaneTransformer only supports 1 feature',
'check_methods_subset_invariance': 'CunnaneTransformer only supports 1 feature',
'check_fit2d_1sample': 'CunnaneTransformer only supports 1 feature',
'check_dict_unchanged': 'CunnaneTransformer only supports 1 feature',
'check_dont_overwrite_parameters': 'CunnaneTransformer only supports 1 feature',
'check_fit_idempotent': 'CunnaneTransformer only supports 1 feature',
'check_n_features_in': 'CunnaneTransformer only supports 1 feature',
'check_estimators_empty_data_messages': 'skip due to odd sklearn string matching in unit test',
'check_fit_check_is_fitted': 'CunnaneTransformer only supports 1 feature',
'check_transformer_general': 'CunnaneTransformer only supports 1 feature',
'check_transformer_preserve_dtypes': 'CunnaneTransformer only supports 1 feature',
'check_methods_sample_order_invariance': 'CunnaneTransformer only supports 1 feature',
},
}
class EquidistantCdfMatcher(QuantileMappingReressor):
"""Transform features using equidistant CDF matching, a version of quantile mapping that preserves the difference or ratio between X_test and X_train.
Parameters
----------
kind : str, optional
Whether to preserve the 'difference' or the 'ratio' between the test and training quantiles. Default is 'difference'.
extrapolate : str, optional
How to extend the cdfs at the tails. Valid options include {`'min'`, `'max'`, `'both'`, `'1to1'`, `None`}
n_endpoints : int
Number of endpoints to include when extrapolating the tails of the cdf
max_ratio : float, optional
Upper bound applied to the quantile ratio when ``kind='ratio'``. Default is None (no cap).
Attributes
----------
_X_cdf : Cdf
NamedTuple representing the fit's X cdf
_y_cdf : Cdf
NamedTuple representing the fit's y cdf
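Examples
--------
A minimal usage sketch added for illustration (synthetic data, not part
of the original scikit-downscale source). For ``kind='difference'`` the
prediction at quantile ``q`` is approximately
``y_train(q) + (X_test(q) - X_train(q))``; for ``kind='ratio'`` the
difference is replaced by the ratio ``X_test(q) / X_train(q)``:

>>> import numpy as np
>>> X_hist = np.random.gamma(2.0, size=(200, 1))   # e.g. modeled precip
>>> y_hist = np.random.gamma(2.5, size=(200, 1))   # e.g. observed precip
>>> X_fut = np.random.gamma(2.2, size=(200, 1))
>>> ecdfm = EquidistantCdfMatcher(kind='ratio', max_ratio=5.0)
>>> _ = ecdfm.fit(X_hist, y_hist)
>>> y_fut = ecdfm.predict(X_fut)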
"""
_fit_attributes = ['_X_cdf', '_y_cdf']
def __init__(self, kind='difference', extrapolate=None, n_endpoints=10, max_ratio=None):
if kind not in ['difference', 'ratio']:
raise NotImplementedError('kind must be either difference or ratio')
self.kind = kind
self.extrapolate = extrapolate
self.n_endpoints = n_endpoints
# MACA seems to have a max ratio for precip at 5.0
self.max_ratio = max_ratio
if self.n_endpoints < 2:
raise ValueError('Invalid number of n_endpoints, must be >= 2')
def predict(self, X, **kwargs):
"""Predict regression for target X.
Parameters
----------
X : array_like, shape [n_samples, 1]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Predicted data.
"""
check_is_fitted(self, self._fit_attributes)
X = check_array(X, ensure_2d=True)
X = X[:, 0]
sort_inds = np.argsort(X)
X_cdf = self._calc_extrapolated_cdf(X[sort_inds], sort=False, extrapolate=self.extrapolate)
X_train_vals = np.interp(x=X_cdf.pp, xp=self._X_cdf.pp, fp=self._X_cdf.vals)
# generate y value as historical y plus/multiply by quantile difference
if self.kind == 'difference':
diff = X_cdf.vals - X_train_vals
sorted_y_hat = np.interp(x=X_cdf.pp, xp=self._y_cdf.pp, fp=self._y_cdf.vals) + diff
elif self.kind == 'ratio':
ratio = X_cdf.vals / X_train_vals
if self.max_ratio is not None:
ratio = np.minimum(ratio, self.max_ratio)
sorted_y_hat = np.interp(x=X_cdf.pp, xp=self._y_cdf.pp, fp=self._y_cdf.vals) * ratio
# put things into the right order
y_hat = np.full_like(X, np.nan)
y_hat[sort_inds] = sorted_y_hat[1:-1]
# If extrapolate is 1to1, apply the offset between X and y to the
# tails of y_hat
if self.extrapolate == '1to1':
y_hat = self._extrapolate_1to1(X, y_hat)
return y_hat
class TrendAwareQuantileMappingRegressor(RegressorMixin, BaseEstimator):
"""Experimental meta estimator for performing trend-aware quantile mapping
Parameters
----------
qm_estimator : object, default=None
Regressor object such as ``QuantileMappingReressor``.
trend_transformer : object, default=None
Transformer used to remove and restore the trend. Defaults to ``LinearTrendTransformer``.
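Examples
--------
A minimal usage sketch added for illustration (synthetic pandas data,
not part of the original scikit-downscale source):

>>> import numpy as np
>>> import pandas as pd
>>> index = pd.date_range('1950-01-01', periods=400, freq='D')
>>> X = pd.DataFrame({'tmax': np.random.normal(size=400)}, index=index)
>>> y = pd.DataFrame({'tmax': np.random.normal(size=400)}, index=index)
>>> tqm = TrendAwareQuantileMappingRegressor(QuantileMappingReressor())
>>> _ = tqm.fit(X, y)
>>> y_hat = tqm.predict(X)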
"""
def __init__(self, qm_estimator=None, trend_transformer=None):
self.qm_estimator = qm_estimator
self.trend_transformer = trend_transformer if trend_transformer is not None else LinearTrendTransformer()
def fit(self, X, y):
"""Fit the model.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
Returns
-------
self : object
"""
self._X_mean_fit = X.mean()
self._y_mean_fit = y.mean()
y_trend = copy.deepcopy(self.trend_transformer)
y_detrend = y_trend.fit_transform(y)
X_trend = copy.deepcopy(self.trend_transformer)
x_detrend = X_trend.fit_transform(X)
self.qm_estimator.fit(x_detrend, y_detrend)
return self
def predict(self, X):
"""Predict regression for target X.
Parameters
----------
X : array_like, shape [n_samples, n_features]
Samples.
Returns
-------
y : ndarray of shape (n_samples, )
Predicted data.
"""
X_trend = copy.deepcopy(self.trend_transformer)
x_detrend = X_trend.fit_transform(X)
y_hat = self.qm_estimator.predict(x_detrend).reshape(-1, 1)
# add the mean and trend back
# delta: X (predict) - X (fit) + y --> projected change + historical obs mean
delta = (X.mean().values - self._X_mean_fit.values) + self._y_mean_fit.values
# calculate the trendline
# TODO: think about how this would need to change if we're using a rolling average trend
trendline = X_trend.trendline(X)
trendline -= trendline.mean() # center at 0
# apply the trend and delta
y_hat += trendline + delta
return y_hat | scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/quantile.py | quantile.py | 0.905947 | 0.528168
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import check_is_fitted
from .utils import default_none_kwargs
class LinearTrendTransformer(TransformerMixin, BaseEstimator):
"""Transform features by removing linear trends.
Uses ordinary least squares linear regression as implemented in
sklearn.linear_model.LinearRegression.
Parameters
----------
lr_kwargs : dict, optional
Keyword arguments to pass to sklearn.linear_model.LinearRegression
Attributes
----------
lr_model_ : sklearn.linear_model.LinearRegression
Linear Regression object.
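Examples
--------
A minimal usage sketch added for illustration (synthetic data, not part
of the original scikit-downscale source):

>>> import numpy as np
>>> t = np.arange(100).reshape(-1, 1)
>>> X = 0.1 * t + np.random.normal(size=(100, 1))
>>> ltt = LinearTrendTransformer()
>>> detrended = ltt.fit_transform(X)              # trend removed
>>> restored = ltt.inverse_transform(detrended)   # trend added back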
"""
def __init__(self, lr_kwargs=None):
self.lr_kwargs = lr_kwargs
def fit(self, X, y=None):
"""Compute the linear trend.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
"""
X = self._validate_data(X)
kwargs = default_none_kwargs(self.lr_kwargs)
self.lr_model_ = LinearRegression(**kwargs)
self.lr_model_.fit(np.arange(len(X)).reshape(-1, 1), X)
return self
def transform(self, X):
"""Perform transformation by removing the trend.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data that should be detrended.
"""
# validate input data
check_is_fitted(self)
X = self._validate_data(X)
return X - self.trendline(X)
def inverse_transform(self, X):
"""Add the trend back to the data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data that should be transformed back.
"""
# validate input data
check_is_fitted(self)
X = self._validate_data(X)
return X + self.trendline(X)
def trendline(self, X):
"""helper function to calculate a linear trendline"""
X = self._validate_data(X)
return self.lr_model_.predict(np.arange(len(X)).reshape(-1, 1))
def _more_tags(self):
return {
'_xfail_checks': {
'check_methods_subset_invariance': 'the fitted trend depends on each sample position in the series',
'check_methods_sample_order_invariance': 'temporal order matters',
}
} | scikit-downscale | /scikit-downscale-0.1.5.tar.gz/scikit-downscale-0.1.5/skdownscale/pointwise_models/trend.py | trend.py | 0.928433 | 0.530236
__all__ = ['sedumi2sdpa']
from scipy.sparse import csc_matrix, csr_matrix
from scipy import sparse
from numpy import matrix
def sedumi2sdpa(filename, A, b, c, K):
"""Convert from Sedumi format to SDPA sparse format
Arguments:
filename: A string giving the output SDPA sparse file name
A, b, c, K: Problem data in SeDuMi format, where A, b, c are SciPy (sparse or dense) matrices and K is a cone dictionary with optional keys 'l' (number of nonnegative/linear variables) and 's' (list of semidefinite block sizes)
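Example (a sketch with made-up data added for illustration; it is not
part of the original scikit-dsdp source). It writes a small problem with
a 2-dimensional linear cone and no semidefinite blocks:
from scipy import sparse
A = sparse.csc_matrix([[1.0, 1.0], [1.0, 2.0]])
b = sparse.csc_matrix([[1.0], [2.0]])
c = sparse.csc_matrix([[1.0], [1.0]])
K = {'l': 2, 's': []}
sedumi2sdpa('example.dat-s', A, b, c, K)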
"""
A = sparse.csc_matrix(A)
b = sparse.csc_matrix(b)
c = sparse.csc_matrix(c)
if c.get_shape()[1]>1: c=c.transpose()
if not sparse.isspmatrix_csc(A):
A = A.tocsc()
if not sparse.isspmatrix_csc(b):
b = b.tocsc()
if not sparse.isspmatrix_csc(c):
c = c.tocsc()
if not 'l' in K.keys():
K['l'] = 0
fp = open(filename, "w")
# write mDim
mDim = A.get_shape()[0]
fp.write(str(mDim) + "\n")
# write nBlock
if K['l']>0:
fp.write(str(1+len(K['s'])) + "\n")
else:
fp.write(str(len(K['s'])) + "\n")
# write blockStruct
if K['l']>0:
fp.write(str(-K['l']) + " ")
fp.write(" ".join([ str(x) for x in K['s'] ]) + "\n")
# write b
fp.write(" ".join([ str(-b[i,0]) for i in range(b.shape[0])]) + "\n")
# write C
len_s = sum([x ** 2 for x in K['s'] ])
matnum = 0
blocknum = 0
curind = 0
# block one for linear cone
if K['l']>0:
blocknum += 1
c_l = -c[0:K['l'], :]
list_row = [str(x + 1) for x in list(c_l.nonzero()[0])]
list_val = [str(x) for x in list(c_l.data)]
length = len(list_row)
for i in range(length):
fp.write(" ".join((str(matnum), str(blocknum), list_row[i],
list_row[i], list_val[i])) + "\n")
curind = curind + K['l']
if len_s > 0:
offset = 0
c_s = -c[curind :(curind + len_s), :]
for blockSize in K['s']:
blocknum += 1
list_row = list(c_s[offset:(offset + blockSize * blockSize),
:].nonzero()[0])
list_val = [x for x in
list(c_s[offset:(offset + blockSize * blockSize),
:].data)]
length = len(list_row)
for i in range(length):
setCol_row = (list_row[i] // blockSize) + 1
setCol_col = (list_row[i] % blockSize) + 1
if setCol_row <= setCol_col:
fp.write(" ".join(("0", str(blocknum), str(setCol_row),
str(setCol_col), str(list_val[i]))) + "\n")
offset += blockSize * blockSize
# write A
blocknum = 0
curind = 0
if K['l'] > 0:
blocknum += 1
A_l = -A[:, 0:K['l']]
list_row = [str(x + 1) for x in list(A_l.nonzero()[0])]
list_col = [str(x + 1) for x in list(A_l.nonzero()[1])]
list_val = [str(x) for x in list(A_l.data)]
length = len(list_row)
for i in range(length):
fp.write(" ".join((list_row[i], str(blocknum), list_col[i],
list_col[i], list_val[i])) + "\n")
curind = curind + K['l']
if len_s > 0:
offset = 0
A_s = -A[:, curind :(curind + len_s)]
for blockSize in K['s']:
blocknum += 1
list_row = [str(x + 1) for x in
list(A_s[:, offset:(offset + blockSize * blockSize)].
nonzero()[0])]
list_col = list(A_s[:, offset:(offset + blockSize * blockSize)].
nonzero()[1])
list_val = [x for x in
list(A_s[:, offset:(offset + blockSize * blockSize)].
data)]
length = len(list_row)
for i in range(length):
setCol_row = (list_col[i] // blockSize) + 1
setCol_col = (list_col[i] % blockSize) + 1
if setCol_row <= setCol_col:
fp.write(" ".join((list_row[i], str(blocknum),
str(setCol_row), str(setCol_col),
str(list_val[i]))) + "\n")
offset += blockSize * blockSize
fp.close()
return | scikit-dsdp | /scikit-dsdp-0.0.1.tar.gz/scikit-dsdp-0.0.1/dsdp/python/convert.py | convert.py | 0.379953 | 0.232768
__all__ = ['dsdp', 'dsdp_readsdpa']
from pydsdp.convert import *
from numpy import *
from pydsdp.pydsdp5 import pyreadsdpa
from os import remove, path
from tempfile import NamedTemporaryFile
def dsdp(A, b, c, K, OPTIONS={}):
"""Solve a semidefinite program given in SeDuMi format (A, b, c, K) by writing a temporary SDPA sparse file and calling the DSDP5 solver; OPTIONS is a dict of solver option name/value pairs. Returns a dict with keys 'y', 'X', and 'STATS'."""
tempdataF = NamedTemporaryFile(delete=False)
data_filename=tempdataF.name
tempdataF.close()
sedumi2sdpa(data_filename, A, b, c, K)
# tempoptionsF = None
options_filename = ""
if len(OPTIONS)>0:
tempoptionsF = NamedTemporaryFile(delete=False)
options_filename = tempoptionsF.name
tempoptionsF.close()
write_options_file(options_filename, OPTIONS)
# solve the problem by dsdp5
[y,X,STATS] = dsdp_readsdpa(data_filename,options_filename)
if path.isfile(data_filename):
remove(data_filename)
if path.isfile(options_filename):
remove(options_filename)
if not 'l' in K: K['l']=0
if not 's' in K: K['s']=()
Xout = []
if K['l']>0: Xout.append(X[0:K['l']])
index = K['l'];
if 's' in K:
for d in K['s']:
Xout.append(matrix(reshape(array(X[index:index+d*d]), [d,d])))
index = index+d*d
if (STATS[0]==1):
STATS[0] = "PDFeasible"
elif (STATS[0]==3):
STATS[0] = "Unbounded"
elif (STATS[0]==4):
STATS[0] = "InFeasible"
STATSout = {}
# Assign the name to STATS output, should be consistent with Rreadsdpa.c
if (len(STATS)>3):
STATSout=dict( zip(["stype", "dobj","pobj","r","mu","pstep","dstep","pnorm"],STATS) )
else:
STATSout=dict( zip(["stype", "dobj","pobj"], STATS) )
return dict(zip(['y', 'X','STATS'],[y,Xout,STATSout]))
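# A minimal usage sketch added for illustration (made-up problem data; it
# is not part of the original scikit-dsdp source). It solves a tiny SDP
# with a single 2x2 semidefinite block given in SeDuMi-style (A, b, c, K)
# form:
#
#   from scipy import sparse
#   A = sparse.csc_matrix([[1.0, 0.0, 0.0, 1.0]])      # one constraint row
#   b = sparse.csc_matrix([[1.0]])
#   c = sparse.csc_matrix([[2.0, 0.5, 0.5, 2.0]])
#   K = {'s': [2]}
#   result = dsdp(A, b, c, K)
#   y, X, stats = result['y'], result['X'], result['STATS']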
def dsdp_readsdpa(data_filename,options_filename):
# solve the problem by pyreadsdpa
# result = [y,X,STATS]
result = []
if ( path.isfile(data_filename) and (options_filename=="" or path.isfile(options_filename)) ):
result = pyreadsdpa(data_filename,options_filename)
return result
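# For example (a sketch with a hypothetical file name, not from the
# original source), an SDPA sparse file already on disk can be solved
# directly, with an empty string meaning "no options file":
#
#   y, X, stats = dsdp_readsdpa('problem.dat-s', '')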
def write_options_file(filename, OPTIONS):
# write OPTIONS to file
with open(filename, "a") as f:
for option in OPTIONS.keys():
f.write("-" + option + " " +str(OPTIONS[option]) + "\n")
f.close() | scikit-dsdp | /scikit-dsdp-0.0.1.tar.gz/scikit-dsdp-0.0.1/dsdp/python/dsdp5.py | dsdp5.py | 0.219588 | 0.151624
BSD 2-Clause License
Copyright (c) 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| scikit-dsp-comm | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/LICENSE.md | LICENSE.md | 0.642208 | 0.08733
![Logo](logo.png)
# scikit-dsp-comm
[![pypi](https://img.shields.io/pypi/v/scikit-dsp-comm.svg)](https://pypi.python.org/pypi/scikit-dsp-comm)
[![Anaconda-Server Badge](https://anaconda.org/conda-forge/scikit-dsp-comm/badges/version.svg)](https://anaconda.org/conda-forge/scikit-dsp-comm)
[![Docs](https://readthedocs.org/projects/scikit-dsp-comm/badge/?version=latest)](http://scikit-dsp-comm.readthedocs.io/en/latest/?badge=latest)
## Background
The origin of this package comes from the writing of the book *Signals and Systems for Dummies*, published by Wiley in 2013. The original module for this book is named `ssd.py`. In `scikit-dsp-comm` this module is renamed to `sigsys.py` to better reflect the fact that signal processing and communications theory is founded in signals and systems, a traditional subject in electrical engineering curricula.
## Package High Level Overview
This package is a collection of functions and classes to support signal processing and communications theory teaching and research. The foundation for this package is `scipy.signal`. The code currently requires Python `>= 3.5`.
**There are presently ten modules that make up scikit-dsp-comm:**
1. `sigsys.py` for basic signals and systems functions both continuous-time and discrete-time, including graphical display tools such as pole-zero plots, up-sampling and down-sampling.
2. `digitalcomm.py` for digital modulation theory components, including asynchronous resampling and variable time delay functions, both useful in advanced modem testing.
3. `synchronization.py` which contains phase-locked loop simulation functions and functions for carrier and phase synchronization of digital communications waveforms.
4. `fec_conv.py` for the generation rate one-half and one-third convolutional codes and soft decision Viterbi algorithm decoding, including soft and hard decisions, trellis and trellis-traceback display functions, and puncturing.
5. `fir_design_helper.py` which allows for easy design of lowpass, highpass, bandpass, and bandstop filters using the Kaiser window and equal-ripple designs, and also includes a list plotting function for easily comparing magnitude, phase, and group delay frequency responses.
6. `iir_design_helper.py` which allows for easy design of lowpass, highpass, bandpass, and bandstop filters using scipy.signal Butterworth, Chebyshev I and II, and elliptical designs, including the use of the cascade of second-order sections (SOS) topology from scipy.signal, and also includes a list plotting function for easily comparing magnitude, phase, and group delay frequency responses.
7. `multirate.py` that encapsulates digital filters into objects for filtering, interpolation by an integer factor, and decimation by an integer factor.
8. `coeff2header.py` which writes `C/C++` header files for FIR and IIR filters implemented in `C/C++`, using the cascade of second-order section representation for the IIR case. This last module finds use in real-time signal processing on embedded systems, but can be used for simulation models in `C/C++`.
Presently the collection of modules contains about 125 functions and classes. The authors/maintainers are working to get more detailed documentation in place.
## Documentation
Documentation is now housed on `readthedocs` which you can get to by clicking the docs badge near the top of this `README`. Example notebooks can be viewed on [GitHub pages](https://mwickert.github.io/scikit-dsp-comm/). In time more notebook postings will be extracted from [Dr. Wickert's Info Center](http://www.eas.uccs.edu/~mwickert/).
## Getting Set-up on Your System
The best way to use this package is to clone this repository and then install it.
```bash
git clone https://github.com/mwickert/scikit-dsp-comm.git
```
There are package dependencies for some modules that you may want to avoid. Specifically these are whenever hardware interfacing is involved. Specific hardware and software configuration details are discussed in [wiki pages](https://github.com/mwickert/SP-Comm-Tutorial-using-scikit-dsp-comm/wiki).
For Windows users `pip` install takes care of almost everything. I assume below you have Python on your path, so for example with [Anaconda](https://www.anaconda.com/download/#macos), I suggest letting the installer set these paths up for you.
### Editable Install with Dependencies
With the terminal in the root directory of the cloned repo perform an editable `pip` install using
```bash
pip install -e .
```
### Why an Editable Install?
The advantage of the editable `pip` install is that it is very easy to keep `scikit-dsp-comm` up to date. If you know that updates have been pushed to the master branch, you simply go to your local repo folder and
```bash
git pull origin master
```
This will update your local repo and automatically update the Python install without the need to run `pip` again. **Note**: If you have any Python kernels running, such as a Jupyter Notebook, you will need to restart the kernel to ensure any module changes get reloaded.
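After installing or updating, a quick Python import check confirms the editable install is active (a minimal sketch, not part of the original install instructions; it assumes the import package name `sk_dsp_comm`):

```python
import sk_dsp_comm.sigsys as ss
print(ss.__file__)  # should point into your local scikit-dsp-comm clone
```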
| scikit-dsp-comm | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/README.md | README.md | git clone https://github.com/mwickert/scikit-dsp-comm.git
pip install -e .
git pull origin master | 0.632616 | 0.993661 |
![Logo](logo.png)
# scikit-dsp-comm
[![pypi](https://img.shields.io/pypi/v/scikit-dsp-comm.svg)](https://pypi.python.org/pypi/scikit-dsp-comm)
[![Anaconda-Server Badge](https://anaconda.org/conda-forge/scikit-dsp-comm/badges/version.svg)](https://anaconda.org/conda-forge/scikit-dsp-comm)
[![Docs](https://readthedocs.org/projects/scikit-dsp-comm/badge/?version=latest)](http://scikit-dsp-comm.readthedocs.io/en/latest/?badge=latest)
## Background
The origin of this package comes from the writing of the book *Signals and Systems for Dummies*, published by Wiley in 2013. The original module for this book is named `ssd.py`. In `scikit-dsp-comm` this module is renamed to `sigsys.py` to better reflect the fact that signal processing and communications theory is founded in signals and systems, a traditional subject in electrical engineering curricula.
## Package High Level Overview
This package is a collection of functions and classes to support signal processing and communications theory teaching and research. The foundation for this package is `scipy.signal`. The code currently requires Python `>= 3.5`.
**There are presently ten modules that make up scikit-dsp-comm:**
1. `sigsys.py` for basic signals and systems functions both continuous-time and discrete-time, including graphical display tools such as pole-zero plots, up-sampling and down-sampling.
2. `digitalcomm.py` for digital modulation theory components, including asynchronous resampling and variable time delay functions, both useful in advanced modem testing.
3. `synchronization.py` which contains phase-locked loop simulation functions and functions for carrier and phase synchronization of digital communications waveforms.
4. `fec_conv.py` for the generation rate one-half and one-third convolutional codes and soft decision Viterbi algorithm decoding, including soft and hard decisions, trellis and trellis-traceback display functions, and puncturing.
5. `fir_design_helper.py` which allows for easy design of lowpass, highpass, bandpass, and bandstop filters using the Kaiser window and equal-ripple designs, and also includes a list plotting function for easily comparing magnitude, phase, and group delay frequency responses.
6. `iir_design_helper.py` which allows for easy design of lowpass, highpass, bandpass, and bandstop filters using scipy.signal Butterworth, Chebyshev I and II, and elliptical designs, including the use of the cascade of second-order sections (SOS) topology from scipy.signal, and also includes a list plotting function for easily comparing magnitude, phase, and group delay frequency responses.
7. `multirate.py` that encapsulates digital filters into objects for filtering, interpolation by an integer factor, and decimation by an integer factor.
8. `coeff2header.py` which writes `C/C++` header files for FIR and IIR filters implemented in `C/C++`, using the cascade of second-order section representation for the IIR case. This last module finds use in real-time signal processing on embedded systems, but can be used for simulation models in `C/C++`.
Presently the collection of modules contains about 125 functions and classes. The authors/maintainers are working to get more detailed documentation in place.
## Documentation
Documentation is now housed on `readthedocs` which you can get to by clicking the docs badge near the top of this `README`. Example notebooks can be viewed on [GitHub pages](https://mwickert.github.io/scikit-dsp-comm/). In time more notebook postings will be extracted from [Dr. Wickert's Info Center](http://www.eas.uccs.edu/~mwickert/).
## Getting Set-up on Your System
The best way to use this package is to clone this repository and then install it.
```bash
git clone https://github.com/mwickert/scikit-dsp-comm.git
```
There are package dependencies for some modules that you may want to avoid. Specifically these are whenever hardware interfacing is involved. Specific hardware and software configuration details are discussed in [wiki pages](https://github.com/mwickert/SP-Comm-Tutorial-using-scikit-dsp-comm/wiki).
For Windows users `pip` install takes care of almost everything. I assume below you have Python on your path, so for example with [Anaconda](https://www.anaconda.com/download/#macos), I suggest letting the installer set these paths up for you.
### Editable Install with Dependencies
With the terminal in the root directory of the cloned repo perform an editable `pip` install using
```bash
pip install -e .
```
### Why an Editable Install?
The advantage of the editable `pip` install is that it is very easy to keep `scikit-dsp-comm` up to date. If you know that updates have been pushed to the master branch, you simply go to your local repo folder and
```bash
git pull origin master
```
This will update your local repo and automatically update the Python install without the need to run `pip` again. **Note**: If you have any Python kernels running, such as a Jupyter Notebook, you will need to restart the kernel to ensure any module changes get reloaded. | scikit-dsp-comm | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/README | README |
0.632616 | 0.993574
import numpy as np
import scipy.special as special
from .digitalcom import q_fctn
from .fec_conv import binary
from logging import getLogger
log = getLogger(__name__)
class FECHamming(object):
"""
Class responsible for creating hamming block codes and then
encoding and decoding. Methods provided include hamm_gen,
hamm_encoder(), hamm_decoder().
Parameters
----------
j: Hamming code order (in terms of parity bits) where n = 2^j-1,
k = n-j, and the rate is k/n.
Returns
-------
Examples
--------
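A minimal usage sketch (added for illustration; the bit values are arbitrary):
>>> import numpy as np
>>> fec = FECHamming(3) # (7,4) Hamming code
>>> x = np.array([1, 0, 1, 1]) # one block of k = 4 source bits
>>> codewords = fec.hamm_encoder(x) # n = 7 coded bits (returned as floats)
>>> x_hat = fec.hamm_decoder(codewords.astype(int))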
Andrew Smit November 2018
"""
def __init__(self,j):
self.j = j
self.G, self.H, self.R, self.n, self.k = self.hamm_gen(self.j)
log.info('(%d,%d) hamming code object' %(self.n,self.k))
def hamm_gen(self,j):
"""
Generates parity check matrix (H) and generator
matrix (G).
Parameters
----------
j: Number of Hamming code parity bits with n = 2^j-1 and k = n-j
returns
-------
G: Systematic generator matrix with left-side identity matrix
H: Systematic parity-check matrix with right-side identity matrix
R: k x k identity matrix
n: number of total bits/block
k: number of source bits/block
Andrew Smit November 2018
"""
if(j < 3):
raise ValueError('j must be > 2')
# calculate codeword length
n = 2**j-1
# calculate source bit length
k = n-j
# Allocate memory for Matrices
G = np.zeros((k,n),dtype=int)
H = np.zeros((j,n),dtype=int)
P = np.zeros((j,k),dtype=int)
R = np.zeros((k,n),dtype=int)
# Encode parity-check matrix columns with binary 1-n
for i in range(1,n+1):
b = list(binary(i,j))
for m in range(0,len(b)):
b[m] = int(b[m])
H[:,i-1] = np.array(b)
# Reformat H to be systematic
H1 = np.zeros((1,j),dtype=int)
H2 = np.zeros((1,j),dtype=int)
for i in range(0,j):
idx1 = 2**i-1
idx2 = n-i-1
H1[0,:] = H[:,idx1]
H2[0,:] = H[:,idx2]
H[:,idx1] = H2
H[:,idx2] = H1
# Get parity matrix from H
P = H[:,:k]
# Use P to calculate the generator matrix G
G[:,:k] = np.diag(np.ones(k))
G[:,k:] = P.T
# Get k x k identity matrix
R[:,:k] = np.diag(np.ones(k))
return G, H, R, n, k
def hamm_encoder(self,x):
"""
Encodes input bit array x using hamming block code.
parameters
----------
x: array of source bits to be encoded by block encoder.
returns
-------
codewords: array of code words generated by generator
matrix G and input x.
Andrew Smit November 2018
"""
if(np.dtype(x[0]) != int):
raise ValueError('Error: Invalid data type. Input must be a vector of ints')
if(len(x) % self.k or len(x) < self.k):
raise ValueError('Error: Invalid input vector length. Length must be a multiple of %d' %self.k)
N_symbols = int(len(x)/self.k)
codewords = np.zeros(N_symbols*self.n)
x = np.reshape(x,(1,len(x)))
for i in range(0,N_symbols):
codewords[i*self.n:(i+1)*self.n] = np.matmul(x[:,i*self.k:(i+1)*self.k],self.G)%2
return codewords
def hamm_decoder(self,codewords):
"""
Decode hamming encoded codewords. Make sure code words are of
the appropriate length for the object.
parameters
---------
codewords: bit array of codewords
returns
-------
decoded_bits: bit array of decoded source bits
Andrew Smit November 2018
"""
if(np.dtype(codewords[0]) != int):
raise ValueError('Error: Invalid data type. Input must be a vector of ints')
if(len(codewords) % self.n or len(codewords) < self.n):
raise ValueError('Error: Invalid input vector length. Length must be a multiple of %d' %self.n)
# Calculate the number of symbols (codewords) in the input array
N_symbols = int(len(codewords)/self.n)
# Allocate memory for decoded sourcebits
decoded_bits = np.zeros(N_symbols*self.k)
# Loop through codewords to decode one block at a time
codewords = np.reshape(codewords,(1,len(codewords)))
for i in range(0,N_symbols):
# find the syndrome of each codeword
S = np.matmul(self.H,codewords[:,i*self.n:(i+1)*self.n].T) % 2
# convert binary syndrome to an integer
bits = ''
for m in range(0,len(S)):
bit = str(int(S[m,:]))
bits = bits + bit
error_pos = int(bits,2)
h_pos = self.H[:,error_pos-1]
# Use the syndrome to find the position of an error within the block
bits = ''
for m in range(0,len(S)):
bit = str(int(h_pos[m]))
bits = bits + bit
decoded_pos = int(bits,2)-1
# correct error if present
if(error_pos):
codewords[:,i*self.n+decoded_pos] = (codewords[:,i*self.n+decoded_pos] + 1) % 2
# Decode the corrected codeword
decoded_bits[i*self.k:(i+1)*self.k] = np.matmul(self.R,codewords[:,i*self.n:(i+1)*self.n].T).T % 2
return decoded_bits.astype(int)
class FECCyclic(object):
"""
Class responsible for creating cyclic block codes and then
encoding and decoding. Methods provided include
cyclic_encoder(), cyclic_decoder().
Parameters
----------
G: Generator polynomial used to create cyclic code object
Suggested G values (from Ziemer and Peterson pg 430):
j G
------------
3 G = '1011'
4 G = '10011'
5 G = '101001'
6 G = '1100001'
7 G = '10100001'
8 G = '101110001'
9 G = '1000100001'
10 G = '10010000001'
11 G = '101000000001'
12 G = '1100101000001'
13 G = '11011000000001'
14 G = '110000100010001'
15 G = '1100000000000001'
16 G = '11010000000010001'
17 G = '100100000000000001'
18 G = '1000000100000000001'
19 G = '11100100000000000001'
20 G = '100100000000000000001'
21 G = '1010000000000000000001'
22 G = '11000000000000000000001'
23 G = '100001000000000000000001'
24 G = '1110000100000000000000001'
Returns
-------
Examples
--------
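A minimal usage sketch (added for illustration):
>>> import numpy as np
>>> fec = FECCyclic('1011') # (7,4) cyclic code
>>> x = np.random.randint(0, 2, 4 * 10) # ten blocks of k = 4 source bits
>>> codewords = fec.cyclic_encoder(x)
>>> x_hat = fec.cyclic_decoder(codewords)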
Andrew Smit November 2018
"""
def __init__(self,G='1011'):
self.j = len(G)-1
self.n = 2**self.j - 1
self.k =self.n-self.j
self.G = G
if(G[0] == '0' or G[len(G)-1] == '0'):
raise ValueError('Error: Invalid generator polynomial')
log.info('(%d,%d) cyclic code object' %(self.n,self.k))
def cyclic_encoder(self,x,G='1011'):
"""
Encodes input bit array x using cyclic block code.
parameters
----------
x: vector of source bits to be encoded by block encoder. Numpy array
of integers expected.
returns
-------
codewords: vector of code words generated from input vector
Andrew Smit November 2018
"""
# Check block length
if(len(x) % self.k or len(x) < self.k):
raise ValueError('Error: Incomplete block in input array. Make sure input array length is a multiple of %d' %self.k)
# Check data type of input vector
if(np.dtype(x[0]) != int):
raise ValueError('Error: Input array should be int data type')
# Calculate number of blocks
Num_blocks = int(len(x) / self.k)
codewords = np.zeros((Num_blocks,self.n),dtype=int)
x = np.reshape(x,(Num_blocks,self.k))
#print(x)
for p in range(Num_blocks):
S = np.zeros(len(self.G))
codeword = np.zeros(self.n)
current_block = x[p,:]
#print(current_block)
for i in range(0,self.n):
if(i < self.k):
S[0] = current_block[i]
S0temp = 0
for m in range(0,len(self.G)):
if(self.G[m] == '1'):
S0temp = S0temp + S[m]
#print(j,S0temp,S[j])
S0temp = S0temp % 2
S = np.roll(S,1)
codeword[i] = current_block[i]
S[1] = S0temp
else:
out = 0
for m in range(1,len(self.G)):
if(self.G[m] == '1'):
out = out + S[m]
codeword[i] = out % 2
S = np.roll(S,1)
S[1] = 0
codewords[p,:] = codeword
#print(codeword)
codewords = np.reshape(codewords,np.size(codewords))
return codewords.astype(int)
def cyclic_decoder(self,codewords):
"""
Decodes a vector of cyclic coded codewords.
parameters
----------
codewords: vector of codewords to be decoded. Numpy array of integers expected.
returns
-------
decoded_blocks: vector of decoded bits
Andrew Smit November 2018
"""
# Check block length
if(len(codewords) % self.n or len(codewords) < self.n):
raise ValueError('Error: Incomplete coded block in input array. Make sure coded input array length is a multiple of %d' %self.n)
# Check input data type
if(np.dtype(codewords[0]) != int):
raise ValueError('Error: Input array should be int data type')
# Calculate number of blocks
Num_blocks = int(len(codewords) / self.n)
decoded_blocks = np.zeros((Num_blocks,self.k),dtype=int)
codewords = np.reshape(codewords,(Num_blocks,self.n))
for p in range(Num_blocks):
codeword = codewords[p,:]
Ureg = np.zeros(self.n)
S = np.zeros(len(self.G))
decoded_bits = np.zeros(self.k)
output = np.zeros(self.n)
for i in range(0,self.n): # Switch A closed B open
Ureg = np.roll(Ureg,1)
Ureg[0] = codeword[i]
S0temp = 0
S[0] = codeword[i]
for m in range(len(self.G)):
if(self.G[m] == '1'):
S0temp = S0temp + S[m]
S0 = S
S = np.roll(S,1)
S[1] = S0temp % 2
for i in range(0,self.n): # Switch B closed A open
Stemp = 0
for m in range(1,len(self.G)):
if(self.G[m] == '1'):
Stemp = Stemp + S[m]
S = np.roll(S,1)
S[1] = Stemp % 2
and_out = 1
for m in range(1,len(self.G)):
if(m > 1):
and_out = and_out and ((S[m]+1) % 2)
else:
and_out = and_out and S[m]
output[i] = (and_out + Ureg[len(Ureg)-1]) % 2
Ureg = np.roll(Ureg,1)
Ureg[0] = 0
decoded_bits = output[0:self.k].astype(int)
decoded_blocks[p,:] = decoded_bits
return np.reshape(decoded_blocks,np.size(decoded_blocks)).astype(int)
def ser2ber(q,n,d,t,ps):
"""
Converts symbol error rate to bit error rate. Taken from Ziemer and
Tranter page 650. Necessary when comparing different types of block codes.
parameters
----------
q: size of the code alphabet for given modulation type (BPSK=2)
n: number of channel bits
d: distance (2e+1) where e is the number of correctable errors per code word.
For hamming codes, e=1, so d=3.
t: number of correctable errors per code word
ps: symbol error probability vector
returns
-------
ber: bit error rate
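A short illustrative example (added; values are arbitrary) for a (7,4)
Hamming code, where t = 1 and d = 3:
>>> import numpy as np
>>> ps = np.array([1e-2, 1e-3, 1e-4])
>>> ber = ser2ber(2, 7, 3, 1, ps)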
"""
lnps = len(ps) # length of error vector
ber = np.zeros(lnps) # initialize output vector
for k in range(0,lnps): # iterate error vector
ser = ps[k] # channel symbol error rate
sum1 = 0 # initialize sums
sum2 = 0
for i in range(t+1,d+1):
term = special.comb(n,i)*(ser**i)*((1-ser))**(n-i)
sum1 = sum1 + term
for i in range(d+1,n+1):
term = (i)*special.comb(n,i)*(ser**i)*((1-ser)**(n-i))
sum2 = sum2+term
ber[k] = (q/(2*(q-1)))*((d/n)*sum1+(1/n)*sum2)
return ber
def block_single_error_Pb_bound(j,SNRdB,coded=True,M=2):
"""
Finds the bit error probability bounds according to Ziemer and Tranter
page 656.
parameters:
-----------
j: number of parity bits used in single error correction block code
SNRdB: Eb/N0 values in dB
coded: Select single error correction code (True) or uncoded (False)
M: modulation order
returns:
--------
Pb: bit error probability bound
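A short illustrative example (added; parameter values are arbitrary),
comparing the coded and uncoded bounds for a (7,4) Hamming code:
>>> import numpy as np
>>> SNRdB = np.arange(0., 12., .1)
>>> Pb_uncoded = block_single_error_Pb_bound(3, SNRdB, coded=False)
>>> Pb_coded = block_single_error_Pb_bound(3, SNRdB, coded=True)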
"""
Pb = np.zeros_like(SNRdB)
Ps = np.zeros_like(SNRdB)
SNR = 10.**(SNRdB/10.)
n = 2**j-1
k = n-j
for i,SNRn in enumerate(SNR):
if coded: # compute Hamming code Ps
if M == 2:
Ps[i] = q_fctn(np.sqrt(k * 2. * SNRn / n))
else:
Ps[i] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
q_fctn(np.sqrt(3*np.log2(M)/(M-1)*SNRn))/k
else: # Compute Uncoded Pb
if M == 2:
Pb[i] = q_fctn(np.sqrt(2. * SNRn))
else:
Pb[i] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
q_fctn(np.sqrt(3*np.log2(M)/(M-1)*SNRn))
# Convert symbol error probability to bit error probability
if coded:
Pb = ser2ber(M,n,3,1,Ps)
return Pb
# .. ._.. .._ # | scikit-dsp-comm | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/fec_block.py | fec_block.py | 0.793306 | 0.450541
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from logging import getLogger
log = getLogger(__name__)
def firwin_lpf(n_taps, fc, fs = 1.0):
"""
Design a windowed FIR lowpass filter in terms of the cutoff
frequency fc in Hz relative to sampling rate fs in Hz. The
number of taps must be provided.
Mark Wickert October 2016
"""
return signal.firwin(n_taps, 2 * fc / fs)
def firwin_bpf(n_taps, f1, f2, fs = 1.0, pass_zero=False):
"""
Design a windowed FIR bandpass filter in terms of passband
critical frequencies f1 < f2 in Hz relative to sampling rate
fs in Hz. The number of taps must be provided.
Mark Wickert October 2016
"""
return signal.firwin(n_taps, 2 * np.array([f1, f2]) / fs, pass_zero=pass_zero)
def firwin_kaiser_lpf(f_pass, f_stop, d_stop, fs = 1.0, n_bump=0, status = True):
"""
Design an FIR lowpass filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Note: the passband ripple cannot be set independent of the
stopband attenuation.
Mark Wickert October 2016
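A minimal usage sketch (added for illustration; frequency values are
arbitrary and relative to fs = 8000 Hz):
>>> b_k = firwin_kaiser_lpf(3300, 4300, 60, fs=8000)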
"""
wc = 2*np.pi*(f_pass + f_stop)/2/fs
delta_w = 2*np.pi*(f_stop - f_pass)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k
def firwin_kaiser_hpf(f_stop, f_pass, d_stop, fs = 1.0, n_bump=0, status = True):
"""
Design an FIR highpass filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Note: the passband ripple cannot be set independent of the
stopband attenuation.
Mark Wickert October 2016
"""
# Transform HPF critical frequencies to lowpass equivalent
f_pass_eq = fs/2. - f_pass
f_stop_eq = fs/2. - f_stop
# Design LPF equivalent
wc = 2*np.pi*(f_pass_eq + f_stop_eq)/2/fs
delta_w = 2*np.pi*(f_stop_eq - f_pass_eq)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
# Transform LPF equivalent to HPF
n = np.arange(len(b_k))
b_k *= (-1)**n
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k
def firwin_kaiser_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop,
fs = 1.0, n_bump=0, status = True):
"""
Design an FIR bandpass filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
desired stopband attenuation d_stop in dB for both stopbands,
all relative to a sampling rate of fs Hz.
Note: the passband ripple cannot be set independent of the
stopband attenuation.
Mark Wickert October 2016
"""
# Design BPF starting from simple LPF equivalent
# The upper and lower stopbands are assumed to have
# the same attenuation level. The LPF equivalent critical
# frequencies:
f_pass = (f_pass2 - f_pass1)/2
f_stop = (f_stop2 - f_stop1)/2
# Continue to design equivalent LPF
wc = 2*np.pi*(f_pass + f_stop)/2/fs
delta_w = 2*np.pi*(f_stop - f_pass)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
# Transform LPF to BPF
f0 = (f_pass2 + f_pass1)/2
w0 = 2*np.pi*f0/fs
n = np.arange(len(b_k))
b_k_bp = 2*b_k*np.cos(w0*(n-M/2))
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k_bp
def firwin_kaiser_bsf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop,
fs = 1.0, n_bump=0, status = True):
"""
Design an FIR bandstop filter using the sinc() kernel and
a Kaiser window. The filter order is determined based on
f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
desired stopband attenuation d_stop in dB for both stopbands,
all relative to a sampling rate of fs Hz.
Note: The passband ripple cannot be set independent of the
stopband attenuation.
Note: The filter order is forced to be even (odd number of taps)
so there is a center tap that can be used to form 1 - H_BPF.
Mark Wickert October 2016
"""
# First design a BPF starting from simple LPF equivalent
# The upper and lower stopbands are assumed to have
# the same attenuation level. The LPF equivalent critical
# frequencies:
f_pass = (f_pass2 - f_pass1)/2
f_stop = (f_stop2 - f_stop1)/2
# Continue to design equivalent LPF
wc = 2*np.pi*(f_pass + f_stop)/2/fs
delta_w = 2*np.pi*(f_stop - f_pass)/fs
# Find the filter order
M = np.ceil((d_stop - 8)/(2.285*delta_w))
# Adjust filter order up or down as needed
M += n_bump
# Make filter order even (odd number of taps)
if ((M+1)/2.0-int((M+1)/2.0)) == 0:
M += 1
N_taps = M + 1
# Obtain the Kaiser window
beta = signal.kaiser_beta(d_stop)
w_k = signal.kaiser(N_taps,beta)
n = np.arange(N_taps)
b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k
b_k /= np.sum(b_k)
# Transform LPF to BPF
f0 = (f_pass2 + f_pass1)/2
w0 = 2*np.pi*f0/fs
n = np.arange(len(b_k))
b_k_bs = 2*b_k*np.cos(w0*(n-M/2))
# Transform BPF to BSF via 1 - BPF for odd N_taps
b_k_bs = -b_k_bs
b_k_bs[int(M/2)] += 1
if status:
log.info('Kaiser Win filter taps = %d.' % N_taps)
return b_k_bs
def lowpass_order(f_pass, f_stop, dpass_dB, dstop_dB, fsamp = 1):
"""
Optimal FIR (equal ripple) Lowpass Order Determination
Text reference: Ifeachor, Digital Signal Processing a Practical Approach,
second edition, Prentice Hall, 2002.
Journal paper reference: Herrmann et al., Practical Design Rules for Optimum
Finite Impulse Response Digital Filters, Bell Syst. Tech. J., vol. 52, pp.
769-799, July-Aug. 1973.
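A short illustrative example (added; values are arbitrary):
>>> n, ff, aa, wts = lowpass_order(3300, 3650, 0.5, 60, fsamp=8000)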
"""
dpass = 1 - 10**(-dpass_dB/20)
dstop = 10**(-dstop_dB/20)
Df = (f_stop - f_pass)/fsamp
a1 = 5.309e-3
a2 = 7.114e-2
a3 = -4.761e-1
a4 = -2.66e-3
a5 = -5.941e-1
a6 = -4.278e-1
Dinf = np.log10(dstop)*(a1*np.log10(dpass)**2 + a2*np.log10(dpass) + a3) \
+ (a4*np.log10(dpass)**2 + a5*np.log10(dpass) + a6)
f = 11.01217 + 0.51244*(np.log10(dpass) - np.log10(dstop))
N = Dinf/Df - f*Df + 1
ff = 2*np.array([0, f_pass, f_stop, fsamp/2])/fsamp
aa = np.array([1, 1, 0, 0])
wts = np.array([1.0, dpass/dstop])
return int(N), ff, aa, wts
def bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2, dpass_dB, dstop_dB, fsamp = 1):
"""
Optimal FIR (equal ripple) Bandpass Order Determination
Text reference: Ifeachor, Digital Signal Processing a Practical Approach,
second edition, Prentice Hall, 2002.
Journal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum
FIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp.
204-206, April 1979.
"""
dpass = 1 - 10**(-dpass_dB/20)
dstop = 10**(-dstop_dB/20)
Df1 = (f_pass1 - f_stop1)/fsamp
Df2 = (f_stop2 - f_pass2)/fsamp
b1 = 0.01201
b2 = 0.09664
b3 = -0.51325
b4 = 0.00203
b5 = -0.5705
b6 = -0.44314
Df = min(Df1, Df2)
Cinf = np.log10(dstop)*(b1*np.log10(dpass)**2 + b2*np.log10(dpass) + b3) \
+ (b4*np.log10(dpass)**2 + b5*np.log10(dpass) + b6)
g = -14.6*np.log10(dpass/dstop) - 16.9
N = Cinf/Df + g*Df + 1
ff = 2*np.array([0, f_stop1, f_pass1, f_pass2, f_stop2, fsamp/2])/fsamp
aa = np.array([0, 0, 1, 1, 0, 0])
wts = np.array([dpass/dstop, 1, dpass/dstop])
return int(N), ff, aa, wts
def bandstop_order(f_stop1, f_pass1, f_pass2, f_stop2, dpass_dB, dstop_dB, fsamp = 1):
"""
Optimal FIR (equal ripple) Bandstop Order Determination
Text reference: Ifeachor, Digital Signal Processing a Practical Approach,
second edition, Prentice Hall, 2002.
Journal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum
FIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp.
204-206, April 1979.
"""
dpass = 1 - 10**(-dpass_dB/20)
dstop = 10**(-dstop_dB/20)
Df1 = (f_pass1 - f_stop1)/fsamp
Df2 = (f_stop2 - f_pass2)/fsamp
b1 = 0.01201
b2 = 0.09664
b3 = -0.51325
b4 = 0.00203
b5 = -0.5705
b6 = -0.44314
Df = min(Df1, Df2)
Cinf = np.log10(dstop)*(b1*np.log10(dpass)**2 + b2*np.log10(dpass) + b3) \
+ (b4*np.log10(dpass)**2 + b5*np.log10(dpass) + b6)
g = -14.6*np.log10(dpass/dstop) - 16.9
N = Cinf/Df + g*Df + 1
ff = 2*np.array([0, f_stop1, f_pass1, f_pass2, f_stop2, fsamp/2])/fsamp
aa = np.array([1, 1, 0, 0, 1, 1])
wts = np.array([2, dpass/dstop, 2])
return int(N), ff, aa, wts
def fir_remez_lpf(f_pass, f_stop, d_pass, d_stop, fs = 1.0, n_bump=5, status = True):
"""
Design an FIR lowpass filter using remez with order
determination. The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired passband ripple
d_pass dB and stopband attenuation d_stop dB all
relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
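A minimal usage sketch (added for illustration; critical frequencies are
relative to fs = 8000 Hz):
>>> b = fir_remez_lpf(3300, 3650, 0.5, 60, fs=8000)
>>> freqz_resp_list([b], fs=8000)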
"""
n, ff, aa, wts = lowpass_order(f_pass, f_stop, d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2)
if status:
log.info('Remez filter taps = %d.' % N_taps)
return b
def fir_remez_hpf(f_stop, f_pass, d_pass, d_stop, fs = 1.0, n_bump=5, status = True):
"""
Design an FIR highpass filter using remez with order
determination. The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired passband ripple
d_pass dB and stopband attenuation d_stop dB all
relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
# Transform HPF critical frequencies to lowpass equivalent
f_pass_eq = fs/2. - f_pass
f_stop_eq = fs/2. - f_stop
# Design LPF equivalent
n, ff, aa, wts = lowpass_order(f_pass_eq, f_stop_eq, d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2)
# Transform LPF equivalent to HPF
n = np.arange(len(b))
b *= (-1)**n
if status:
log.info('Remez filter taps = %d.' % N_taps)
return b
def fir_remez_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_pass, d_stop,
fs = 1.0, n_bump=5, status = True):
"""
Design an FIR bandpass filter using remez with order
determination. The filter order is determined based on
f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the
desired passband ripple d_pass dB and stopband attenuation
d_stop dB all relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
n, ff, aa, wts = bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2,
d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2)
if status:
log.info('Remez filter taps = %d.' % N_taps)
return b
def fir_remez_bsf(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop,
fs = 1.0, n_bump=5, status = True):
"""
Design an FIR bandstop filter using remez with order
determination. The filter order is determined based on
f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the
desired passband ripple d_pass dB and stopband attenuation
d_stop dB all relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
n, ff, aa, wts = bandstop_order(f_pass1, f_stop1, f_stop2, f_pass2,
d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
# Initially make sure the number of taps is even so N_bump needs to be odd
if np.mod(n,2) != 0:
n += 1
N_taps = n
N_taps += n_bump
b = signal.remez(N_taps, ff, aa[0::2], wts, Hz=2,
maxiter = 25, grid_density = 16)
if status:
log.info('N_bump must be odd to maintain odd filter length')
log.info('Remez filter taps = %d.' % N_taps)
return b
def freqz_resp_list(b, a=np.array([1]), mode = 'dB', fs=1.0, n_pts = 1024, fsize=(6, 4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib.
freqz_resp_list(b, a=[1], mode='dB', fs=1.0, n_pts=1024, fsize=(6,4))
b = ndarray of numerator coefficients
a = ndarray of denominator coefficents
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
n_pts = number of points to plot; default is 1024
fsize = figure size; default is (6,4) inches
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w,H = signal.freqz(b[n],a[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(np.abs(H[:-1])) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase", '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2) | scikit-dsp-comm | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/fir_design_helper.py | fir_design_helper.py | 0.749912 | 0.37088
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from logging import getLogger
log = getLogger(__name__)
def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR lowpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_pass : Passband critical frequency in Hz
f_stop : Stopband critical frequency in Hz
Ripple_pass : Filter gain in dB at f_pass
Atten_stop : Filter attenuation in dB at f_stop
fs : Sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Notes
-----
Additionally a text string telling the user the filter order is
written to the console, e.g., IIR cheby1 order = 8.
Examples
--------
>>> fs = 48000
>>> f_pass = 5000
>>> f_stop = 8000
>>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_hpf(f_stop, f_pass, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR highpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_stop : Stopband critical frequency in Hz
f_pass : Passband critical frequency in Hz
Ripple_pass : Filter gain in dB at f_pass
Atten_stop : Filter attenuation in dB at f_stop
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
>>> fs = 48000
>>> f_pass = 8000
>>> f_stop = 5000
>>> b_but,a_but,sos_but = IIR_hpf(f_stop,f_pass,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_hpf(f_stop,f_pass,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bpf(f_stop1, f_pass1, f_pass2, f_stop2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR bandpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_stop1 : Lower stopband critical frequency in Hz
f_pass1 : Lower passband critical frequency in Hz
f_pass2 : Upper passband critical frequency in Hz
f_stop2 : Upper stopband critical frequency in Hz
Ripple_pass : Filter gain in dB in the passband
Atten_stop : Filter attenuation in dB in the stopbands
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
>>> fs = 48000
>>> f_stop1 = 5000
>>> f_pass1 = 8000
>>> f_pass2 = 14000
>>> f_stop2 = 17000
>>> b_but,a_but,sos_but = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR bandstop filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Mark Wickert October 2016
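A minimal usage sketch (added for illustration; frequency values are
arbitrary and relative to fs = 8000 Hz):
>>> b,a,sos = IIR_bsf(1000, 1500, 2500, 3000, 0.5, 60, fs=8000, ftype='butter')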
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
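# Usage sketch for IIR_bsf (illustrative only; the band edges below are made-up
# example values, not recommendations). It designs a 60 dB bandstop filter and
# overlays its cascade-form magnitude response with freqz_resp_cas_list:
#
#     fs = 48000
#     b_bs, a_bs, sos_bs = IIR_bsf(2000, 2500, 3500, 4000, 0.5, 60, fs, 'butter')
#     freqz_resp_cas_list([sos_bs], mode='dB', fs=fs)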
def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
    a = ndarray of denominator coefficients
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; default is 1024
    fsize = figure size; default is (6,4) inches
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
f = np.arange(0,Npts)/(2.0*Npts)
for n in range(N_filt):
w,H = signal.freqz(b[n],a[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
            s1 = 'Error, mode must be "dB", "phase", '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
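# Usage sketch for freqz_resp_list (illustrative; design values taken from the
# IIR_lpf docstring example): overlay the magnitude responses of two designs.
#
#     fs = 48000
#     b_but, a_but, sos_but = IIR_lpf(5000, 8000, 0.5, 60, fs, 'butter')
#     b_ell, a_ell, sos_ell = IIR_lpf(5000, 8000, 0.5, 60, fs, 'ellip')
#     freqz_resp_list([b_but, b_ell], [a_but, a_ell], mode='dB', fs=fs)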
def freqz_cas(sos,w):
"""
Cascade frequency response
Mark Wickert October 2016
"""
Ns,Mcol = sos.shape
w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w)
for k in range(1,Ns):
w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w)
Hcas *= Htemp
return w, Hcas
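# Usage sketch for freqz_cas (illustrative): evaluate the cascade response of a
# second-order-section design on a 512-point frequency grid and convert to dB.
# The design values below are made-up examples.
#
#     b_lp, a_lp, sos_lp = IIR_lpf(5000, 6500, 0.5, 60, fs=48000, ftype='butter')
#     f = np.arange(0, 512) / (2.0 * 512)          # normalized frequency, 0..0.5
#     w, H_cas = freqz_cas(sos_lp, 2 * np.pi * f)
#     H_dB = 20 * np.log10(np.abs(H_cas))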
def freqz_resp_cas_list(sos, mode = 'dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
"""
A method for displaying cascade digital filter form frequency response
magnitude, phase, and group delay. A plot is produced using matplotlib
    freqz_resp_cas_list(sos, mode='dB', fs=1.0, n_pts=1024, fsize=(6,4))
    sos = list of 2D ndarrays of second-order section coefficients,
    one entry per filter to overlay on the plot
    mode = display mode: 'dB' magnitude, 'phase' in radians, or
    'groupdelay_s' in samples and 'groupdelay_t' in sec,
    all versus frequency in Hz
    fs = sampling rate in Hz
    n_pts = number of points to plot; default is 1024
    fsize = figure size; default is (6,4) inches
Mark Wickert, January 2015
"""
if type(sos) == list:
# We have a list of filters
N_filt = len(sos)
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w,H = freqz_cas(sos[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
            s1 = 'Error, mode must be "dB", "phase", '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
def unique_cpx_roots(rlist,tol = 0.001):
"""
The average of the root values is used when multiplicity
is greater than one.
Mark Wickert October 2016
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
for m in range(N_uniq):
if abs(rlist[k]-uniq[m]) <= tol:
mult[m] += 1
uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
break
uniq = np.hstack((uniq,rlist[k]))
mult = np.hstack((mult,[1]))
return np.array(uniq), np.array(mult)
def sos_cascade(sos1,sos2):
"""
Mark Wickert October 2016
"""
return np.vstack((sos1,sos2))
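# Usage sketch for sos_cascade (illustrative): chain a lowpass and a highpass
# design into one second-order-section cascade, e.g. to form a bandpass.
# The critical frequencies below are made-up example values.
#
#     b_lp, a_lp, sos_lp = IIR_lpf(8000, 9000, 0.5, 60, fs=48000, ftype='butter')
#     b_hp, a_hp, sos_hp = IIR_hpf(4000, 5000, 0.5, 60, fs=48000, ftype='butter')
#     sos_bp = sos_cascade(sos_lp, sos_hp)
#     freqz_resp_cas_list([sos_bp], mode='dB', fs=48000)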
def sos_zplane(sos,auto_scale=True,size=2,tol = 0.001):
"""
Create an z-plane pole-zero plot.
Create an z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
sos : ndarray of the sos coefficients
auto_scale : bool (default True)
size : plot radius maximum when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
    The difficulty is setting the tolerance for this detection. Currently it
    is set at 1e-3 via the tol argument, which is passed on to unique_cpx_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> sos_zplane(sos)
>>> # Here the plot is generated using manual scaling
>>> sos_zplane(sos,False,1.5)
"""
Ns,Mcol = sos.shape
# Extract roots from sos num and den removing z = 0
# roots due to first-order sections
N_roots = []
for k in range(Ns):
N_roots_tmp = np.roots(sos[k,:3])
if N_roots_tmp[1] == 0.:
N_roots = np.hstack((N_roots,N_roots_tmp[0]))
else:
N_roots = np.hstack((N_roots,N_roots_tmp))
D_roots = []
for k in range(Ns):
D_roots_tmp = np.roots(sos[k,3:])
if D_roots_tmp[1] == 0.:
D_roots = np.hstack((D_roots,D_roots_tmp[0]))
else:
D_roots = np.hstack((D_roots,D_roots_tmp))
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
M = len(N_roots)
N = len(D_roots)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol)
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),
ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),
ha='center',va='bottom',fontsize=10)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),
ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N | scikit-dsp-comm | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/iir_design_helper.py | iir_design_helper.py | import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from logging import getLogger
log = getLogger(__name__)
def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR lowpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
f_pass : Passband critical frequency in Hz
f_stop : Stopband critical frequency in Hz
Ripple_pass : Filter gain in dB at f_pass
Atten_stop : Filter attenuation in dB at f_stop
fs : Sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Notes
-----
Additionally a text string telling the user the filter order is
written to the console, e.g., IIR cheby1 order = 8.
Examples
--------
>>> fs = 48000
>>> f_pass = 5000
>>> f_stop = 8000
>>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_hpf(f_stop, f_pass, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR highpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
    f_stop : Stopband critical frequency in Hz
    f_pass : Passband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass
    Atten_stop : Filter attenuation in dB at f_stop
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
>>> fs = 48000
>>> f_pass = 8000
>>> f_stop = 5000
>>> b_but,a_but,sos_but = IIR_hpf(f_stop,f_pass,0.5,60,fs,'butter')
>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby1')
>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby2')
>>> b_elli,a_elli,sos_elli = IIR_hpf(f_stop,f_pass,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs,
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bpf(f_stop1, f_pass1, f_pass2, f_stop2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR bandpass filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Parameters
----------
    f_stop1 : Lower stopband critical frequency in Hz
    f_pass1 : Lower passband critical frequency in Hz
    f_pass2 : Upper passband critical frequency in Hz
    f_stop2 : Upper stopband critical frequency in Hz
    Ripple_pass : Filter gain in dB at f_pass1 and f_pass2
    Atten_stop : Filter attenuation in dB at f_stop1 and f_stop2
fs : sampling rate in Hz
ftype : Analog prototype from 'butter' 'cheby1', 'cheby2',
'ellip', and 'bessel'
Returns
-------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
sos : 2D ndarray of second-order section coefficients
Examples
--------
    >>> fs = 48000
    >>> f_stop1, f_pass1, f_pass2, f_stop2 = 4000, 5000, 8000, 9000
    >>> b_but,a_but,sos_but = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'butter')
    >>> b_cheb1,a_cheb1,sos_cheb1 = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'cheby1')
    >>> b_cheb2,a_cheb2,sos_cheb2 = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'cheby2')
    >>> b_elli,a_elli,sos_elli = IIR_bpf(f_stop1,f_pass1,f_pass2,f_stop2,0.5,60,fs,'ellip')
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop,
fs = 1.00, ftype = 'butter', status = True):
"""
Design an IIR bandstop filter using scipy.signal.iirdesign.
The filter order is determined based on
f_pass Hz, f_stop Hz, and the desired stopband attenuation
d_stop in dB, all relative to a sampling rate of fs Hz.
Mark Wickert October 2016
"""
b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype = ftype, output='ba')
sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs],
[2*float(f_stop1)/fs, 2*float(f_stop2)/fs],
Ripple_pass, Atten_stop,
ftype =ftype, output='sos')
tag = 'IIR ' + ftype + ' order'
if status:
log.info('%s = %d.' % (tag,len(a)-1))
return b, a, sos
def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
a = ndarray of denominator coefficents
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; default is 1024
fsize = figure size; defult is (6,4) inches
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
f = np.arange(0,Npts)/(2.0*Npts)
for n in range(N_filt):
w,H = signal.freqz(b[n],a[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
def freqz_cas(sos,w):
"""
Cascade frequency response
Mark Wickert October 2016
"""
Ns,Mcol = sos.shape
w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w)
for k in range(1,Ns):
w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w)
Hcas *= Htemp
return w, Hcas
def freqz_resp_cas_list(sos, mode = 'dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
"""
A method for displaying cascade digital filter form frequency response
magnitude, phase, and group delay. A plot is produced using matplotlib
    freqz_resp_cas_list(sos, mode='dB', fs=1.0, n_pts=1024, fsize=(6,4))
    sos = list of 2D ndarrays of second-order section coefficients,
    one entry per filter to overlay on the plot
    mode = display mode: 'dB' magnitude, 'phase' in radians, or
    'groupdelay_s' in samples and 'groupdelay_t' in sec,
    all versus frequency in Hz
    fs = sampling rate in Hz
    n_pts = number of points to plot; default is 1024
    fsize = figure size; default is (6,4) inches
Mark Wickert, January 2015
"""
if type(sos) == list:
# We have a list of filters
N_filt = len(sos)
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w,H = freqz_cas(sos[n],2*np.pi*f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
if n == N_filt-1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2)
def unique_cpx_roots(rlist,tol = 0.001):
"""
The average of the root values is used when multiplicity
is greater than one.
Mark Wickert October 2016
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
for m in range(N_uniq):
if abs(rlist[k]-uniq[m]) <= tol:
mult[m] += 1
uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
break
uniq = np.hstack((uniq,rlist[k]))
mult = np.hstack((mult,[1]))
return np.array(uniq), np.array(mult)
def sos_cascade(sos1,sos2):
"""
Mark Wickert October 2016
"""
return np.vstack((sos1,sos2))
def sos_zplane(sos,auto_scale=True,size=2,tol = 0.001):
"""
Create an z-plane pole-zero plot.
Create an z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
sos : ndarray of the sos coefficients
auto_scale : bool (default True)
size : plot radius maximum when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
    The difficulty is setting the tolerance for this detection. Currently it
    is set at 1e-3 via the tol argument, which is passed on to unique_cpx_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> sos_zplane(sos)
>>> # Here the plot is generated using manual scaling
>>> sos_zplane(sos,False,1.5)
"""
Ns,Mcol = sos.shape
# Extract roots from sos num and den removing z = 0
# roots due to first-order sections
N_roots = []
for k in range(Ns):
N_roots_tmp = np.roots(sos[k,:3])
if N_roots_tmp[1] == 0.:
N_roots = np.hstack((N_roots,N_roots_tmp[0]))
else:
N_roots = np.hstack((N_roots,N_roots_tmp))
D_roots = []
for k in range(Ns):
D_roots_tmp = np.roots(sos[k,3:])
if D_roots_tmp[1] == 0.:
D_roots = np.hstack((D_roots,D_roots_tmp[0]))
else:
D_roots = np.hstack((D_roots,D_roots_tmp))
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
M = len(N_roots)
N = len(D_roots)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol)
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),
ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),
ha='center',va='bottom',fontsize=10)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),
ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N | 0.801431 | 0.4856 |
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
from . import sigsys as ssd
from . import fir_design_helper as fir_d
from . import iir_design_helper as iir_d
from logging import getLogger
log = getLogger(__name__)
import warnings
class rate_change(object):
"""
A simple class for encapsulating the upsample/filter and
filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated.
Mark Wickert February 2015
"""
def __init__(self,M_change = 12,fcutoff=0.9,N_filt_order=8,ftype='butter'):
"""
Object constructor method
"""
self.M = M_change # Rate change factor M or L
self.fc = fcutoff*.5 # must be fs/(2*M), but scale by fcutoff
self.N_forder = N_filt_order
if ftype.lower() == 'butter':
self.b, self.a = signal.butter(self.N_forder,2/self.M*self.fc)
elif ftype.lower() == 'cheby1':
# Set the ripple to 0.05 dB
self.b, self.a = signal.cheby1(self.N_forder,0.05,2/self.M*self.fc)
else:
warnings.warn('ftype must be "butter" or "cheby1"')
def up(self,x):
"""
Upsample and filter the signal
"""
y = self.M*ssd.upsample(x,self.M)
y = signal.lfilter(self.b,self.a,y)
return y
def dn(self,x):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,self.a,x)
y = ssd.downsample(y,self.M)
return y
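# Usage sketch for rate_change (illustrative): interpolate a signal by 12 and
# then decimate it back. The sinusoid below is only a stand-in test signal.
#
#     n = np.arange(10000)
#     x = np.cos(2 * np.pi * 0.05 * n)
#     rc = rate_change(M_change=12, fcutoff=0.9, N_filt_order=8, ftype='butter')
#     x_up = rc.up(x)     # upsample by 12, then lowpass filter
#     x_dn = rc.dn(x_up)  # lowpass filter, then downsample by 12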
class multirate_FIR(object):
"""
A simple class for encapsulating FIR filtering, or FIR upsample/
filter, or FIR filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated. Frequency response
and the pole zero plot can also be plotted using supplied class methods.
Mark Wickert March 2017
"""
def __init__(self,b):
"""
Object constructor method
"""
self.N_forder = len(b)
self.b = b
log.info('FIR filter taps = %d' % self.N_forder)
def filter(self,x):
"""
Filter the signal
"""
y = signal.lfilter(self.b,[1],x)
return y
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.lfilter(self.b,[1],y)
return y
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,[1],x)
y = ssd.downsample(y,M_change)
return y
def freq_resp(self, mode= 'dB', fs = 8000, ylim = [-100,2]):
"""
"""
fir_d.freqz_resp_list([self.b], [1], mode, fs=fs, n_pts= 1024)
pylab.grid()
pylab.ylim(ylim)
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
ssd.zplane(self.b,[1],auto_scale,size,tol)
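# Usage sketch for multirate_FIR (illustrative): build an interpolator from a
# plain scipy.signal.firwin lowpass design. The tap count, cutoff, and test
# signal below are made-up example values.
#
#     fs = 48000
#     n = np.arange(20000)
#     x = np.cos(2 * np.pi * 1000 * n / fs)
#     b = signal.firwin(64, 2 * 3000 / fs)   # 64-tap lowpass, 3 kHz cutoff
#     mrf = multirate_FIR(b)
#     y = mrf.up(x, L_change=4)
#     mrf.freq_resp('dB', fs)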
class multirate_IIR(object):
"""
A simple class for encapsulating IIR filtering, or IIR upsample/
filter, or IIR filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated. Frequency response
and the pole zero plot can also be plotted using supplied class methods.
For added robustness to floating point quantization all filtering
is done using the scipy.signal cascade of second-order sections filter
method y = sosfilter(sos,x).
Mark Wickert March 2017
"""
def __init__(self,sos):
"""
Object constructor method
"""
self.N_forder = np.sum(np.sign(np.abs(sos[:,2]))) \
+ np.sum(np.sign(np.abs(sos[:,1])))
self.sos = sos
log.info('IIR filter order = %d' % self.N_forder)
def filter(self,x):
"""
Filter the signal using second-order sections
"""
y = signal.sosfilt(self.sos,x)
return y
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.sosfilt(self.sos,y)
return y
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.sosfilt(self.sos,x)
y = ssd.downsample(y,M_change)
return y
def freq_resp(self, mode= 'dB', fs = 8000, ylim = [-100,2]):
"""
Frequency response plot
"""
iir_d.freqz_resp_cas_list([self.sos],mode,fs=fs)
pylab.grid()
pylab.ylim(ylim)
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
        Plot the poles and zeros of the IIR filter in the z-plane
"""
iir_d.sos_zplane(self.sos,auto_scale,size,tol)
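# Usage sketch for multirate_IIR (illustrative): wrap a Chebyshev type-1 design
# in second-order-section form for decimation by 8. Parameter values and the
# test signal are made-up examples.
#
#     M = 8
#     n = np.arange(20000)
#     x = np.cos(2 * np.pi * 0.02 * n)
#     sos = signal.cheby1(10, 0.05, 0.8 / M, output='sos')
#     mriir = multirate_IIR(sos)
#     y = mriir.dn(x, M_change=M)
#     mriir.freq_resp('dB', 8000)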
def freqz_resp(b,a=[1],mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
    a = ndarray of denominator coefficients
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
    Npts = number of points to plot; default is 1024
    fsize = figure size; default is (6,4) inches
Mark Wickert, January 2015
"""
f = np.arange(0,Npts)/(2.0*Npts)
w,H = signal.freqz(b,a,2*np.pi*f)
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
        # pylab.find was removed from matplotlib; use np.nonzero instead
        idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
        s1 = 'Error, mode must be "dB", "phase", '
s2 = '"groupdelay_s", or "groupdelay_t"'
warnings.warn(s1 + s2) | scikit-dsp-comm | /scikit-dsp-comm-2.0.3.tar.gz/scikit-dsp-comm-2.0.3/src/sk_dsp_comm/multirate_helper.py | multirate_helper.py | from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
from . import sigsys as ssd
from . import fir_design_helper as fir_d
from . import iir_design_helper as iir_d
from logging import getLogger
log = getLogger(__name__)
import warnings
class rate_change(object):
"""
A simple class for encapsulating the upsample/filter and
filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated.
Mark Wickert February 2015
"""
def __init__(self,M_change = 12,fcutoff=0.9,N_filt_order=8,ftype='butter'):
"""
Object constructor method
"""
self.M = M_change # Rate change factor M or L
self.fc = fcutoff*.5 # must be fs/(2*M), but scale by fcutoff
self.N_forder = N_filt_order
if ftype.lower() == 'butter':
self.b, self.a = signal.butter(self.N_forder,2/self.M*self.fc)
elif ftype.lower() == 'cheby1':
# Set the ripple to 0.05 dB
self.b, self.a = signal.cheby1(self.N_forder,0.05,2/self.M*self.fc)
else:
warnings.warn('ftype must be "butter" or "cheby1"')
def up(self,x):
"""
Upsample and filter the signal
"""
y = self.M*ssd.upsample(x,self.M)
y = signal.lfilter(self.b,self.a,y)
return y
def dn(self,x):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,self.a,x)
y = ssd.downsample(y,self.M)
return y
class multirate_FIR(object):
"""
A simple class for encapsulating FIR filtering, or FIR upsample/
filter, or FIR filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated. Frequency response
and the pole zero plot can also be plotted using supplied class methods.
Mark Wickert March 2017
"""
def __init__(self,b):
"""
Object constructor method
"""
self.N_forder = len(b)
self.b = b
log.info('FIR filter taps = %d' % self.N_forder)
def filter(self,x):
"""
Filter the signal
"""
y = signal.lfilter(self.b,[1],x)
return y
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.lfilter(self.b,[1],y)
return y
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,[1],x)
y = ssd.downsample(y,M_change)
return y
def freq_resp(self, mode= 'dB', fs = 8000, ylim = [-100,2]):
"""
"""
fir_d.freqz_resp_list([self.b], [1], mode, fs=fs, n_pts= 1024)
pylab.grid()
pylab.ylim(ylim)
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
ssd.zplane(self.b,[1],auto_scale,size,tol)
class multirate_IIR(object):
"""
A simple class for encapsulating IIR filtering, or IIR upsample/
filter, or IIR filter/downsample operations used in modeling a comm
system. Objects of this class will hold the required filter
coefficients once an object is instantiated. Frequency response
and the pole zero plot can also be plotted using supplied class methods.
For added robustness to floating point quantization all filtering
is done using the scipy.signal cascade of second-order sections filter
method y = sosfilter(sos,x).
Mark Wickert March 2017
"""
def __init__(self,sos):
"""
Object constructor method
"""
self.N_forder = np.sum(np.sign(np.abs(sos[:,2]))) \
+ np.sum(np.sign(np.abs(sos[:,1])))
self.sos = sos
log.info('IIR filter order = %d' % self.N_forder)
def filter(self,x):
"""
Filter the signal using second-order sections
"""
y = signal.sosfilt(self.sos,x)
return y
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.sosfilt(self.sos,y)
return y
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.sosfilt(self.sos,x)
y = ssd.downsample(y,M_change)
return y
def freq_resp(self, mode= 'dB', fs = 8000, ylim = [-100,2]):
"""
Frequency response plot
"""
iir_d.freqz_resp_cas_list([self.sos],mode,fs=fs)
pylab.grid()
pylab.ylim(ylim)
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
        Plot the poles and zeros of the IIR filter in the z-plane
"""
iir_d.sos_zplane(self.sos,auto_scale,size,tol)
def freqz_resp(b,a=[1],mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
b = ndarray of numerator coefficients
a = ndarray of denominator coefficents
mode = display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
Npts = number of points to plot; defult is 1024
fsize = figure size; defult is (6,4) inches
Mark Wickert, January 2015
"""
f = np.arange(0,Npts)/(2.0*Npts)
w,H = signal.freqz(b,a,2*np.pi*f)
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f*fs,20*np.log10(np.abs(H)))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f*fs,np.angle(H))
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2*theta)/2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2)/np.diff(w)
# For gain almost zero set groupdelay = 0
        # pylab.find was removed from matplotlib; use np.nonzero instead
        idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
#print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1]*fs,Tg/fs)
plt.ylim([0,1.2*max_Tg])
else:
plt.plot(f[:-1]*fs,Tg)
plt.ylim([0,1.2*max_Tg])
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
warnings.warn(s1 + s2) | 0.634317 | 0.493958 |
# scikit-duplo
Very simple reusable blocks for scikit-learn pipelines (inspired by scikit-lego)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![PyPI](https://img.shields.io/pypi/v/scikit-duplo.svg)](https://pypi.org/project/scikit-duplo)
[![Documentation Status](https://readthedocs.org/projects/scikit-duplo/badge/?version=latest)](https://scikit-duplo.readthedocs.io/en/latest/?badge=latest)
# Installation
Installation from the source tree:
```
python setup.py install
```
Or via pip from PyPI:
```
pip install scikit-duplo
```
# Contents
The scikit-duplo package contains multiple classes that you can use in a scikit-learn
compatible pipeline. There are ensemble learning classes within the `meta` subdirectory.
These classes expect you to pass in multiple other scikit-learn compatible
machine learning estimators, which they use to build an ensemble of models to
predict the target variable.
There are feature engineering classes inside the `preprocessing` subdirectory. These are
ColumnTransformer-compatible classes that expect to receive a dataframe and a set of column
names that they will transform for the downstream pipeline processes.
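For example, the ensemble classes in `meta` can be used like any other scikit-learn estimator. The snippet below is an illustrative sketch only: the data is randomly generated placeholder data, the estimator choices are arbitrary, and the import path assumes the module layout of this repository.
```
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from skduplo.meta.quantile_stack_regressor import QuantileStackRegressor

X = np.random.randn(1000, 4)              # placeholder features
y = np.maximum(X[:, 0] * X[:, 1], 0)      # placeholder zero-inflated target
model = QuantileStackRegressor(
    classifier=ExtraTreesClassifier(),
    regressor=ExtraTreesRegressor(),
    cuts=[0]                              # thresholds for the propensity classifiers
)
model.fit(X, y)
predictions = model.predict(X)
```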
| scikit-duplo | /scikit-duplo-0.1.7.tar.gz/scikit-duplo-0.1.7/README.md | README.md | python setup.py install
pip install scikit-duplo | 0.499512 | 0.895933 |
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor, is_classifier
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import train_test_split
class QuantileStackRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor for doing model stacking using underlying
quantile propensity models. The model will first learn a
series of quantile discriminator functions and then stack
them with out of sample predictions into a final regressor.
Particularly useful for zero-inflated or heavily skewed datasets,
`QuantileStackRegressor` consists of a series of classifiers and a regressor.
- The classifier's task is to build a series of propensity models
that predict if the target is above a given threshold.
These are built in a two fold CV, so that out of sample predictions
can be added to the x vector for the final regression model
- The regressor's task is to output the final prediction, aided by the
probabilities added by the underlying quantile classifiers.
At prediction time, the average of the two classifiers is used for all propensity models.
    Credits: The structure of this code is based on the zero-inflated regressor from sklego:
https://github.com/koaning/scikit-lego
Parameters
----------
classifier : Any, scikit-learn classifier
regressor : Any, scikit-learn regressor
Examples
--------
>>> import numpy as np
>>> from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
>>> np.random.seed(0)
>>> X = np.random.randn(10000, 4)
>>> y = ((X[:, 0]>0) & (X[:, 1]>0)) * np.abs(X[:, 2] * X[:, 3]**2)
>>> z = QuantileStackRegressor(
... classifier=ExtraTreesClassifier(random_state=0),
... regressor=ExtraTreesRegressor(random_state=0)
... )
>>> z.fit(X, y)
QuantileStackRegressor(classifier=ExtraTreesClassifier(random_state=0),
regressor=ExtraTreesRegressor(random_state=0))
>>> z.predict(X)[:5]
array([4.91483294, 0. , 0. , 0.04941909, 0. ])
"""
def __init__(self, classifier, regressor, cuts=[0]) -> None:
"""Initialize."""
self.classifier = classifier
self.regressor = regressor
self.cuts = cuts
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
QuantileStackRegressor
Fitted regressor.
Raises
------
ValueError
If `classifier` is not a classifier or `regressor` is not a regressor.
"""
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
if not is_classifier(self.classifier):
raise ValueError(
f"`classifier` has to be a classifier. Received instance of {type(self.classifier)} instead.")
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
"""
Now we need to internally split the data and build two sets of the classifiers
to prevent target leakage
"""
X_ = [0] * 2
y_ = [0] * 2
X_[0], X_[1], y_[0], y_[1] = train_test_split(X, y, test_size=0.5)
"""
Build two sets of classifiers for each of the required cuts
"""
self.classifiers_ = [0] * 2
for index in [0,1]:
self.classifiers_[index] = [0] * len(self.cuts)
for c, cut in enumerate(self.cuts):
self.classifiers_[index][c] = clone(self.classifier)
self.classifiers_[index][c].fit(X_[index], y_[index] > cut )
"""
Apply those classifier to the out of sample data
"""
Xfinal_ = [0] * 2
for index in [0,1]:
Xfinal_[index] = X_[index].copy()
c_index = 1 - index
for c, cut in enumerate(self.cuts):
preds = self.classifiers_[c_index][c].predict_proba( X_[index] )[:,1]
Xfinal_[index] = np.append(Xfinal_[index], preds.T[:, None], axis=1)
"""
Join the split data into a final dataset for the regression model
"""
Xfinale = np.concatenate((Xfinal_[0], Xfinal_[1] ), axis=0)
Yfinale = np.concatenate((y_[0], y_[1] ), axis=0)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( Xfinale, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
"""
Apply classifiers to generate new colums
"""
Xfinale = X.copy()
for c, cut in enumerate(self.cuts):
temp = np.zeros(len(X))
for index in [0,1]:
temp = temp + self.classifiers_[index][c].predict_proba(X)[:,1]
temp = temp/2
Xfinale = np.append(Xfinale, temp[:, None], axis=1)
return self.regressor_.predict(Xfinale) | scikit-duplo | /scikit-duplo-0.1.7.tar.gz/scikit-duplo-0.1.7/skduplo/meta/quantile_stack_regressor.py | quantile_stack_regressor.py | import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor, is_classifier
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import train_test_split
class QuantileStackRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor for doing model stacking using underlying
quantile propensity models. The model will first learn a
series of quantile discriminator functions and then stack
them with out of sample predictions into a final regressor.
Particularly useful for zero-inflated or heavily skewed datasets,
`QuantileStackRegressor` consists of a series of classifiers and a regressor.
- The classifier's task is to build a series of propensity models
that predict if the target is above a given threshold.
These are built in a two fold CV, so that out of sample predictions
can be added to the x vector for the final regression model
- The regressor's task is to output the final prediction, aided by the
probabilities added by the underlying quantile classifiers.
At prediction time, the average of the two classifiers is used for all propensity models.
    Credits: The structure of this code is based on the zero-inflated regressor from sklego:
https://github.com/koaning/scikit-lego
Parameters
----------
classifier : Any, scikit-learn classifier
regressor : Any, scikit-learn regressor
Examples
--------
>>> import numpy as np
>>> from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
>>> np.random.seed(0)
>>> X = np.random.randn(10000, 4)
>>> y = ((X[:, 0]>0) & (X[:, 1]>0)) * np.abs(X[:, 2] * X[:, 3]**2)
>>> z = QuantileStackRegressor(
... classifier=ExtraTreesClassifier(random_state=0),
... regressor=ExtraTreesRegressor(random_state=0)
... )
>>> z.fit(X, y)
QuantileStackRegressor(classifier=ExtraTreesClassifier(random_state=0),
regressor=ExtraTreesRegressor(random_state=0))
>>> z.predict(X)[:5]
array([4.91483294, 0. , 0. , 0.04941909, 0. ])
"""
def __init__(self, classifier, regressor, cuts=[0]) -> None:
"""Initialize."""
self.classifier = classifier
self.regressor = regressor
self.cuts = cuts
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
QuantileStackRegressor
Fitted regressor.
Raises
------
ValueError
If `classifier` is not a classifier or `regressor` is not a regressor.
"""
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
if not is_classifier(self.classifier):
raise ValueError(
f"`classifier` has to be a classifier. Received instance of {type(self.classifier)} instead.")
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
"""
Now we need to internally split the data and build two sets of the classifiers
to prevent target leakage
"""
X_ = [0] * 2
y_ = [0] * 2
X_[0], X_[1], y_[0], y_[1] = train_test_split(X, y, test_size=0.5)
"""
Build two sets of classifiers for each of the required cuts
"""
self.classifiers_ = [0] * 2
for index in [0,1]:
self.classifiers_[index] = [0] * len(self.cuts)
for c, cut in enumerate(self.cuts):
self.classifiers_[index][c] = clone(self.classifier)
self.classifiers_[index][c].fit(X_[index], y_[index] > cut )
"""
Apply those classifier to the out of sample data
"""
Xfinal_ = [0] * 2
for index in [0,1]:
Xfinal_[index] = X_[index].copy()
c_index = 1 - index
for c, cut in enumerate(self.cuts):
preds = self.classifiers_[c_index][c].predict_proba( X_[index] )[:,1]
Xfinal_[index] = np.append(Xfinal_[index], preds.T[:, None], axis=1)
"""
Join the split data into a final dataset for the regression model
"""
Xfinale = np.concatenate((Xfinal_[0], Xfinal_[1] ), axis=0)
Yfinale = np.concatenate((y_[0], y_[1] ), axis=0)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( Xfinale, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
"""
Apply classifiers to generate new colums
"""
Xfinale = X.copy()
for c, cut in enumerate(self.cuts):
temp = np.zeros(len(X))
for index in [0,1]:
temp = temp + self.classifiers_[index][c].predict_proba(X)[:,1]
temp = temp/2
Xfinale = np.append(Xfinale, temp[:, None], axis=1)
return self.regressor_.predict(Xfinale) | 0.926183 | 0.812012 |
from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
import pandas as pd
import numpy as np
class BaselineProportionalRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor for learning the target value as a proportional difference
relative to a mean value for a subset of other features.
    Creates and maintains an internal lookup table for the baseline during the
    model fit process.
Parameters
----------
    baseline_cols : list of str, columns used to group the data when computing the baseline
    regressor : Any, scikit-learn regressor that will be learned for the adjusted target
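    Examples
    --------
    A minimal usage sketch with made-up data; the column names "store_id" and
    "spend" are hypothetical and only illustrate the grouping behaviour:
        import pandas as pd
        from sklearn.ensemble import ExtraTreesRegressor
        X = pd.DataFrame({"store_id": [1, 1, 2, 2] * 25,
                          "spend": range(100)})
        y = X["spend"] * 1.5 + X["store_id"]
        model = BaselineProportionalRegressor(baseline_cols=["store_id"],
                                              regressor=ExtraTreesRegressor())
        model.fit(X, y)
        preds = model.predict(X)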
"""
def __init__(self, baseline_cols, regressor) -> None:
"""Initialize."""
self.baseline_cols = baseline_cols
self.regressor = regressor
self.baseline_func = 'mean'
def generate_baseline(self, df):
self.lookup = df.groupby(self.baseline_cols).agg({'baseline':self.baseline_func}).reset_index()
self.baseline_default = df['baseline'].agg(self.baseline_func)
def get_baseline_predictions(self, df):
new_df = pd.merge(df, self.lookup, how='left', on=self.baseline_cols)
new_df['baseline'] = np.where(new_df['baseline'].isnull(), self.baseline_default, new_df['baseline'])
return new_df['baseline']
def get_relative_target(self, baseline, y):
return (y-baseline)/baseline
def invert_relative_target(self, preds, baseline):
return (preds*baseline)+baseline
def get_params(self, deep=True):
return self.regressor.get_params()
def set_params(self, **parameters):
for parameter, value in parameters.items():
if parameter == "baseline_cols":
self.baseline_cols = value
else:
                # use the builtin setattr; estimators do not expose a .setattr() method
                setattr(self.regressor, parameter, value)
return self
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Note: this model diverges from the scikit-learn standard in that it needs a
pandas dataframe.
Parameters
----------
X : pandas.DataFrame of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
BaselineProportionalRegressor
Fitted regressor.
Raises
------
ValueError
If `regressor` is not a regressor.
"""
column_names = X.columns
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
X = pd.DataFrame(X, columns=column_names)
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
for col in self.baseline_cols:
if col not in X.columns:
raise ValueError(f"pandas.DataFrame required with baseline columns: `{col}` NOT FOUND.")
df = X.copy()
df['baseline'] = y
self.generate_baseline(df)
baseline = self.get_baseline_predictions(X)
Yfinale = self.get_relative_target(baseline, y)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( X, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : pd.DataFrame - shape (n_samples, n_features)
DataFrame of samples to get predictions for.
Note: DataFrame is required because the baseline uses column names.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
column_names = X.columns
X = check_array(X)
self._check_n_features(X, reset=False)
X = pd.DataFrame(X, columns=column_names)
for col in self.baseline_cols:
if col not in X.columns:
raise ValueError(f"pandas.DataFrame required with baseline columns: `{col}` NOT FOUND.")
baseline = self.get_baseline_predictions(X)
preds = self.regressor_.predict(X)
return self.invert_relative_target(preds, baseline) | scikit-duplo | /scikit-duplo-0.1.7.tar.gz/scikit-duplo-0.1.7/skduplo/meta/baseline_proportional_regressor.py | baseline_proportional_regressor.py | from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
import pandas as pd
import numpy as np
class BaselineProportionalRegressor(BaseEstimator, RegressorMixin):
"""
A meta regressor for learning the target value as a proportional difference
relative to a mean value for a subset of other features.
Creates and maintains an internal lookup table for the baseline during the
    model fit process.
Parameters
----------
regressor : Any, scikit-learn regressor that will be learned for the adjust target
"""
def __init__(self, baseline_cols, regressor) -> None:
"""Initialize."""
self.baseline_cols = baseline_cols
self.regressor = regressor
self.baseline_func = 'mean'
def generate_baseline(self, df):
self.lookup = df.groupby(self.baseline_cols).agg({'baseline':self.baseline_func}).reset_index()
self.baseline_default = df['baseline'].agg(self.baseline_func)
def get_baseline_predictions(self, df):
new_df = pd.merge(df, self.lookup, how='left', on=self.baseline_cols)
new_df['baseline'] = np.where(new_df['baseline'].isnull(), self.baseline_default, new_df['baseline'])
return new_df['baseline']
def get_relative_target(self, baseline, y):
return (y-baseline)/baseline
def invert_relative_target(self, preds, baseline):
return (preds*baseline)+baseline
def get_params(self, deep=True):
return self.regressor.get_params()
def set_params(self, **parameters):
for parameter, value in parameters.items():
if parameter == "baseline_cols":
self.baseline_cols = value
else:
                # use the builtin setattr; estimators do not expose a .setattr() method
                setattr(self.regressor, parameter, value)
return self
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Note: this model diverges from the scikit-learn standard in that it needs a
pandas dataframe.
Parameters
----------
X : pandas.DataFrame of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
BaselineProportionalRegressor
Fitted regressor.
Raises
------
ValueError
If `regressor` is not a regressor.
"""
column_names = X.columns
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
X = pd.DataFrame(X, columns=column_names)
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
for col in self.baseline_cols:
if col not in X.columns:
raise ValueError(f"pandas.DataFrame required with baseline columns: `{col}` NOT FOUND.")
df = X.copy()
df['baseline'] = y
self.generate_baseline(df)
baseline = self.get_baseline_predictions(X)
Yfinale = self.get_relative_target(baseline, y)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( X, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : pd.DataFrame - shape (n_samples, n_features)
DataFrame of samples to get predictions for.
Note: DataFrame is required because the baseline uses column names.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
column_names = X.columns
X = check_array(X)
self._check_n_features(X, reset=False)
X = pd.DataFrame(X, columns=column_names)
for col in self.baseline_cols:
if col not in X.columns:
raise ValueError(f"pandas.DataFrame required with baseline columns: `{col}` NOT FOUND.")
baseline = self.get_baseline_predictions(X)
preds = self.regressor_.predict(X)
return self.invert_relative_target(preds, baseline) | 0.942275 | 0.498901 |
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone, is_regressor, is_classifier
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import train_test_split
class RegressorStack(BaseEstimator, RegressorMixin):
"""
    A meta regressor for doing model stacking for regression using a set of
    internal regressors.
    Particularly designed for zero-inflated or heavily skewed datasets,
    `RegressorStack` consists of a series of internal regressors,
    all of which are fitted on an internal two-fold split and applied out-of-sample.
A final regressor is trained over the original features and the output
of these stacked regression models.
Parameters
----------
    regressor_list : list of scikit-learn regressors
        The internal regressors whose out-of-sample predictions are stacked
        as additional features.
    regressor : Any, scikit-learn regressor
        The final regressor that predicts the target from the original features
        plus the stacked predictions.
Examples
--------
>>> import numpy as np
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> from sklearn.linear_model import BayesianRidge
>>> np.random.seed(0)
>>> X = np.random.randn(10000, 4)
>>> y = ((X[:, 0]>0) & (X[:, 1]>0)) * np.abs(X[:, 2] * X[:, 3]**2)
>>> z = RegressorStack(
... [KNeighborsRegressor(), BayesianRidge()],
... regressor=ExtraTreesRegressor(random_state=0)
... )
>>> z.fit(X, y)
RegressorStack([KNeighborsRegressor(), BayesianRidge()],
regressor=ExtraTreesRegressor(random_state=0))
>>> z.predict(X)[:5]
array([4.91483294, 0. , 0. , 0.04941909, 0. ])
"""
def __init__(self, regressor_list, regressor) -> None:
"""Initialize."""
self.regressor_list = regressor_list
self.regressor = regressor
def fit(self, X, y, sample_weight=None):
"""
Fit the model.
Parameters
----------
X : np.ndarray of shape (n_samples, n_features)
The training data.
y : np.ndarray, 1-dimensional
The target values.
sample_weight : Optional[np.array], default=None
Individual weights for each sample.
Returns
-------
        RegressorStack
Fitted regressor.
Raises
------
ValueError
If `regressor` is not a regressor.
"""
X, y = check_X_y(X, y)
self._check_n_features(X, reset=True)
if not is_regressor(self.regressor):
raise ValueError(f"`regressor` has to be a regressor. Received instance of {type(self.regressor)} instead.")
"""
Now we need to internally split the data and build two sets of
internal regressors to prevent leakage
"""
X_ = [0] * 2
y_ = [0] * 2
X_[0], X_[1], y_[0], y_[1] = train_test_split(X, y, test_size=0.5)
"""
Build the internal regressors
"""
self.regressors_ = [0] * 2
for index in [0,1]:
self.regressors_[index] = [0] * len(self.regressor_list)
for c, reg in enumerate(self.regressor_list):
self.regressors_[index][c] = clone(reg)
self.regressors_[index][c].fit(X_[index], y_[index] )
"""
Apply those classifier to the out of sample data
"""
Xfinal_reg_ = [0] * 2
for index in [0,1]:
Xfinal_reg_[index] = X_[index].copy()
c_index = 1 - index
for c, reg in enumerate(self.regressor_list):
preds = self.regressors_[c_index][c].predict( X_[index] )
Xfinal_reg_[index] = np.append(Xfinal_reg_[index], preds.T[:, None], axis=1)
"""
Join the split data into a final dataset for the regression model
"""
Xfinale = np.concatenate((Xfinal_reg_[0], Xfinal_reg_[1] ), axis=0)
Yfinale = np.concatenate((y_[0], y_[1] ), axis=0)
self.regressor_ = clone(self.regressor)
self.regressor_.fit( Xfinale, Yfinale, sample_weight=sample_weight)
return self
def predict(self, X):
"""
Get predictions.
Parameters
----------
X : np.ndarray, shape (n_samples, n_features)
Samples to get predictions of.
Returns
-------
y : np.ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
X = check_array(X)
self._check_n_features(X, reset=False)
Xfinale = X.copy()
for c, reg in enumerate(self.regressor_list):
temp = np.zeros(len(X))
for index in [0,1]:
temp = temp + self.regressors_[index][c].predict( X )
temp = temp/2
Xfinale = np.append(Xfinale, temp[:, None], axis=1)
        return self.regressor_.predict(Xfinale)

| scikit-duplo | /scikit-duplo-0.1.7.tar.gz/scikit-duplo-0.1.7/skduplo/meta/regressor_stack.py | regressor_stack.py
Master Status: [![Build Status](https://travis-ci.com/UrbsLab/scikit-eLCS.svg?branch=master)](https://travis-ci.com/UrbsLab/scikit-eLCS)
# scikit-eLCS
The scikit-eLCS package includes a sklearn-compatible Python implementation of eLCS, a supervised learning variant of the Learning Classifier System, based off of UCS. In general, Learning Classifier Systems (LCSs) are a family of Rule-Based Machine Learning Algorithms that have been shown to perform well on problems involving high amounts of heterogeneity and epistasis. Well-designed LCSs are also highly human interpretable. LCS variants have been shown to adeptly handle supervised and reinforcement learning, classification and regression, online and offline learning problems, as well as missing or unbalanced data. These characteristics of versatility and interpretability give LCSs a wide range of potential applications, notably those in biomedicine. This package is **still under active development** and we encourage you to check back on this repository for updates.
eLCS, or Educational Learning Classifier System, implements the core components of a Michigan-Style Learning Classifier System (where the system's genetic algorithm operates on a rule level, evolving a population of rules, each with its own parameters) in an easy-to-understand way, while still being highly functional in solving ML problems.
While Learning Classifier Systems are commonly applied to genetic analyses, where epistasis (i.e. feature interactions) is common, the eLCS algorithm implemented in this package can be applied to almost any supervised classification data set and supports:
* Feature sets that are discrete/categorical, continuous-valued or a mix of both
* Data with missing values
* Binary endpoints (i.e., classification)
* Multi-class endpoints (i.e., classification)
* eLCS does not currently support regression problems. We have built out the infrastructure for it to do so, but have disabled its functionality for this version.
Built into this code is a strategy to automatically detect these relevant characteristics from the loaded data, so that they don't need to be parameterized at initialization.
The core scikit-eLCS package only supports numeric data. However, an additional StringEnumerator class is provided within the DataCleanup file that allows quick conversion of any dataset into purely numeric form, making it possible for natively string/non-numeric data to be run by eLCS.
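A minimal sketch of that conversion workflow is shown below (the import path follows the package layout; the file name, class label, and the "Sex" column are illustrative placeholders for your own data):
```python
from skeLCS.DataCleanup import StringEnumerator

converter = StringEnumerator('myDataFile.csv', 'Class') #REPLACE with your own dataset .csv filename and class column name
converter.add_class_converter_random() #Enumerate the class column
converter.add_attribute_converter_random('Sex') #Enumerate any non-numeric feature column (illustrative header name)
converter.convert_all_attributes()
dataHeaders, classLabel, dataFeatures, dataPhenotypes = converter.get_params()
```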
In addition, powerful data tracking methods are built into the scikit package that continuously record metrics at every iteration, such as:
* Approximate Accuracy
* Average Population Generality
* Macropopulation Size
* Micropopulation Size
* Match Set, Correct Set Sizes
* Number of classifiers subsumed/deleted/covered
* Number of crossover/mutation operations performed
* Times for matching, deletion, subsumption, selection, evaluation
These values can then be exported as a csv after training is complete for analysis, using the built-in "export_iteration_tracking_data" method (see the sketch below).
In addition, the package includes functionality that allows the final rule population to be exported as a csv after training.
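As a rough sketch (assuming a fitted model as in the Usage TLDR below, and that the export method accepts an output file name; the file name here is illustrative):
```python
#After model.fit(dataFeatures, dataPhenotypes) has completed
model.export_iteration_tracking_data('iterationData.csv') #output file name is an assumption
```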
## Usage
For more information on the eLCS algorithm and how to use it, please refer to the ["eLCS User Guide"](https://github.com/UrbsLab/scikit-eLCS/blob/master/eLCS%20User%20Guide.ipynb) Jupyter Notebook inside this repository.
## Usage TLDR
```python
#Import Necessary Packages/Modules
from skeLCS import eLCS
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score
#Load Data Using Pandas
data = pd.read_csv('myDataFile.csv') #REPLACE with your own dataset .csv filename
dataFeatures = data.drop(classLabel,axis=1).values #DEFINE classLabel variable as the Str at the top of your dataset's class column
dataPhenotypes = data[classLabel].values
#Shuffle Data Before CV
formatted = np.insert(dataFeatures,dataFeatures.shape[1],dataPhenotypes,1)
np.random.shuffle(formatted)
dataFeatures = np.delete(formatted,-1,axis=1)
dataPhenotypes = formatted[:,-1]
#Initialize eLCS Model
model = eLCS(learning_iterations = 5000)
#3-fold CV
print(np.mean(cross_val_score(model,dataFeatures,dataPhenotypes,cv=3)))
```
## License
Please see the repository [license](https://github.com/UrbsLab/scikit-eLCS/blob/master/LICENSE) for the licensing and usage information for scikit-eLCS.
Generally, we have licensed scikit-eLCS to make it as widely usable as possible.
## Installation
scikit-eLCS is built on top of the following Python packages:
<ol>
<li> numpy </li>
<li> pandas </li>
<li> scikit-learn </li>
</ol>
Once the prerequisites are installed, you can install scikit-eLCS with a pip command:
```
pip/pip3 install scikit-elcs
```
We strongly recommend you use Python 3. scikit-eLCS does not support Python 2, given its deprecation on Jan 1, 2020. If something goes wrong during installation, make sure that your pip is up to date and try again.
```
pip/pip3 install --upgrade pip
```
## Contributing to scikit-eLCS
scikit-eLCS is an open source project and we'd love it if you could suggest changes!
<ol>
<li> Fork the project repository to your personal account and clone this copy to your local disk</li>
<li> Create a branch from master to hold your changes: (e.g. <b>git checkout -b my-contribution-branch</b>) </li>
<li> Commit changes on your branch. Remember to never work on any other branch but your own! </li>
<li> When you are done, push your changes to your forked GitHub repository with <b>git push -u origin my-contribution-branch</b> </li>
<li> Create a pull request to send your changes to the scikit-eLCS maintainers for review. </li>
</ol>
**Before submitting your pull request**
If your contribution changes eLCS in any way, make sure you update the Jupyter Notebook documentation and the README with relevant details. If your contribution involves any code changes, update the project unit tests to test your code changes, and make sure your code is properly commented to explain your rationale behind non-obvious coding practices.
**After submitting your pull request**
After submitting your pull request, Travis CI will run all of the project's unit tests. Check back shortly after submitting to make sure your code passes these checks. If any checks come back failed, do your best to address the errors.
| scikit-eLCS | /scikit-eLCS-1.2.4.tar.gz/scikit-eLCS-1.2.4/README.md | README.md
import time
# --------------------------------------
class Timer:
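    """Tracks cumulative wall-clock time spent in each phase of the eLCS training cycle (matching, deletion, subsumption, selection, evaluation) plus total run time."""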
def __init__(self):
# Global Time objects
self.globalStartRef = time.time()
self.globalTime = 0.0
self.globalAdd = 0
# Match Time Variables
self.startRefMatching = 0.0
self.globalMatching = 0.0
# Deletion Time Variables
self.startRefDeletion = 0.0
self.globalDeletion = 0.0
# Subsumption Time Variables
self.startRefSubsumption = 0.0
self.globalSubsumption = 0.0
# Selection Time Variables
self.startRefSelection = 0.0
self.globalSelection = 0.0
# Evaluation Time Variables
self.startRefEvaluation = 0.0
self.globalEvaluation = 0.0
# ************************************************************
def startTimeMatching(self):
""" Tracks MatchSet Time """
self.startRefMatching = time.time()
def stopTimeMatching(self):
""" Tracks MatchSet Time """
diff = time.time() - self.startRefMatching
self.globalMatching += diff
# ************************************************************
def startTimeDeletion(self):
""" Tracks Deletion Time """
self.startRefDeletion = time.time()
def stopTimeDeletion(self):
""" Tracks Deletion Time """
diff = time.time() - self.startRefDeletion
self.globalDeletion += diff
# ************************************************************
def startTimeSubsumption(self):
"""Tracks Subsumption Time """
self.startRefSubsumption = time.time()
def stopTimeSubsumption(self):
"""Tracks Subsumption Time """
diff = time.time() - self.startRefSubsumption
self.globalSubsumption += diff
# ************************************************************
def startTimeSelection(self):
""" Tracks Selection Time """
self.startRefSelection = time.time()
def stopTimeSelection(self):
""" Tracks Selection Time """
diff = time.time() - self.startRefSelection
self.globalSelection += diff
# ************************************************************
def startTimeEvaluation(self):
""" Tracks Evaluation Time """
self.startRefEvaluation = time.time()
def stopTimeEvaluation(self):
""" Tracks Evaluation Time """
diff = time.time() - self.startRefEvaluation
self.globalEvaluation += diff
# ************************************************************
def updateGlobalTime(self):
        self.globalTime = (time.time() - self.globalStartRef)+self.globalAdd

| scikit-eLCS | /scikit-eLCS-1.2.4.tar.gz/scikit-eLCS-1.2.4/skeLCS/Timer.py | Timer.py
import random
import copy
import math
class Classifier:
def __init__(self,elcs,a=None,b=None,c=None,d=None):
#Major Parameters
self.specifiedAttList = []
self.condition = []
self.phenotype = None #arbitrary
self.fitness = elcs.init_fit
self.accuracy = 0.0
self.numerosity = 1
self.aveMatchSetSize = None
self.deletionProb = None
# Experience Management
self.timeStampGA = None
self.initTimeStamp = None
# Classifier Accuracy Tracking --------------------------------------
self.matchCount = 0 # Known in many LCS implementations as experience i.e. the total number of times this classifier was in a match set
self.correctCount = 0 # The total number of times this classifier was in a correct set
if isinstance(c, list):
self.classifierCovering(elcs, a, b, c, d)
elif isinstance(a, Classifier):
self.classifierCopy(a, b)
# Classifier Construction Methods
def classifierCovering(self, elcs, setSize, exploreIter, state, phenotype):
# Initialize new classifier parameters----------
self.timeStampGA = exploreIter
self.initTimeStamp = exploreIter
self.aveMatchSetSize = setSize
dataInfo = elcs.env.formatData
# -------------------------------------------------------
# DISCRETE PHENOTYPE
# -------------------------------------------------------
if dataInfo.discretePhenotype:
self.phenotype = phenotype
# -------------------------------------------------------
# CONTINUOUS PHENOTYPE
# -------------------------------------------------------
else:
phenotypeRange = dataInfo.phenotypeList[1] - dataInfo.phenotypeList[0]
rangeRadius = random.randint(25,75) * 0.01 * phenotypeRange / 2.0 # Continuous initialization domain radius.
Low = float(phenotype) - rangeRadius
High = float(phenotype) + rangeRadius
self.phenotype = [Low, High]
while len(self.specifiedAttList) < 1:
for attRef in range(len(state)):
if random.random() < elcs.p_spec and not(state[attRef] == None):
self.specifiedAttList.append(attRef)
self.buildMatch(elcs, attRef, state) # Add classifierConditionElement
def classifierCopy(self, toCopy, exploreIter):
self.specifiedAttList = copy.deepcopy(toCopy.specifiedAttList)
self.condition = copy.deepcopy(toCopy.condition)
self.phenotype = copy.deepcopy(toCopy.phenotype)
self.timeStampGA = exploreIter
self.initTimeStamp = exploreIter
self.aveMatchSetSize = copy.deepcopy(toCopy.aveMatchSetSize)
self.fitness = toCopy.fitness
self.accuracy = toCopy.accuracy
def buildMatch(self, elcs, attRef, state):
attributeInfoType = elcs.env.formatData.attributeInfoType[attRef]
if not(attributeInfoType): #Discrete
attributeInfoValue = elcs.env.formatData.attributeInfoDiscrete[attRef]
else:
attributeInfoValue = elcs.env.formatData.attributeInfoContinuous[attRef]
# Continuous attribute
if attributeInfoType:
attRange = attributeInfoValue[1] - attributeInfoValue[0]
rangeRadius = random.randint(25, 75) * 0.01 * attRange / 2.0 # Continuous initialization domain radius.
ar = state[attRef]
Low = ar - rangeRadius
High = ar + rangeRadius
condList = [Low, High]
self.condition.append(condList)
# Discrete attribute
else:
condList = state[attRef]
self.condition.append(condList)
# Matching
def match(self, state, elcs):
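        """ Returns True if this classifier's condition matches the given instance state, honoring the match_for_missingness setting for missing values. """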
for i in range(len(self.condition)):
specifiedIndex = self.specifiedAttList[i]
attributeInfoType = elcs.env.formatData.attributeInfoType[specifiedIndex]
# Continuous
if attributeInfoType:
instanceValue = state[specifiedIndex]
if elcs.match_for_missingness:
if instanceValue == None:
pass
elif self.condition[i][0] < instanceValue < self.condition[i][1]:
pass
else:
return False
else:
if instanceValue == None:
return False
elif self.condition[i][0] < instanceValue < self.condition[i][1]:
pass
else:
return False
# Discrete
else:
stateRep = state[specifiedIndex]
if elcs.match_for_missingness:
if stateRep == self.condition[i] or stateRep == None:
pass
else:
return False
else:
if stateRep == self.condition[i]:
pass
elif stateRep == None:
return False
else:
return False
return True
def equals(self, elcs, cl):
if cl.phenotype == self.phenotype and len(cl.specifiedAttList) == len(self.specifiedAttList):
clRefs = sorted(cl.specifiedAttList)
selfRefs = sorted(self.specifiedAttList)
if clRefs == selfRefs:
for i in range(len(cl.specifiedAttList)):
tempIndex = self.specifiedAttList.index(cl.specifiedAttList[i])
if not (cl.condition[i] == self.condition[tempIndex]):
return False
return True
return False
def updateNumerosity(self, num):
""" Updates the numberosity of the classifier. Notice that 'num' can be negative! """
self.numerosity += num
def updateExperience(self):
""" Increases the experience of the classifier by one. Once an epoch has completed, rule accuracy can't change."""
self.matchCount += 1
def updateCorrect(self):
""" Increases the correct phenotype tracking by one. Once an epoch has completed, rule accuracy can't change."""
self.correctCount += 1
def updateMatchSetSize(self, elcs, matchSetSize):
""" Updates the average match set size. """
if self.matchCount < 1.0 / elcs.beta:
self.aveMatchSetSize = (self.aveMatchSetSize * (self.matchCount - 1) + matchSetSize) / float(
self.matchCount)
else:
self.aveMatchSetSize = self.aveMatchSetSize + elcs.beta * (matchSetSize - self.aveMatchSetSize)
def updateAccuracy(self):
""" Update the accuracy tracker """
self.accuracy = self.correctCount / float(self.matchCount)
def updateFitness(self, elcs):
""" Update the fitness parameter. """
if elcs.env.formatData.discretePhenotype or (
self.phenotype[1] - self.phenotype[0]) / elcs.env.formatData.phenotypeRange < 0.5:
self.fitness = pow(self.accuracy, elcs.nu)
else:
if (self.phenotype[1] - self.phenotype[0]) >= elcs.env.formatData.phenotypeRange:
self.fitness = 0.0
else:
self.fitness = math.fabs(pow(self.accuracy, elcs.nu) - (
self.phenotype[1] - self.phenotype[0]) / elcs.env.formatData.phenotypeRange)
def isSubsumer(self, elcs):
if self.matchCount > elcs.theta_sub and self.accuracy > elcs.acc_sub:
return True
return False
def isMoreGeneral(self, cl, elcs):
if len(self.specifiedAttList) >= len(cl.specifiedAttList):
return False
for i in range(len(self.specifiedAttList)):
attributeInfoType = elcs.env.formatData.attributeInfoType[self.specifiedAttList[i]]
if self.specifiedAttList[i] not in cl.specifiedAttList:
return False
# Continuous
if attributeInfoType:
otherRef = cl.specifiedAttList.index(self.specifiedAttList[i])
if self.condition[i][0] < cl.condition[otherRef][0]:
return False
if self.condition[i][1] > cl.condition[otherRef][1]:
return False
return True
def uniformCrossover(self, elcs, cl):
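        """ Applies uniform crossover between this classifier and cl, potentially swapping specified attributes and condition elements between the two rules (or crossing phenotype ranges for continuous phenotypes). Returns True if either rule changed. """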
if elcs.env.formatData.discretePhenotype or random.random() < 0.5:
p_self_specifiedAttList = copy.deepcopy(self.specifiedAttList)
p_cl_specifiedAttList = copy.deepcopy(cl.specifiedAttList)
# Make list of attribute references appearing in at least one of the parents.-----------------------------
comboAttList = []
for i in p_self_specifiedAttList:
comboAttList.append(i)
for i in p_cl_specifiedAttList:
if i not in comboAttList:
comboAttList.append(i)
elif not elcs.env.formatData.attributeInfoType[i]:
comboAttList.remove(i)
comboAttList.sort()
changed = False
for attRef in comboAttList:
attributeInfoType = elcs.env.formatData.attributeInfoType[attRef]
probability = 0.5
ref = 0
if attRef in p_self_specifiedAttList:
ref += 1
if attRef in p_cl_specifiedAttList:
ref += 1
if ref == 0:
pass
elif ref == 1:
if attRef in p_self_specifiedAttList and random.random() > probability:
i = self.specifiedAttList.index(attRef)
cl.condition.append(self.condition.pop(i))
cl.specifiedAttList.append(attRef)
self.specifiedAttList.remove(attRef)
changed = True
if attRef in p_cl_specifiedAttList and random.random() < probability:
i = cl.specifiedAttList.index(attRef)
self.condition.append(cl.condition.pop(i))
self.specifiedAttList.append(attRef)
cl.specifiedAttList.remove(attRef)
changed = True
else:
# Continuous Attribute
if attributeInfoType:
i_cl1 = self.specifiedAttList.index(attRef)
i_cl2 = cl.specifiedAttList.index(attRef)
tempKey = random.randint(0, 3)
if tempKey == 0:
temp = self.condition[i_cl1][0]
self.condition[i_cl1][0] = cl.condition[i_cl2][0]
cl.condition[i_cl2][0] = temp
elif tempKey == 1:
temp = self.condition[i_cl1][1]
self.condition[i_cl1][1] = cl.condition[i_cl2][1]
cl.condition[i_cl2][1] = temp
else:
allList = self.condition[i_cl1] + cl.condition[i_cl2]
newMin = min(allList)
newMax = max(allList)
if tempKey == 2:
self.condition[i_cl1] = [newMin, newMax]
cl.condition.pop(i_cl2)
cl.specifiedAttList.remove(attRef)
else:
cl.condition[i_cl2] = [newMin, newMax]
self.condition.pop(i_cl1)
self.specifiedAttList.remove(attRef)
# Discrete Attribute
else:
pass
tempList1 = copy.deepcopy(p_self_specifiedAttList)
tempList2 = copy.deepcopy(cl.specifiedAttList)
tempList1.sort()
tempList2.sort()
if changed and len(set(tempList1) & set(tempList2)) == len(tempList2):
changed = False
return changed
else:
return self.phenotypeCrossover(cl)
def phenotypeCrossover(self, cl):
changed = False
if self.phenotype == cl.phenotype:
return changed
else:
            tempKey = random.random() < 0.5 # Random choice between two scenarios: swap minimums or swap maximums
            if tempKey: # Swap minimum
                temp = self.phenotype[0]
                self.phenotype[0] = cl.phenotype[0]
                cl.phenotype[0] = temp
                changed = True
            else: # Swap maximum
temp = self.phenotype[1]
self.phenotype[1] = cl.phenotype[1]
cl.phenotype[1] = temp
changed = True
return changed
def Mutation(self, elcs, state, phenotype):
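        """ Mutates the rule's condition (specifying, generalizing, or shifting attributes, each with probability mu) and then its phenotype. Returns True if anything changed. """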
changed = False
# Mutate Condition
for attRef in range(elcs.env.formatData.numAttributes):
attributeInfoType = elcs.env.formatData.attributeInfoType[attRef]
if not (attributeInfoType): # Discrete
attributeInfoValue = elcs.env.formatData.attributeInfoDiscrete[attRef]
else:
attributeInfoValue = elcs.env.formatData.attributeInfoContinuous[attRef]
if random.random() < elcs.mu and not(state[attRef] == None):
# Mutation
if attRef not in self.specifiedAttList:
self.specifiedAttList.append(attRef)
self.buildMatch(elcs, attRef, state)
changed = True
elif attRef in self.specifiedAttList:
i = self.specifiedAttList.index(attRef)
if not attributeInfoType or random.random() > 0.5:
del self.specifiedAttList[i]
del self.condition[i]
changed = True
else:
attRange = float(attributeInfoValue[1]) - float(attributeInfoValue[0])
mutateRange = random.random() * 0.5 * attRange
if random.random() > 0.5:
if random.random() > 0.5:
self.condition[i][0] += mutateRange
else:
self.condition[i][0] -= mutateRange
else:
if random.random() > 0.5:
self.condition[i][1] += mutateRange
else:
self.condition[i][1] -= mutateRange
self.condition[i] = sorted(self.condition[i])
changed = True
else:
pass
# Mutate Phenotype
if elcs.env.formatData.discretePhenotype:
nowChanged = self.discretePhenotypeMutation(elcs)
else:
nowChanged = self.continuousPhenotypeMutation(elcs, phenotype)
        if changed or nowChanged:
            return True
        return False
def discretePhenotypeMutation(self, elcs):
changed = False
if random.random() < elcs.mu:
phenotypeList = copy.deepcopy(elcs.env.formatData.phenotypeList)
phenotypeList.remove(self.phenotype)
newPhenotype = random.choice(phenotypeList)
self.phenotype = newPhenotype
changed = True
return changed
def continuousPhenotypeMutation(self, elcs, phenotype):
changed = False
if random.random() < elcs.mu:
phenRange = self.phenotype[1] - self.phenotype[0]
mutateRange = random.random() * 0.5 * phenRange
tempKey = random.randint(0,2) # Make random choice between 3 scenarios, mutate minimums, mutate maximums, mutate both
if tempKey == 0: # Mutate minimum
if random.random() > 0.5 or self.phenotype[0] + mutateRange <= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[0] += mutateRange
else: # Subtract
self.phenotype[0] -= mutateRange
changed = True
elif tempKey == 1: # Mutate maximum
if random.random() > 0.5 or self.phenotype[1] - mutateRange >= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[1] -= mutateRange
else: # Subtract
self.phenotype[1] += mutateRange
changed = True
else: # mutate both
if random.random() > 0.5 or self.phenotype[0] + mutateRange <= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[0] += mutateRange
else: # Subtract
self.phenotype[0] -= mutateRange
if random.random() > 0.5 or self.phenotype[1] - mutateRange >= phenotype: # Checks that mutated range still contains current phenotype
self.phenotype[1] -= mutateRange
else: # Subtract
self.phenotype[1] += mutateRange
changed = True
self.phenotype.sort()
return changed
def updateTimeStamp(self, ts):
""" Sets the time stamp of the classifier. """
self.timeStampGA = ts
def setAccuracy(self, acc):
""" Sets the accuracy of the classifier """
self.accuracy = acc
def setFitness(self, fit):
""" Sets the fitness of the classifier. """
self.fitness = fit
def subsumes(self, elcs, cl):
# Discrete Phenotype
if elcs.env.formatData.discretePhenotype:
if cl.phenotype == self.phenotype:
if self.isSubsumer(elcs) and self.isMoreGeneral(cl, elcs):
return True
return False
# Continuous Phenotype
else:
if self.phenotype[0] >= cl.phenotype[0] and self.phenotype[1] <= cl.phenotype[1]:
if self.isSubsumer(elcs) and self.isMoreGeneral(cl, elcs):
return True
return False
def getDelProp(self, elcs, meanFitness):
""" Returns the vote for deletion of the classifier. """
if self.fitness / self.numerosity >= elcs.delta * meanFitness or self.matchCount < elcs.theta_del:
deletionVote = self.aveMatchSetSize * self.numerosity
elif self.fitness == 0.0:
deletionVote = self.aveMatchSetSize * self.numerosity * meanFitness / (elcs.init_fit / self.numerosity)
else:
deletionVote = self.aveMatchSetSize * self.numerosity * meanFitness / (self.fitness / self.numerosity)
        return deletionVote

| scikit-eLCS | /scikit-eLCS-1.2.4.tar.gz/scikit-eLCS-1.2.4/skeLCS/Classifier.py | Classifier.py
import numpy as np
import pandas as pd
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
class StringEnumerator:
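    """Converts string/categorical CSV data into the fully numeric format required by eLCS, using user-supplied or randomly generated attribute and class value mappings."""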
def __init__(self, inputFile, classLabel):
self.classLabel = classLabel
self.map = {} #Dictionary of header names: Attribute dictionaries
data = pd.read_csv(inputFile, sep=',') # Puts data from csv into indexable np arrays
data = data.fillna("NA")
self.dataFeatures = data.drop(classLabel, axis=1).values #splits into an array of instances
self.dataPhenotypes = data[classLabel].values
self.dataHeaders = data.drop(classLabel, axis=1).columns.values
tempPhenoArray = np.empty(len(self.dataPhenotypes),dtype=object)
for instanceIndex in range(len(self.dataPhenotypes)):
tempPhenoArray[instanceIndex] = str(self.dataPhenotypes[instanceIndex])
self.dataPhenotypes = tempPhenoArray
tempFeatureArray = np.empty((len(self.dataPhenotypes),len(self.dataHeaders)),dtype=object)
for instanceIndex in range(len(self.dataFeatures)):
for attrInst in range(len(self.dataHeaders)):
tempFeatureArray[instanceIndex][attrInst] = str(self.dataFeatures[instanceIndex][attrInst])
self.dataFeatures = tempFeatureArray
self.delete_all_instances_without_phenotype()
def print_invalid_attributes(self):
print("ALL INVALID ATTRIBUTES & THEIR DISTINCT VALUES")
for attr in range(len(self.dataHeaders)):
distinctValues = []
isInvalid = False
for instIndex in range(len(self.dataFeatures)):
val = self.dataFeatures[instIndex,attr]
if not val in distinctValues and val != "NA":
distinctValues.append(self.dataFeatures[instIndex,attr])
if val != "NA":
try:
float(val)
except:
isInvalid = True
if isInvalid:
print(str(self.dataHeaders[attr])+": ",end="")
for i in distinctValues:
print(str(i)+"\t",end="")
print()
distinctValues = []
isInvalid = False
for instIndex in range(len(self.dataPhenotypes)):
val = self.dataPhenotypes[instIndex]
if not val in distinctValues and val != "NA":
distinctValues.append(self.dataPhenotypes[instIndex])
if val != "NA":
try:
float(val)
except:
isInvalid = True
if isInvalid:
print(str(self.classLabel)+" (the phenotype): ",end="")
for i in distinctValues:
print(str(i)+"\t",end="")
print()
def change_class_name(self,newName):
if newName in self.dataHeaders:
raise Exception("New Class Name Cannot Be An Already Existing Data Header Name")
if self.classLabel in self.map.keys():
            self.map[newName] = self.map.pop(self.classLabel)
self.classLabel = newName
def change_header_name(self,currentName,newName):
if newName in self.dataHeaders or newName == self.classLabel:
raise Exception("New Class Name Cannot Be An Already Existing Data Header or Phenotype Name")
if currentName in self.dataHeaders:
headerIndex = np.where(self.dataHeaders == currentName)[0][0]
self.dataHeaders[headerIndex] = newName
if currentName in self.map.keys():
self.map[newName] = self.map.pop(currentName)
else:
raise Exception("Current Header Doesn't Exist")
    def add_attribute_converter(self,headerName,array): #array is a list of strings, ordered by how they are to be enumerated
if headerName in self.dataHeaders and not (headerName in self.map):
newAttributeConverter = {}
for index in range(len(array)):
if str(array[index]) != "NA" and str(array[index]) != "" and str(array[index]) != "NaN":
newAttributeConverter[str(array[index])] = str(index)
self.map[headerName] = newAttributeConverter
def add_attribute_converter_map(self,headerName,map):
if headerName in self.dataHeaders and not (headerName in self.map) and not("" in map) and not("NA" in map) and not("NaN" in map):
self.map[headerName] = map
else:
raise Exception("Invalid Map")
def add_attribute_converter_random(self,headerName):
if headerName in self.dataHeaders and not (headerName in self.map):
headerIndex = np.where(self.dataHeaders == headerName)[0][0]
uniqueItems = []
for instance in self.dataFeatures:
if not(instance[headerIndex] in uniqueItems) and instance[headerIndex] != "NA":
uniqueItems.append(instance[headerIndex])
self.add_attribute_converter(headerName,np.array(uniqueItems))
def add_class_converter(self,array):
if not (self.classLabel in self.map.keys()):
newAttributeConverter = {}
for index in range(len(array)):
newAttributeConverter[str(array[index])] = str(index)
self.map[self.classLabel] = newAttributeConverter
def add_class_converter_random(self):
if not (self.classLabel in self.map.keys()):
uniqueItems = []
for instance in self.dataPhenotypes:
if not (instance in uniqueItems) and instance != "NA":
uniqueItems.append(instance)
self.add_class_converter(np.array(uniqueItems))
def convert_all_attributes(self):
for attribute in self.dataHeaders:
if attribute in self.map.keys():
i = np.where(self.dataHeaders == attribute)[0][0]
for state in self.dataFeatures:#goes through each instance's state
if (state[i] in self.map[attribute].keys()):
state[i] = self.map[attribute][state[i]]
if self.classLabel in self.map.keys():
for state in self.dataPhenotypes:
if (state in self.map[self.classLabel].keys()):
i = np.where(self.dataPhenotypes == state)
self.dataPhenotypes[i] = self.map[self.classLabel][state]
def delete_attribute(self,headerName):
if headerName in self.dataHeaders:
i = np.where(headerName == self.dataHeaders)[0][0]
self.dataHeaders = np.delete(self.dataHeaders,i)
if headerName in self.map.keys():
del self.map[headerName]
newFeatures = []
for instanceIndex in range(len(self.dataFeatures)):
instance = np.delete(self.dataFeatures[instanceIndex],i)
newFeatures.append(instance)
self.dataFeatures = np.array(newFeatures)
else:
raise Exception("Header Doesn't Exist")
def delete_all_instances_without_header_data(self,headerName):
newFeatures = []
newPhenotypes = []
attributeIndex = np.where(self.dataHeaders == headerName)[0][0]
for instanceIndex in range(len(self.dataFeatures)):
instance = self.dataFeatures[instanceIndex]
if instance[attributeIndex] != "NA":
newFeatures.append(instance)
newPhenotypes.append(self.dataPhenotypes[instanceIndex])
self.dataFeatures = np.array(newFeatures)
self.dataPhenotypes = np.array(newPhenotypes)
def delete_all_instances_without_phenotype(self):
newFeatures = []
newPhenotypes = []
for instanceIndex in range(len(self.dataFeatures)):
instance = self.dataPhenotypes[instanceIndex]
if instance != "NA":
newFeatures.append(self.dataFeatures[instanceIndex])
newPhenotypes.append(instance)
self.dataFeatures = np.array(newFeatures)
self.dataPhenotypes = np.array(newPhenotypes)
def print(self):
isFullNumber = self.check_is_full_numeric()
print("Converted Data Features and Phenotypes")
for header in self.dataHeaders:
print(header,end="\t")
print()
for instanceIndex in range(len(self.dataFeatures)):
for attribute in self.dataFeatures[instanceIndex]:
if attribute != "NA":
if (isFullNumber):
print(float(attribute), end="\t")
else:
print(attribute, end="\t\t")
else:
print("NA", end = "\t")
if self.dataPhenotypes[instanceIndex] != "NA":
if (isFullNumber):
print(float(self.dataPhenotypes[instanceIndex]))
else:
print(self.dataPhenotypes[instanceIndex])
else:
print("NA")
print()
def print_attribute_conversions(self):
print("Changed Attribute Conversions")
        for headerName, conversions in self.map.items():
            print(headerName + " conversions:")
            for original, numberVal in conversions.items():
                print("\tOriginal: " + original + " Converted: " + numberVal)
print()
print()
def check_is_full_numeric(self):
try:
for instance in self.dataFeatures:
for value in instance:
if value != "NA":
float(value)
for value in self.dataPhenotypes:
if value != "NA":
float(value)
except:
return False
return True
def get_params(self):
if not(self.check_is_full_numeric()):
raise Exception("Features and Phenotypes must be fully numeric")
newFeatures = []
newPhenotypes = []
for instanceIndex in range(len(self.dataFeatures)):
newInstance = []
for attribute in self.dataFeatures[instanceIndex]:
if attribute == "NA":
newInstance.append(np.nan)
else:
newInstance.append(float(attribute))
newFeatures.append(np.array(newInstance,dtype=float))
if self.dataPhenotypes[instanceIndex] == "NA": #Should never happen. All NaN phenotypes should be removed automatically at init. Just a safety mechanism.
newPhenotypes.append(np.nan)
else:
newPhenotypes.append(float(self.dataPhenotypes[instanceIndex]))
        return self.dataHeaders,self.classLabel,np.array(newFeatures,dtype=float),np.array(newPhenotypes,dtype=float)

| scikit-eLCS | /scikit-eLCS-1.2.4.tar.gz/scikit-eLCS-1.2.4/skeLCS/DataCleanup.py | DataCleanup.py
from skeLCS.Classifier import Classifier
import random
import copy
class ClassifierSet:
def __init__(self):
#Major Parameters
self.popSet = []
self.matchSet = []
self.correctSet = []
self.microPopSize = 0
    def makeMatchSet(self,state_phenotype,exploreIter,elcs):
        """Build the match set [M] for the current instance; trigger covering when no matching classifier advocates the instance's phenotype."""
state = state_phenotype[0]
phenotype = state_phenotype[1]
doCovering = True
setNumerositySum = 0
#Matching
elcs.timer.startTimeMatching()
for i in range(len(self.popSet)):
cl = self.popSet[i]
if cl.match(state,elcs):
self.matchSet.append(i)
setNumerositySum += cl.numerosity
#Covering Check
if elcs.env.formatData.discretePhenotype:
if cl.phenotype == phenotype:
doCovering = False
else:
if float(cl.phenotype[0]) <= float(phenotype) <= float(cl.phenotype[1]):
doCovering = False
elcs.timer.stopTimeMatching()
#Covering
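        # Covering: when no matching classifier advocates the correct phenotype, generate a new
        # classifier from the current state/phenotype pair and append it directly to [M].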
while doCovering:
newCl = Classifier(elcs,setNumerositySum+1,exploreIter,state,phenotype)
self.addClassifierToPopulation(elcs,newCl,True)
self.matchSet.append(len(self.popSet) - 1)
elcs.trackingObj.coveringCount+=1
doCovering = False
def getIdenticalClassifier(self,elcs,newCl):
for cl in self.popSet:
if newCl.equals(elcs,cl):
return cl
return None
def addClassifierToPopulation(self,elcs,cl,covering):
oldCl = None
if not covering:
oldCl = self.getIdenticalClassifier(elcs,cl)
if oldCl != None:
oldCl.updateNumerosity(1)
self.microPopSize += 1
else:
self.popSet.append(cl)
self.microPopSize += 1
    def makeCorrectSet(self,elcs,phenotype):
        """Build the correct set [C] from match-set classifiers whose phenotype equals (or, for continuous phenotypes, brackets) the instance phenotype."""
for i in range(len(self.matchSet)):
ref = self.matchSet[i]
#Discrete Phenotype
if elcs.env.formatData.discretePhenotype:
if self.popSet[ref].phenotype == phenotype:
self.correctSet.append(ref)
#Continuous Phenotype
else:
if float(phenotype) <= float(self.popSet[ref].phenotype[1]) and float(phenotype) >= float(self.popSet[ref].phenotype[0]):
self.correctSet.append(ref)
def updateSets(self,elcs,exploreIter):
matchSetNumerosity = 0
for ref in self.matchSet:
matchSetNumerosity += self.popSet[ref].numerosity
for ref in self.matchSet:
self.popSet[ref].updateExperience()
self.popSet[ref].updateMatchSetSize(elcs,matchSetNumerosity)
if ref in self.correctSet:
self.popSet[ref].updateCorrect()
self.popSet[ref].updateAccuracy()
self.popSet[ref].updateFitness(elcs)
    def do_correct_set_subsumption(self,elcs):
        """Find the most general subsumer in the correct set and let it absorb every classifier it subsumes."""
subsumer = None
for ref in self.correctSet:
cl = self.popSet[ref]
if cl.isSubsumer(elcs):
if subsumer == None or cl.isMoreGeneral(subsumer,elcs):
subsumer = cl
if subsumer != None:
i = 0
while i < len(self.correctSet):
ref = self.correctSet[i]
if subsumer.isMoreGeneral(self.popSet[ref],elcs):
elcs.trackingObj.subsumptionCount += 1
subsumer.updateNumerosity(self.popSet[ref].numerosity)
self.removeMacroClassifier(ref)
self.deleteFromMatchSet(ref)
self.deleteFromCorrectSet(ref)
i -= 1
i+=1
def removeMacroClassifier(self,ref):
del self.popSet[ref]
def deleteFromMatchSet(self,deleteRef):
if deleteRef in self.matchSet:
self.matchSet.remove(deleteRef)
for j in range(len(self.matchSet)):
ref = self.matchSet[j]
if ref > deleteRef:
self.matchSet[j] -=1
def deleteFromCorrectSet(self,deleteRef):
if deleteRef in self.correctSet:
self.correctSet.remove(deleteRef)
for j in range(len(self.correctSet)):
ref = self.correctSet[j]
if ref > deleteRef:
self.correctSet[j] -= 1
    def runGA(self,elcs,exploreIter,state,phenotype):
        """Run the genetic algorithm on the correct set: parent selection, uniform crossover, mutation, and insertion of offspring (with optional subsumption)."""
#GA Run Requirement
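        # The GA fires only when the numerosity-weighted average time since the classifiers in
        # [C] last took part in a GA reaches the theta_GA threshold.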
if (exploreIter - self.getIterStampAverage()) < elcs.theta_GA:
return
elcs.timer.startTimeSelection()
self.setIterStamps(exploreIter)
changed = False
#Select Parents
if elcs.selection_method == "roulette":
selectList = self.selectClassifierRW()
clP1 = selectList[0]
clP2 = selectList[1]
elif elcs.selection_method == "tournament":
selectList = self.selectClassifierT(elcs)
clP1 = selectList[0]
clP2 = selectList[1]
elcs.timer.stopTimeSelection()
#Initialize Offspring
cl1 = Classifier(elcs,clP1,exploreIter)
if clP2 == None:
cl2 = Classifier(elcs,clP1, exploreIter)
else:
cl2 = Classifier(elcs,clP2, exploreIter)
#Crossover Operator (uniform crossover)
if not cl1.equals(elcs,cl2) and random.random() < elcs.chi:
changed = cl1.uniformCrossover(elcs,cl2)
#Initialize Key Offspring Parameters
if changed:
cl1.setAccuracy((cl1.accuracy + cl2.accuracy) / 2.0)
cl1.setFitness(elcs.fitness_reduction * (cl1.fitness + cl2.fitness) / 2.0)
cl2.setAccuracy(cl1.accuracy)
cl2.setFitness(cl1.fitness)
else:
cl1.setFitness(elcs.fitness_reduction * cl1.fitness)
cl2.setFitness(elcs.fitness_reduction * cl2.fitness)
#Mutation Operator
nowchanged = cl1.Mutation(elcs,state,phenotype)
howaboutnow = cl2.Mutation(elcs,state,phenotype)
#Add offspring to population
if changed or nowchanged or howaboutnow:
if nowchanged:
elcs.trackingObj.mutationCount += 1
if howaboutnow:
elcs.trackingObj.mutationCount += 1
if changed:
elcs.trackingObj.crossOverCount += 1
self.insertDiscoveredClassifiers(elcs,cl1, cl2, clP1, clP2, exploreIter) # Subsumption
def getIterStampAverage(self):
sumCl = 0.0
numSum = 0.0
for i in range(len(self.correctSet)):
ref = self.correctSet[i]
sumCl += self.popSet[ref].timeStampGA * self.popSet[ref].numerosity
numSum += self.popSet[ref].numerosity
if numSum != 0:
return sumCl/float(numSum)
else:
return 0
def getInitStampAverage(self):
sumCl = 0.0
numSum = 0.0
for i in range(len(self.correctSet)):
ref = self.correctSet[i]
sumCl += self.popSet[ref].initTimeStamp * self.popSet[ref].numerosity
numSum += self.popSet[ref].numerosity
if numSum != 0:
return sumCl / float(numSum)
else:
return 0
def setIterStamps(self,exploreIter):
for i in range(len(self.correctSet)):
ref = self.correctSet[i]
self.popSet[ref].updateTimeStamp(exploreIter)
def selectClassifierRW(self):
setList = copy.deepcopy(self.correctSet)
if len(setList) > 2:
selectList = [None,None]
currentCount = 0
while currentCount < 2:
fitSum = self.getFitnessSum(setList)
choiceP = random.random() * fitSum
i = 0
sumCl = self.popSet[setList[i]].fitness
while choiceP > sumCl:
i = i + 1
sumCl += self.popSet[setList[i]].fitness
selectList[currentCount] = self.popSet[setList[i]]
setList.remove(setList[i])
currentCount += 1
elif len(setList) == 2:
selectList = [self.popSet[setList[0]], self.popSet[setList[1]]]
elif len(setList) == 1:
selectList = [self.popSet[setList[0]], self.popSet[setList[0]]]
return selectList
def getFitnessSum(self, setList):
""" Returns the sum of the fitnesses of all classifiers in the set. """
sumCl = 0.0
for i in range(len(setList)):
ref = setList[i]
sumCl += self.popSet[ref].fitness
return sumCl
def selectClassifierT(self,elcs):
selectList = [None, None]
currentCount = 0
setList = self.correctSet
while currentCount < 2:
tSize = int(len(setList) * elcs.theta_sel)
#Select tSize elements from correctSet
posList = random.sample(setList,tSize)
bestF = 0
bestC = self.correctSet[0]
for j in posList:
if self.popSet[j].fitness > bestF:
bestF = self.popSet[j].fitness
bestC = j
selectList[currentCount] = self.popSet[bestC]
currentCount += 1
return selectList
def insertDiscoveredClassifiers(self,elcs,cl1,cl2,clP1,clP2,exploreIter):
if elcs.do_GA_subsumption:
elcs.timer.startTimeSubsumption()
if len(cl1.specifiedAttList) > 0:
self.subsumeClassifier(elcs,cl1,clP1,clP2)
if len(cl2.specifiedAttList) > 0:
self.subsumeClassifier(elcs,cl2, clP1, clP2)
elcs.timer.stopTimeSubsumption()
else:
if len(cl1.specifiedAttList) > 0:
self.addClassifierToPopulation(elcs,cl1,False)
if len(cl2.specifiedAttList) > 0:
self.addClassifierToPopulation(elcs,cl2, False)
def subsumeClassifier(self,elcs,cl=None,cl1P=None,cl2P=None):
if cl1P != None and cl1P.subsumes(elcs,cl):
self.microPopSize += 1
cl1P.updateNumerosity(1)
elcs.trackingObj.subsumptionCount+=1
elif cl2P != None and cl2P.subsumes(elcs,cl):
self.microPopSize += 1
cl2P.updateNumerosity(1)
elcs.trackingObj.subsumptionCount += 1
else:
if len(cl.specifiedAttList) > 0:
self.addClassifierToPopulation(elcs, cl, False)
def deletion(self,elcs,exploreIter):
while (self.microPopSize > elcs.N):
self.deleteFromPopulation(elcs)
def deleteFromPopulation(self,elcs):
meanFitness = self.getPopFitnessSum() / float(self.microPopSize)
sumCl = 0.0
voteList = []
for cl in self.popSet:
vote = cl.getDelProp(elcs,meanFitness)
sumCl += vote
voteList.append(vote)
i = 0
for cl in self.popSet:
cl.deletionProb = voteList[i]/sumCl
i+=1
choicePoint = sumCl * random.random() # Determine the choice point
newSum = 0.0
for i in range(len(voteList)):
cl = self.popSet[i]
newSum = newSum + voteList[i]
if newSum > choicePoint: # Select classifier for deletion
# Delete classifier----------------------------------
cl.updateNumerosity(-1)
self.microPopSize -= 1
if cl.numerosity < 1: # When all micro-classifiers for a given classifier have been depleted.
self.removeMacroClassifier(i)
self.deleteFromMatchSet(i)
self.deleteFromCorrectSet(i)
elcs.trackingObj.deletionCount += 1
return
return
def getPopFitnessSum(self):
""" Returns the sum of the fitnesses of all classifiers in the set. """
sumCl = 0.0
for cl in self.popSet:
sumCl += cl.fitness * cl.numerosity
return sumCl
def clearSets(self):
""" Clears out references in the match and correct sets for the next learning iteration. """
self.matchSet = []
self.correctSet = []
def getAveGenerality(self,elcs):
genSum = 0
for cl in self.popSet:
genSum += ((elcs.env.formatData.numAttributes - len(cl.condition))/float(elcs.env.formatData.numAttributes))*cl.numerosity
if self.microPopSize == 0:
aveGenerality = 0
else:
aveGenerality = genSum/float(self.microPopSize)
return aveGenerality
def getAttributeSpecificityList(self,elcs):
attributeSpecList = []
for i in range(elcs.env.formatData.numAttributes):
attributeSpecList.append(0)
for cl in self.popSet:
for ref in cl.specifiedAttList:
attributeSpecList[ref] += cl.numerosity
return attributeSpecList
def getAttributeAccuracyList(self,elcs):
attributeAccList = []
for i in range(elcs.env.formatData.numAttributes):
attributeAccList.append(0.0)
for cl in self.popSet:
for ref in cl.specifiedAttList:
attributeAccList[ref] += cl.numerosity * cl.accuracy
return attributeAccList
def makeEvalMatchSet(self,state,elcs):
for i in range(len(self.popSet)):
cl = self.popSet[i]
if cl.match(state,elcs):
                self.matchSet.append(i)

| scikit-eLCS | /scikit-eLCS-1.2.4.tar.gz/scikit-eLCS-1.2.4/skeLCS/ClassifierSet.py | ClassifierSet.py |
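A minimal standalone sketch of the index bookkeeping performed by deleteFromMatchSet and deleteFromCorrectSet above: once the classifier at position delete_ref leaves popSet, every stored reference greater than delete_ref must shift down by one to keep pointing at the same classifier. The helper below is hypothetical and not part of scikit-eLCS.

def shift_refs(refs, delete_ref):
    """Hypothetical helper mirroring the logic of deleteFromMatchSet/deleteFromCorrectSet."""
    if delete_ref in refs:
        refs.remove(delete_ref)
    return [r - 1 if r > delete_ref else r for r in refs]

match_set = [0, 2, 5, 7]
assert shift_refs(match_set, 5) == [0, 2, 6]   # the classifier formerly at index 7 now sits at index 6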
import numpy as np
import scipy as sp
import warnings
from scipy.linalg import LinAlgWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils import check_X_y, check_array
warnings.simplefilter("ignore", LinAlgWarning)
class BatchCholeskySolver(BaseEstimator, RegressorMixin):
def __init__(self, alpha=1e-7):
self.alpha = alpha
def _init_XY(self, X, y):
"""Initialize covariance matrices, including a separate bias term.
"""
d_in = X.shape[1]
self._XtX = np.eye(d_in + 1) * self.alpha
self._XtX[0, 0] = 0
if len(y.shape) == 1:
self._XtY = np.zeros((d_in + 1,))
else:
self._XtY = np.zeros((d_in + 1, y.shape[1]))
@property
def XtY_(self):
return self._XtY
@property
def XtX_(self):
return self._XtX
@XtY_.setter
def XtY_(self, value):
self._XtY = value
@XtX_.setter
def XtX_(self, value):
self._XtX = value
def _solve(self):
"""Second stage of solution (X'X)B = X'Y using Cholesky decomposition.
Sets `is_fitted_` to True.
"""
B = sp.linalg.solve(self._XtX, self._XtY, assume_a='sym', overwrite_a=False, overwrite_b=False)
self.coef_ = B[1:]
self.intercept_ = B[0]
self.is_fitted_ = True
def _reset(self):
"""Erase solution and data matrices.
"""
[delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
def fit(self, X, y):
"""Solves an L2-regularized linear system like Ridge regression, overwrites any previous solutions.
"""
self._reset() # remove old solution
self.partial_fit(X, y, compute_output_weights=True)
return self
def partial_fit(self, X, y, forget=False, compute_output_weights=True):
"""Update model with a new batch of data.
Output weight computation can be temporary turned off for faster processing. This will mark model as
not fit. Enable `compute_output_weights` in the final call to `partial_fit`.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
Performs a negative update, effectively removing the information given by training
samples from the model. Output weights need to be re-computed after forgetting data.
compute_output_weights : boolean, optional, default True
Whether to compute new output weights (coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
"""
if self.alpha < 0:
raise ValueError("Regularization parameter alpha must be non-negative.")
# solution only
if X is None and y is None and compute_output_weights:
self._solve()
return self
# validate parameters
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, y_numeric=True, ensure_2d=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = "A column-vector y was passed when a 1d array was expected.\
Please change the shape of y to (n_samples, ), for example using ravel()."
warnings.warn(msg, DataConversionWarning)
# init temporary data storage
if not hasattr(self, '_XtX'):
self._init_XY(X, y)
else:
if X.shape[1] + 1 != self._XtX.shape[0]:
n_new, n_old = X.shape[1], self._XtX.shape[0] - 1
raise ValueError("Number of features %d does not match previous data %d." % (n_new, n_old))
# compute temporary data
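        # Accumulate the normal equations of the extended design matrix [1 X]:
        #   _XtX[0, 0] counts samples, _XtX[0, 1:] / _XtX[1:, 0] hold the feature sums (bias terms),
        #   _XtX[1:, 1:] holds X'X, and _XtY is built analogously from the targets.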
X_sum = safe_sparse_dot(X.T, np.ones((X.shape[0],)))
y_sum = safe_sparse_dot(y.T, np.ones((y.shape[0],)))
if not forget:
self._XtX[0, 0] += X.shape[0]
self._XtX[1:, 0] += X_sum
self._XtX[0, 1:] += X_sum
self._XtX[1:, 1:] += X.T @ X
self._XtY[0] += y_sum
self._XtY[1:] += X.T @ y
else:
print("!!! forgetting")
self._XtX[0, 0] -= X.shape[0]
self._XtX[1:, 0] -= X_sum
self._XtX[0, 1:] -= X_sum
self._XtX[1:, 1:] -= X.T @ X
self._XtY[0] -= y_sum
self._XtY[1:] -= X.T @ y
# solve
if not compute_output_weights:
# mark as not fitted
[delattr(self, attr) for attr in ('coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
else:
self._solve()
return self
def predict(self, X):
check_is_fitted(self, 'is_fitted_')
X = check_array(X, accept_sparse=True)
        return safe_sparse_dot(X, self.coef_, dense_output=True) + self.intercept_

| scikit-elm | /scikit_elm-0.21a0-py3-none-any.whl/skelm/solver_batch.py | solver_batch.py |
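A minimal sketch of incremental fitting with the BatchCholeskySolver defined above; the random data and the alpha value are illustrative assumptions only.

import numpy as np
from skelm.solver_batch import BatchCholeskySolver

rng = np.random.RandomState(0)
X, y = rng.randn(100, 5), rng.randn(100)

solver = BatchCholeskySolver(alpha=1e-3)
solver.partial_fit(X[:50], y[:50], compute_output_weights=False)  # accumulate only, no solve yet
solver.partial_fit(X[50:], y[50:])                                # second batch, then solve
y_hat = solver.predict(X)

solver.partial_fit(X[:50], y[:50], forget=True)                   # remove the first batch and re-solve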
import numpy as np
import warnings
from scipy.special import expit
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels, type_of_target
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.exceptions import DataConversionWarning, DataDimensionalityWarning
from .hidden_layer import HiddenLayer
from .solver_batch import BatchCholeskySolver
from .utils import _dense
warnings.simplefilter("ignore", DataDimensionalityWarning)
class _BaseELM(BaseEstimator):
def __init__(self, alpha=1e-7, batch_size=None, include_original_features=False,
n_neurons=None, ufunc="tanh", density=None, pairwise_metric=None,
random_state=None):
self.alpha = alpha
self.n_neurons = n_neurons
self.batch_size = batch_size
self.ufunc = ufunc
self.include_original_features = include_original_features
self.density = density
self.pairwise_metric = pairwise_metric
self.random_state = random_state
def _init_hidden_layers(self, X):
"""Init an empty model, creating objects for hidden layers and solver.
Also validates inputs for several hidden layers.
"""
# only one type of neurons
if not hasattr(self.n_neurons, '__iter__'):
hl = HiddenLayer(n_neurons=self.n_neurons, density=self.density, ufunc=self.ufunc,
pairwise_metric=self.pairwise_metric, random_state=self.random_state)
hl.fit(X)
self.hidden_layers_ = (hl, )
# several different types of neurons
else:
k = len(self.n_neurons)
# fix default values
ufuncs = self.ufunc
if isinstance(ufuncs, str) or not hasattr(ufuncs, "__iter__"):
ufuncs = [ufuncs] * k
densities = self.density
if densities is None or not hasattr(densities, "__iter__"):
densities = [densities] * k
pw_metrics = self.pairwise_metric
if pw_metrics is None or isinstance(pw_metrics, str):
pw_metrics = [pw_metrics] * k
if not k == len(ufuncs) == len(densities) == len(pw_metrics):
raise ValueError("Inconsistent parameter lengths for model with {} different types of neurons.\n"
"Set 'ufunc', 'density' and 'pairwise_distances' by lists "
"with {} elements, or leave the default values.".format(k, k))
self.hidden_layers_ = []
for n_neurons, ufunc, density, metric in zip(self.n_neurons, ufuncs, densities, pw_metrics):
hl = HiddenLayer(n_neurons=n_neurons, density=density, ufunc=ufunc,
pairwise_metric=metric, random_state=self.random_state)
hl.fit(X)
self.hidden_layers_.append(hl)
def _reset(self):
[delattr(self, attr) for attr in ('n_features_', 'solver_', 'hidden_layers_', 'is_fitted_', 'label_binarizer_') if hasattr(self, attr)]
@property
def n_neurons_(self):
if not hasattr(self, 'hidden_layers_'):
return None
neurons_count = sum([hl.n_neurons_ for hl in self.hidden_layers_])
if self.include_original_features:
neurons_count += self.n_features_
return neurons_count
@property
def coef_(self):
return self.solver_.coef_
@property
def intercept_(self):
return self.solver_.intercept_
def partial_fit(self, X, y=None, forget=False, compute_output_weights=True):
"""Update model with a new batch of data.
|method_partial_fit|
.. |method_partial_fit| replace:: Output weight computation can be temporary turned off
for faster processing. This will mark model as not fit. Enable `compute_output_weights`
in the final call to `partial_fit`.
.. |param_forget| replace:: Performs a negative update, effectively removing the information
given by training samples from the model. Output weights need to be re-computed after forgetting
data. Forgetting data that have not been learned before leads to unpredictable results.
.. |param_compute_output_weights| replace:: Whether to compute new output weights
(coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
|param_forget|
compute_output_weights : boolean, optional, default True
|param_compute_output_weights|
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
Example:
>>> model.partial_fit(X_1, y_1)
... model.partial_fit(X_2, y_2)
... model.partial_fit(X_3, y_3) # doctest: +SKIP
Faster, option 1:
>>> model.partial_fit(X_1, y_1, compute_output_weights=False)
... model.partial_fit(X_2, y_2, compute_output_weights=False)
... model.partial_fit(X_3, y_3) # doctest: +SKIP
Faster, option 2:
>>> model.partial_fit(X_1, y_1, compute_output_weights=False)
... model.partial_fit(X_2, y_2, compute_output_weights=False)
... model.partial_fit(X_3, y_3, compute_output_weights=False)
... model.partial_fit(X=None, y=None) # doctest: +SKIP
"""
# compute output weights only
if X is None and y is None and compute_output_weights:
self.solver_.partial_fit(None, None, compute_output_weights=True)
self.is_fitted_ = True
return self
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = ("A column-vector y was passed when a 1d array was expected. "
"Please change the shape of y to (n_samples, ), for example using ravel().")
warnings.warn(msg, DataConversionWarning)
n_samples, n_features = X.shape
if hasattr(self, 'n_features_') and self.n_features_ != n_features:
raise ValueError('Shape of input is different from what was seen in `fit`')
# set batch size, default is bsize=2000 or all-at-once with less than 10_000 samples
self.bsize_ = self.batch_size
if self.bsize_ is None:
self.bsize_ = n_samples if n_samples < 10 * 1000 else 2000
# init model if not fit yet
if not hasattr(self, 'hidden_layers_'):
self.n_features_ = n_features
self.solver_ = BatchCholeskySolver(alpha=self.alpha)
self._init_hidden_layers(X)
# special case of one-shot processing
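        # Every hidden layer transforms X independently; the transformed blocks (optionally
        # together with the raw input features) are concatenated before being passed to the solver.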
if self.bsize_ >= n_samples:
H = [hl.transform(X) for hl in self.hidden_layers_]
H = np.hstack(H if not self.include_original_features else [_dense(X)] + H)
self.solver_.partial_fit(H, y, forget=forget, compute_output_weights=False)
else: # batch processing
for b_start in range(0, n_samples, self.bsize_):
b_end = min(b_start + self.bsize_, n_samples)
b_X = X[b_start:b_end]
b_y = y[b_start:b_end]
b_H = [hl.transform(b_X) for hl in self.hidden_layers_]
b_H = np.hstack(b_H if not self.include_original_features else [_dense(b_X)] + b_H)
self.solver_.partial_fit(b_H, b_y, forget=forget, compute_output_weights=False)
# output weights if needed
if compute_output_weights:
self.solver_.partial_fit(None, None, compute_output_weights=True)
self.is_fitted_ = True
# mark as needing a solution
elif hasattr(self, 'is_fitted_'):
del self.is_fitted_
return self
def fit(self, X, y=None):
"""Reset model and fit on the given data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
Target values used as real numbers.
Returns
-------
self : object
Returns self.
"""
#todo: add X as bunch of files support
self._reset()
self.partial_fit(X, y)
return self
def predict(self, X):
"""Predict real valued outputs for new inputs X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data samples.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Predicted outputs for inputs X.
.. attention::
            :meth:`predict` always returns a dense matrix of predicted outputs -- unlike
in :meth:`fit`, this may cause memory issues at high number of outputs
and very high number of samples. Feed data by smaller batches in such case.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, "is_fitted_")
H = [hl.transform(X) for hl in self.hidden_layers_]
if self.include_original_features:
H = [_dense(X)] + H
H = np.hstack(H)
return self.solver_.predict(H)
class ELMRegressor(_BaseELM, RegressorMixin):
"""Extreme Learning Machine for regression problems.
This model solves a regression problem, that is a problem of predicting continuous outputs.
It supports multi-variate regression (when ``y`` is a 2d array of shape [n_samples, n_targets].)
ELM uses ``L2`` regularization, and optionally includes the original data features to
capture linear dependencies in the data natively.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Larger values specify stronger effect.
Regularization improves model stability and reduces over-fitting at the cost of some learning
capacity. The same value is used for all targets in multi-variate regression.
The optimal regularization strength is suggested to select from a large range of logarithmically
distributed values, e.g. :math:`[10^{-5}, 10^{-4}, 10^{-3}, ..., 10^4, 10^5]`. A small default
regularization value of :math:`10^{-7}` should always be present to counter numerical instabilities
in the solution; it does not affect overall model performance.
.. attention::
The model may automatically increase the regularization value if the solution
becomes unfeasible otherwise. The actual used value contains in ``alpha_`` attribute.
batch_size : int, optional
Actual computations will proceed in batches of this size, except the last batch that may be smaller.
Default behavior is to process all data at once with <10,000 samples, otherwise use batches
of size 2000.
include_original_features : boolean, default=False
        Adds extra hidden layer neurons that simply copy the input data features, adding a linear part
to the final model solution that can directly capture linear relations between data and
outputs. Effectively increases `n_neurons` by `n_inputs` leading to a larger model.
Including original features is generally a good thing if the number of data features is low.
n_neurons : int or [int], optional
Number of hidden layer neurons in ELM model, controls model size and learning capacity.
Generally number of neurons should be less than the number of training data samples, as
otherwise the model will learn the training set perfectly resulting in overfitting.
Several different kinds of neurons can be used in the same model by specifying a list of
neuron counts. ELM will create a separate neuron type for each element in the list.
In that case, the following attributes ``ufunc``, ``density`` and ``pairwise_metric``
should be lists of the same length; default values will be automatically expanded into a list.
.. note::
Models with <1,000 neurons are very fast to compute, while GPU acceleration is efficient
starting from 1,000-2,000 neurons. A standard computer should handle up to 10,000 neurons.
Very large models will not fit in memory but can still be trained by an out-of-core solver.
ufunc : {'tanh', 'sigm', 'relu', 'lin' or callable}, or a list of those (see n_neurons)
Transformation function of hidden layer neurons. Includes the following options:
- 'tanh' for hyperbolic tangent
- 'sigm' for sigmoid
- 'relu' for rectified linear unit (clamps negative values to zero)
- 'lin' for linear neurons, transformation function does nothing
        - any custom callable function like members of ``numpy.ufunc``
density : float in range (0, 1], or a list of those (see n_neurons), optional
Specifying density replaces dense projection layer by a sparse one with the specified
density of the connections. For instance, ``density=0.1`` means each hidden neuron will
be connected to a random 10% of input features. Useful for working on very high-dimensional
data, or for large numbers of neurons.
pairwise_metric : {'euclidean', 'cityblock', 'cosine' or other}, or a list of those (see n_neurons), optional
Specifying pairwise metric replaces multiplicative hidden neurons by distance-based hidden
neurons. This ELM model is known as Radial Basis Function ELM (RBF-ELM).
.. note::
Pairwise function neurons ignore ufunc and density.
Typical metrics are `euclidean`, `cityblock` and `cosine`. For a full list of metrics check
the `webpage <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html>`_
of :mod:`sklearn.metrics.pairwise_distances`.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when generating random numbers e.g.
for hidden neuron parameters. Random state instance is passed to lower level objects and routines.
Use it for repeatable experiments.
Attributes
----------
n_neurons_ : int
Number of automatically generated neurons.
ufunc_ : function
        Transformation function of hidden neurons.
projection_ : object
Hidden layer projection function.
solver_ : object
Solver instance, read solution from there.
Examples
--------
Combining ten sigmoid and twenty RBF neurons in one model:
>>> model = ELMRegressor(n_neurons=(10, 20),
... ufunc=('sigm', None),
... density=(None, None),
... pairwise_metric=(None, 'euclidean')) # doctest: +SKIP
Default values in multi-neuron ELM are automatically expanded to a list
>>> model = ELMRegressor(n_neurons=(10, 20),
... ufunc=('sigm', None),
... pairwise_metric=(None, 'euclidean')) # doctest: +SKIP
>>> model = ELMRegressor(n_neurons=(30, 30),
... pairwise_metric=('cityblock', 'cosine')) # doctest: +SKIP
"""
pass
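# A minimal end-to-end sketch for ELMRegressor (kept as a comment so the module stays
# import-clean); the random data and parameter values are illustrative assumptions only:
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X, y = rng.randn(200, 10), rng.randn(200)
#     model = ELMRegressor(n_neurons=(20, 20),
#                          ufunc=('tanh', None),
#                          pairwise_metric=(None, 'euclidean'),
#                          include_original_features=True)
#     model.fit(X, y)
#     y_hat = model.predict(X)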
class ELMClassifier(_BaseELM, ClassifierMixin):
"""ELM classifier, modified for multi-label classification support.
:param classes: Set of classes to consider in the model; can be expanded at runtime.
Samples of other classes will have their output set to zero.
    :param solver: Solver to use, "default" for built-in Least Squares or "ridge" for Ridge regression
Example descr...
Attributes
----------
X_ : ndarray, shape (n_samples, n_features)
The input passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
"""
def __init__(self, classes=None, alpha=1e-7, batch_size=None, include_original_features=False, n_neurons=None,
ufunc="tanh", density=None, pairwise_metric=None, random_state=None):
super().__init__(alpha, batch_size, include_original_features, n_neurons, ufunc, density, pairwise_metric,
random_state)
self.classes = classes
@property
def classes_(self):
return self.label_binarizer_.classes_
def _get_tags(self):
return {"multioutput": True, "multilabel": True}
def _update_classes(self, y):
if not isinstance(self.solver_, BatchCholeskySolver):
raise ValueError("Only iterative solver supports dynamic class update")
old_classes = self.label_binarizer_.classes_
partial_classes = clone(self.label_binarizer_).fit(y).classes_
# no new classes detected
if set(partial_classes) <= set(old_classes):
return
if len(old_classes) < 3:
raise ValueError("Dynamic class update has to start with at least 3 classes to function correctly; "
"provide 3 or more 'classes=[...]' during initialization.")
# get new classes sorted by LabelBinarizer
self.label_binarizer_.fit(np.hstack((old_classes, partial_classes)))
new_classes = self.label_binarizer_.classes_
# convert existing XtY matrix to new classes
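        # XtY has one column per class; classes not seen before implicitly had all-zero targets,
        # so their new columns can start at zero while existing columns are copied into their
        # re-sorted positions.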
if hasattr(self.solver_, 'XtY_'):
XtY_old = self.solver_.XtY_
XtY_new = np.zeros((XtY_old.shape[0], new_classes.shape[0]))
for i, c in enumerate(old_classes):
j = np.where(new_classes == c)[0][0]
XtY_new[:, j] = XtY_old[:, i]
self.solver_.XtY_ = XtY_new
# reset the solution
if hasattr(self.solver_, 'is_fitted_'):
del self.solver_.is_fitted_
def partial_fit(self, X, y=None, forget=False, update_classes=False, compute_output_weights=True):
"""Update classifier with a new batch of data.
|method_partial_fit|
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
|param_forget|
update_classes : boolean, default False
Include new classes from `y` into the model, assuming they were 0 in all previous samples.
compute_output_weights : boolean, optional, default True
|param_compute_output_weights|
"""
#todo: Warning on strongly non-normalized data
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True)
# init label binarizer if needed
if not hasattr(self, 'label_binarizer_'):
self.label_binarizer_ = LabelBinarizer()
if type_of_target(y).endswith("-multioutput"):
self.label_binarizer_ = MultiLabelBinarizer()
self.label_binarizer_.fit(self.classes if self.classes is not None else y)
if update_classes:
self._update_classes(y)
y_numeric = self.label_binarizer_.transform(y)
if len(y_numeric.shape) > 1 and y_numeric.shape[1] == 1:
y_numeric = y_numeric[:, 0]
super().partial_fit(X, y_numeric, forget=forget, compute_output_weights=compute_output_weights)
return self
def fit(self, X, y=None):
"""Fit a classifier erasing any previously trained model.
Returns
-------
self : object
Returns self.
"""
self._reset()
self.partial_fit(X, y, compute_output_weights=True)
return self
def predict(self, X):
"""Predict classes of new inputs X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Returns one most probable class for multi-class problem, or
a binary vector of all relevant classes for multi-label problem.
"""
check_is_fitted(self, "is_fitted_")
scores = super().predict(X)
return self.label_binarizer_.inverse_transform(scores)
def predict_proba(self, X):
"""Probability estimation for all classes.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
check_is_fitted(self, "is_fitted_")
prob = super().predict(X)
expit(prob, out=prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob

| scikit-elm | /scikit_elm-0.21a0-py3-none-any.whl/skelm/elm.py | elm.py |

import numpy as np
import warnings
from scipy.special import expit
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels, type_of_target
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.exceptions import DataConversionWarning, DataDimensionalityWarning
from .hidden_layer import HiddenLayer
from .solver_batch import BatchCholeskySolver
from .utils import _dense
warnings.simplefilter("ignore", DataDimensionalityWarning)
class _BaseELM(BaseEstimator):
def __init__(self, alpha=1e-7, batch_size=None, include_original_features=False,
n_neurons=None, ufunc="tanh", density=None, pairwise_metric=None,
random_state=None):
self.alpha = alpha
self.n_neurons = n_neurons
self.batch_size = batch_size
self.ufunc = ufunc
self.include_original_features = include_original_features
self.density = density
self.pairwise_metric = pairwise_metric
self.random_state = random_state
def _init_hidden_layers(self, X):
"""Init an empty model, creating objects for hidden layers and solver.
Also validates inputs for several hidden layers.
"""
# only one type of neurons
if not hasattr(self.n_neurons, '__iter__'):
hl = HiddenLayer(n_neurons=self.n_neurons, density=self.density, ufunc=self.ufunc,
pairwise_metric=self.pairwise_metric, random_state=self.random_state)
hl.fit(X)
self.hidden_layers_ = (hl, )
# several different types of neurons
else:
k = len(self.n_neurons)
# fix default values
ufuncs = self.ufunc
if isinstance(ufuncs, str) or not hasattr(ufuncs, "__iter__"):
ufuncs = [ufuncs] * k
densities = self.density
if densities is None or not hasattr(densities, "__iter__"):
densities = [densities] * k
pw_metrics = self.pairwise_metric
if pw_metrics is None or isinstance(pw_metrics, str):
pw_metrics = [pw_metrics] * k
if not k == len(ufuncs) == len(densities) == len(pw_metrics):
raise ValueError("Inconsistent parameter lengths for model with {} different types of neurons.\n"
"Set 'ufunc', 'density' and 'pairwise_distances' by lists "
"with {} elements, or leave the default values.".format(k, k))
self.hidden_layers_ = []
for n_neurons, ufunc, density, metric in zip(self.n_neurons, ufuncs, densities, pw_metrics):
hl = HiddenLayer(n_neurons=n_neurons, density=density, ufunc=ufunc,
pairwise_metric=metric, random_state=self.random_state)
hl.fit(X)
self.hidden_layers_.append(hl)
def _reset(self):
[delattr(self, attr) for attr in ('n_features_', 'solver_', 'hidden_layers_', 'is_fitted_', 'label_binarizer_') if hasattr(self, attr)]
@property
def n_neurons_(self):
if not hasattr(self, 'hidden_layers_'):
return None
neurons_count = sum([hl.n_neurons_ for hl in self.hidden_layers_])
if self.include_original_features:
neurons_count += self.n_features_
return neurons_count
@property
def coef_(self):
return self.solver_.coef_
@property
def intercept_(self):
return self.solver_.intercept_
def partial_fit(self, X, y=None, forget=False, compute_output_weights=True):
"""Update model with a new batch of data.
|method_partial_fit|
.. |method_partial_fit| replace:: Output weight computation can be temporary turned off
for faster processing. This will mark model as not fit. Enable `compute_output_weights`
in the final call to `partial_fit`.
.. |param_forget| replace:: Performs a negative update, effectively removing the information
given by training samples from the model. Output weights need to be re-computed after forgetting
data. Forgetting data that have not been learned before leads to unpredictable results.
.. |param_compute_output_weights| replace:: Whether to compute new output weights
(coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
|param_forget|
compute_output_weights : boolean, optional, default True
|param_compute_output_weights|
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
Example:
>>> model.partial_fit(X_1, y_1)
... model.partial_fit(X_2, y_2)
... model.partial_fit(X_3, y_3) # doctest: +SKIP
Faster, option 1:
>>> model.partial_fit(X_1, y_1, compute_output_weights=False)
... model.partial_fit(X_2, y_2, compute_output_weights=False)
... model.partial_fit(X_3, y_3) # doctest: +SKIP
Faster, option 2:
>>> model.partial_fit(X_1, y_1, compute_output_weights=False)
... model.partial_fit(X_2, y_2, compute_output_weights=False)
... model.partial_fit(X_3, y_3, compute_output_weights=False)
... model.partial_fit(X=None, y=None) # doctest: +SKIP
"""
# compute output weights only
if X is None and y is None and compute_output_weights:
self.solver_.partial_fit(None, None, compute_output_weights=True)
self.is_fitted_ = True
return self
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = ("A column-vector y was passed when a 1d array was expected. "
"Please change the shape of y to (n_samples, ), for example using ravel().")
warnings.warn(msg, DataConversionWarning)
n_samples, n_features = X.shape
if hasattr(self, 'n_features_') and self.n_features_ != n_features:
raise ValueError('Shape of input is different from what was seen in `fit`')
# set batch size, default is bsize=2000 or all-at-once with less than 10_000 samples
self.bsize_ = self.batch_size
if self.bsize_ is None:
self.bsize_ = n_samples if n_samples < 10 * 1000 else 2000
# init model if not fit yet
if not hasattr(self, 'hidden_layers_'):
self.n_features_ = n_features
self.solver_ = BatchCholeskySolver(alpha=self.alpha)
self._init_hidden_layers(X)
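# The hidden representation H horizontally stacks the outputs of all hidden
# layers (optionally prefixed by the raw input features); the solver then
# accumulates the corresponding H'H / H'y statistics batch by batch.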
# special case of one-shot processing
if self.bsize_ >= n_samples:
H = [hl.transform(X) for hl in self.hidden_layers_]
H = np.hstack(H if not self.include_original_features else [_dense(X)] + H)
self.solver_.partial_fit(H, y, forget=forget, compute_output_weights=False)
else: # batch processing
for b_start in range(0, n_samples, self.bsize_):
b_end = min(b_start + self.bsize_, n_samples)
b_X = X[b_start:b_end]
b_y = y[b_start:b_end]
b_H = [hl.transform(b_X) for hl in self.hidden_layers_]
b_H = np.hstack(b_H if not self.include_original_features else [_dense(b_X)] + b_H)
self.solver_.partial_fit(b_H, b_y, forget=forget, compute_output_weights=False)
# output weights if needed
if compute_output_weights:
self.solver_.partial_fit(None, None, compute_output_weights=True)
self.is_fitted_ = True
# mark as needing a solution
elif hasattr(self, 'is_fitted_'):
del self.is_fitted_
return self
def fit(self, X, y=None):
"""Reset model and fit on the given data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
Target values used as real numbers.
Returns
-------
self : object
Returns self.
"""
#todo: add X as bunch of files support
self._reset()
self.partial_fit(X, y)
return self
def predict(self, X):
"""Predict real valued outputs for new inputs X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data samples.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Predicted outputs for inputs X.
.. attention::
:meth:`predict` always returns a dense matrix of predicted outputs -- unlike
in :meth:`fit`, this may cause memory issues with a high number of outputs
and a very high number of samples. Feed the data in smaller batches in such cases.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, "is_fitted_")
H = [hl.transform(X) for hl in self.hidden_layers_]
if self.include_original_features:
H = [_dense(X)] + H
H = np.hstack(H)
return self.solver_.predict(H)
class ELMRegressor(_BaseELM, RegressorMixin):
"""Extreme Learning Machine for regression problems.
This model solves a regression problem, that is, a problem of predicting continuous outputs.
It supports multi-variate regression (when ``y`` is a 2d array of shape [n_samples, n_targets]).
ELM uses ``L2`` regularization, and optionally includes the original data features to
capture linear dependencies in the data natively.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Larger values specify a stronger effect.
Regularization improves model stability and reduces over-fitting at the cost of some learning
capacity. The same value is used for all targets in multi-variate regression.
It is suggested to select the optimal regularization strength from a wide range of logarithmically
distributed values, e.g. :math:`[10^{-5}, 10^{-4}, 10^{-3}, ..., 10^4, 10^5]`. A small default
regularization value of :math:`10^{-7}` should always be present to counter numerical instabilities
in the solution; it does not affect overall model performance.
.. attention::
The model may automatically increase the regularization value if the solution
becomes infeasible otherwise. The value actually used is stored in the ``alpha_`` attribute.
batch_size : int, optional
Actual computations will proceed in batches of this size, except the last batch, which may be smaller.
The default behavior is to process all data at once with fewer than 10,000 samples, and otherwise
to use batches of size 2000.
include_original_features : boolean, default=False
Adds extra hidden layer neurons that simply copy the input data features, adding a linear part
to the final model solution that can directly capture linear relations between data and
outputs. Effectively increases `n_neurons` by `n_inputs`, leading to a larger model.
Including the original features is generally a good idea if the number of data features is low.
n_neurons : int or [int], optional
Number of hidden layer neurons in the ELM model; controls model size and learning capacity.
Generally the number of neurons should be less than the number of training data samples, as
otherwise the model will learn the training set perfectly, resulting in overfitting.
Several different kinds of neurons can be used in the same model by specifying a list of
neuron counts. ELM will create a separate neuron type for each element in the list.
In that case, the following attributes ``ufunc``, ``density`` and ``pairwise_metric``
should be lists of the same length; default values will be automatically expanded into a list.
.. note::
Models with <1,000 neurons are very fast to compute, while GPU acceleration is efficient
starting from 1,000-2,000 neurons. A standard computer should handle up to 10,000 neurons.
Very large models will not fit in memory but can still be trained by an out-of-core solver.
ufunc : {'tanh', 'sigm', 'relu', 'lin' or callable}, or a list of those (see n_neurons)
Transformation function of hidden layer neurons. Includes the following options:
- 'tanh' for hyperbolic tangent
- 'sigm' for sigmoid
- 'relu' for rectified linear unit (clamps negative values to zero)
- 'lin' for linear neurons, transformation function does nothing
- any custom callable function, like members of ``numpy.ufunc``
density : float in range (0, 1], or a list of those (see n_neurons), optional
Specifying density replaces the dense projection layer with a sparse one of the specified
density of connections. For instance, ``density=0.1`` means each hidden neuron will
be connected to a random 10% of input features. Useful for working on very high-dimensional
data, or for large numbers of neurons.
pairwise_metric : {'euclidean', 'cityblock', 'cosine' or other}, or a list of those (see n_neurons), optional
Specifying pairwise metric replaces multiplicative hidden neurons by distance-based hidden
neurons. This ELM model is known as Radial Basis Function ELM (RBF-ELM).
.. note::
Pairwise function neurons ignore ufunc and density.
Typical metrics are `euclidean`, `cityblock` and `cosine`. For a full list of metrics check
the `webpage <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html>`_
of :func:`sklearn.metrics.pairwise_distances`.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when generating random numbers e.g.
for hidden neuron parameters. Random state instance is passed to lower level objects and routines.
Use it for repeatable experiments.
Attributes
----------
n_neurons_ : int
Number of automatically generated neurons.
ufunc_ : function
Transformation function of hidden neurons.
projection_ : object
Hidden layer projection function.
solver_ : object
Solver instance, read solution from there.
Examples
--------
Combining ten sigmoid and twenty RBF neurons in one model:
>>> model = ELMRegressor(n_neurons=(10, 20),
... ufunc=('sigm', None),
... density=(None, None),
... pairwise_metric=(None, 'euclidean')) # doctest: +SKIP
Default values in a multi-neuron ELM are automatically expanded to a list:
>>> model = ELMRegressor(n_neurons=(10, 20),
... ufunc=('sigm', None),
... pairwise_metric=(None, 'euclidean')) # doctest: +SKIP
>>> model = ELMRegressor(n_neurons=(30, 30),
... pairwise_metric=('cityblock', 'cosine')) # doctest: +SKIP
"""
pass
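# A minimal usage sketch (illustrative only; the data and the alpha grid below are
# assumptions, not part of the library), tuning `alpha` over a logarithmic range as
# suggested in the docstring above:
#
#   from sklearn.model_selection import GridSearchCV
#   search = GridSearchCV(ELMRegressor(n_neurons=100),
#                         param_grid={"alpha": [1e-5, 1e-3, 1e-1, 1e1, 1e3, 1e5]},
#                         cv=3)
#   search.fit(X_train, y_train)
#   y_pred = search.predict(X_test)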
class ELMClassifier(_BaseELM, ClassifierMixin):
"""ELM classifier, modified for multi-label classification support.
:param classes: Set of classes to consider in the model; can be expanded at runtime.
Samples of other classes will have their output set to zero.
:param solver: Solver to use, "default" for the built-in least squares solver or "ridge" for Ridge regression.
Example descr...
Attributes
----------
X_ : ndarray, shape (n_samples, n_features)
The input passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
"""
def __init__(self, classes=None, alpha=1e-7, batch_size=None, include_original_features=False, n_neurons=None,
ufunc="tanh", density=None, pairwise_metric=None, random_state=None):
super().__init__(alpha, batch_size, include_original_features, n_neurons, ufunc, density, pairwise_metric,
random_state)
self.classes = classes
@property
def classes_(self):
return self.label_binarizer_.classes_
def _get_tags(self):
return {"multioutput": True, "multilabel": True}
def _update_classes(self, y):
if not isinstance(self.solver_, BatchCholeskySolver):
raise ValueError("Only iterative solver supports dynamic class update")
old_classes = self.label_binarizer_.classes_
partial_classes = clone(self.label_binarizer_).fit(y).classes_
# no new classes detected
if set(partial_classes) <= set(old_classes):
return
if len(old_classes) < 3:
raise ValueError("Dynamic class update has to start with at least 3 classes to function correctly; "
"provide 3 or more 'classes=[...]' during initialization.")
# get new classes sorted by LabelBinarizer
self.label_binarizer_.fit(np.hstack((old_classes, partial_classes)))
new_classes = self.label_binarizer_.classes_
# convert existing XtY matrix to new classes
if hasattr(self.solver_, 'XtY_'):
XtY_old = self.solver_.XtY_
XtY_new = np.zeros((XtY_old.shape[0], new_classes.shape[0]))
for i, c in enumerate(old_classes):
j = np.where(new_classes == c)[0][0]
XtY_new[:, j] = XtY_old[:, i]
self.solver_.XtY_ = XtY_new
# reset the solution
if hasattr(self.solver_, 'is_fitted_'):
del self.solver_.is_fitted_
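# Illustrative sketch of dynamic class update (assumed data, not part of the library);
# note that it requires at least 3 initial classes, as enforced above:
#
#   clf = ELMClassifier(classes=[0, 1, 2], n_neurons=50)
#   clf.partial_fit(X1, y1)                        # y1 contains only labels 0..2
#   clf.partial_fit(X2, y2, update_classes=True)   # y2 introduces a new label 3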
def partial_fit(self, X, y=None, forget=False, update_classes=False, compute_output_weights=True):
"""Update classifier with a new batch of data.
|method_partial_fit|
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
forget : boolean, default False
|param_forget|
update_classes : boolean, default False
Include new classes from `y` into the model, assuming they were 0 in all previous samples.
compute_output_weights : boolean, optional, default True
|param_compute_output_weights|
"""
#todo: Warning on strongly non-normalized data
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True)
# init label binarizer if needed
if not hasattr(self, 'label_binarizer_'):
self.label_binarizer_ = LabelBinarizer()
if type_of_target(y).endswith("-multioutput"):
self.label_binarizer_ = MultiLabelBinarizer()
self.label_binarizer_.fit(self.classes if self.classes is not None else y)
if update_classes:
self._update_classes(y)
y_numeric = self.label_binarizer_.transform(y)
if len(y_numeric.shape) > 1 and y_numeric.shape[1] == 1:
y_numeric = y_numeric[:, 0]
super().partial_fit(X, y_numeric, forget=forget, compute_output_weights=compute_output_weights)
return self
def fit(self, X, y=None):
"""Fit a classifier erasing any previously trained model.
Returns
-------
self : object
Returns self.
"""
self._reset()
self.partial_fit(X, y, compute_output_weights=True)
return self
def predict(self, X):
"""Predict classes of new inputs X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Returns one most probable class for multi-class problem, or
a binary vector of all relevant classes for multi-label problem.
"""
check_is_fitted(self, "is_fitted_")
scores = super().predict(X)
return self.label_binarizer_.inverse_transform(scores)
def predict_proba(self, X):
"""Probability estimation for all classes.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
check_is_fitted(self, "is_fitted_")
prob = super().predict(X)
expit(prob, out=prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob | 0.877844 | 0.352425 |
import scipy as sp
from enum import Enum
from sklearn.metrics import pairwise_distances
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state
class HiddenLayerType(Enum):
RANDOM = 1 # Gaussian random projection
SPARSE = 2 # Sparse Random Projection
PAIRWISE = 3 # Pairwise kernel with a number of centroids
def dummy(x):
return x
def flatten(items):
"""Yield items from any nested iterable."""
for x in items:
# don't break strings into characters
if hasattr(x, '__iter__') and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
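# Example: list(flatten([1, [2, [3]], "ab"])) == [1, 2, 3, "ab"]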
def _is_list_of_strings(obj):
return obj is not None and all(isinstance(elem, str) for elem in obj)
def _dense(X):
if sp.sparse.issparse(X):
return X.todense()
else:
return X
class PairwiseRandomProjection(BaseEstimator, TransformerMixin):
def __init__(self, n_components=100, pairwise_metric='l2', n_jobs=None, random_state=None):
"""Pairwise distances projection with random centroids.
Parameters
----------
n_components : int
Number of components (centroids) in the projection. Creates the same number of output features.
pairwise_metric : str
A valid pairwise distance metric, see pairwise-distances_.
.. _pairwise-distances: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html#sklearn.metrics.pairwise_distances
n_jobs : int or None, optional, default=None
Number of jobs to use in distance computations, or `None` for no parallelism.
Passed to the pairwise-distances_ function.
random_state
Used for random generation of centroids.
"""
self.n_components = n_components
self.pairwise_metric = pairwise_metric
self.n_jobs = n_jobs
self.random_state = random_state
def fit(self, X, y=None):
"""Generate artificial centroids.
Centroids are sampled from a normal distribution. They work best if the data is normalized.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Input data
"""
X = check_array(X, accept_sparse=True)
self.random_state_ = check_random_state(self.random_state)
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s" % self.n_components)
self.components_ = self.random_state_.randn(self.n_components, X.shape[1])
self.n_jobs_ = 1 if self.n_jobs is None else self.n_jobs
return self
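# Note: transform(X) below returns an (n_samples, n_components) matrix of
# distances between the input samples and the randomly generated centroids.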
def transform(self, X):
"""Compute distance matrix between input data and the centroids.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Input data samples.
Returns
-------
X_dist : numpy array
Distance matrix between input data samples and centroids.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, 'components_')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection: X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
try:
X_dist = pairwise_distances(X, self.components_, n_jobs=self.n_jobs_, metric=self.pairwise_metric)
except TypeError:
# scipy distances that don't support sparse matrices
X_dist = pairwise_distances(_dense(X), _dense(self.components_), n_jobs=self.n_jobs_, metric=self.pairwise_metric)
return X_dist | scikit-elm | /scikit_elm-0.21a0-py3-none-any.whl/skelm/utils.py | utils.py | import scipy as sp
from enum import Enum
from sklearn.metrics import pairwise_distances
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state
class HiddenLayerType(Enum):
RANDOM = 1 # Gaussian random projection
SPARSE = 2 # Sparse Random Projection
PAIRWISE = 3 # Pairwise kernel with a number of centroids
def dummy(x):
return x
def flatten(items):
"""Yield items from any nested iterable."""
for x in items:
# don't break strings into characters
if hasattr(x, '__iter__') and not isinstance(x, (str, bytes)):
yield from flatten(x)
else:
yield x
def _is_list_of_strings(obj):
return obj is not None and all(isinstance(elem, str) for elem in obj)
def _dense(X):
if sp.sparse.issparse(X):
return X.todense()
else:
return X
class PairwiseRandomProjection(BaseEstimator, TransformerMixin):
def __init__(self, n_components=100, pairwise_metric='l2', n_jobs=None, random_state=None):
"""Pairwise distances projection with random centroids.
Parameters
----------
n_components : int
Number of components (centroids) in the projection. Creates the same number of output features.
pairwise_metric : str
A valid pairwise distance metric, see pairwise-distances_.
.. _pairwise-distances: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html#sklearn.metrics.pairwise_distances
n_jobs : int or None, optional, default=None
Number of jobs to use in distance computations, or `None` for no parallelism.
Passed to the pairwise-distances_ function.
random_state
Used for random generation of centroids.
"""
self.n_components = n_components
self.pairwise_metric = pairwise_metric
self.n_jobs = n_jobs
self.random_state = random_state
def fit(self, X, y=None):
"""Generate artificial centroids.
Centroids are sampled from a normal distribution. They work best if the data is normalized.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Input data
"""
X = check_array(X, accept_sparse=True)
self.random_state_ = check_random_state(self.random_state)
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s" % self.n_components)
self.components_ = self.random_state_.randn(self.n_components, X.shape[1])
self.n_jobs_ = 1 if self.n_jobs is None else self.n_jobs
return self
def transform(self, X):
"""Compute distance matrix between input data and the centroids.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Input data samples.
Returns
-------
X_dist : numpy array
Distance matrix between input data samples and centroids.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, 'components_')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection: X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
try:
X_dist = pairwise_distances(X, self.components_, n_jobs=self.n_jobs_, metric=self.pairwise_metric)
except TypeError:
# scipy distances that don't support sparse matrices
X_dist = pairwise_distances(_dense(X), _dense(self.components_), n_jobs=self.n_jobs_, metric=self.pairwise_metric)
return X_dist | 0.932522 | 0.450359 |
import numpy as np
import scipy as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from .utils import PairwiseRandomProjection, HiddenLayerType, dummy
# suppress annoying warning of random projection into a higher-dimensional space
import warnings
from sklearn.exceptions import DataDimensionalityWarning
warnings.simplefilter("ignore", DataDimensionalityWarning)
def auto_neuron_count(n, d):
# computes default number of neurons for `n` data samples with `d` features
return min(int(250 * np.log(1 + d/10) - 15), n//3 + 1)
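# Worked example: for n=10_000 samples with d=10 features this gives
# min(int(250 * log(2) - 15), 10_000 // 3 + 1) = min(158, 3334) = 158 neurons.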
ufuncs = {"tanh": np.tanh,
"sigm": sp.special.expit,
"relu": lambda x: np.maximum(x, 0),
"lin": dummy,
None: dummy}
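# Each hidden neuron computes ufunc(<projection of X>): a dense Gaussian random
# projection by default, a sparse random projection when `density` is given, or
# pairwise distances to random centroids (RBF-style) when `pairwise_metric` is
# set, in which case the ufunc is not applied.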
class HiddenLayer(BaseEstimator, TransformerMixin):
def __init__(self, n_neurons=None, density=None, ufunc="tanh", pairwise_metric=None, random_state=None):
self.n_neurons = n_neurons
self.density = density
self.ufunc = ufunc
self.pairwise_metric = pairwise_metric
self.random_state = random_state
def _fit_random_projection(self, X):
self.hidden_layer_ = HiddenLayerType.RANDOM
self.projection_ = GaussianRandomProjection(n_components=self.n_neurons_, random_state=self.random_state_)
self.projection_.fit(X)
def _fit_sparse_projection(self, X):
self.hidden_layer_ = HiddenLayerType.SPARSE
self.projection_ = SparseRandomProjection(n_components=self.n_neurons_, density=self.density,
dense_output=True, random_state=self.random_state_)
self.projection_.fit(X)
def _fit_pairwise_projection(self, X):
self.hidden_layer_ = HiddenLayerType.PAIRWISE
self.projection_ = PairwiseRandomProjection(n_components=self.n_neurons_,
pairwise_metric=self.pairwise_metric,
random_state=self.random_state_)
self.projection_.fit(X)
def fit(self, X, y=None):
# basic checks
X = check_array(X, accept_sparse=True)
# handle random state
self.random_state_ = check_random_state(self.random_state)
# get number of neurons
n, d = X.shape
self.n_neurons_ = int(self.n_neurons) if self.n_neurons is not None else auto_neuron_count(n, d)
# fit a projection
if self.pairwise_metric is not None:
self._fit_pairwise_projection(X)
elif self.density is not None:
self._fit_sparse_projection(X)
else:
self._fit_random_projection(X)
if self.ufunc in ufuncs.keys():
self.ufunc_ = ufuncs[self.ufunc]
elif callable(self.ufunc):
self.ufunc_ = self.ufunc
else:
raise ValueError("Ufunc transformation function not understood: ", self.ufunc)
self.is_fitted_ = True
return self
def transform(self, X):
check_is_fitted(self, "is_fitted_")
X = check_array(X, accept_sparse=True)
n_features = self.projection_.components_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features))
if self.hidden_layer_ == HiddenLayerType.PAIRWISE:
return self.projection_.transform(X) # pairwise projection ignores ufunc
return self.ufunc_(self.projection_.transform(X)) | scikit-elm | /scikit_elm-0.21a0-py3-none-any.whl/skelm/hidden_layer.py | hidden_layer.py | import numpy as np
import scipy as sp
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from .utils import PairwiseRandomProjection, HiddenLayerType, dummy
# suppress annoying warning of random projection into a higher-dimensional space
import warnings
from sklearn.exceptions import DataDimensionalityWarning
warnings.simplefilter("ignore", DataDimensionalityWarning)
def auto_neuron_count(n, d):
# computes default number of neurons for `n` data samples with `d` features
return min(int(250 * np.log(1 + d/10) - 15), n//3 + 1)
ufuncs = {"tanh": np.tanh,
"sigm": sp.special.expit,
"relu": lambda x: np.maximum(x, 0),
"lin": dummy,
None: dummy}
class HiddenLayer(BaseEstimator, TransformerMixin):
def __init__(self, n_neurons=None, density=None, ufunc="tanh", pairwise_metric=None, random_state=None):
self.n_neurons = n_neurons
self.density = density
self.ufunc = ufunc
self.pairwise_metric = pairwise_metric
self.random_state = random_state
def _fit_random_projection(self, X):
self.hidden_layer_ = HiddenLayerType.RANDOM
self.projection_ = GaussianRandomProjection(n_components=self.n_neurons_, random_state=self.random_state_)
self.projection_.fit(X)
def _fit_sparse_projection(self, X):
self.hidden_layer_ = HiddenLayerType.SPARSE
self.projection_ = SparseRandomProjection(n_components=self.n_neurons_, density=self.density,
dense_output=True, random_state=self.random_state_)
self.projection_.fit(X)
def _fit_pairwise_projection(self, X):
self.hidden_layer_ = HiddenLayerType.PAIRWISE
self.projection_ = PairwiseRandomProjection(n_components=self.n_neurons_,
pairwise_metric=self.pairwise_metric,
random_state=self.random_state_)
self.projection_.fit(X)
def fit(self, X, y=None):
# basic checks
X = check_array(X, accept_sparse=True)
# handle random state
self.random_state_ = check_random_state(self.random_state)
# get number of neurons
n, d = X.shape
self.n_neurons_ = int(self.n_neurons) if self.n_neurons is not None else auto_neuron_count(n, d)
# fit a projection
if self.pairwise_metric is not None:
self._fit_pairwise_projection(X)
elif self.density is not None:
self._fit_sparse_projection(X)
else:
self._fit_random_projection(X)
if self.ufunc in ufuncs.keys():
self.ufunc_ = ufuncs[self.ufunc]
elif callable(self.ufunc):
self.ufunc_ = self.ufunc
else:
raise ValueError("Ufunc transformation function not understood: ", self.ufunc)
self.is_fitted_ = True
return self
def transform(self, X):
check_is_fitted(self, "is_fitted_")
X = check_array(X, accept_sparse=True)
n_features = self.projection_.components_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features))
if self.hidden_layer_ == HiddenLayerType.PAIRWISE:
return self.projection_.transform(X) # pairwise projection ignores ufunc
return self.ufunc_(self.projection_.transform(X)) | 0.875282 | 0.437343 |
import numpy as np
import scipy as sp
import warnings
from sklearn.exceptions import DataConversionWarning
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils import check_X_y, check_array
from dask import distributed
from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask.array as da
class DaskCholeskySolver(BaseEstimator, RegressorMixin):
"""Out-of-core linear system solver with Dask back-end.
Parameters
----------
alpha : float, non-negative
L2 regularization parameter; a larger value means a stronger effect. The value may be
increased if the system fails to converge; the value actually used is stored in the `alpha_` attribute.
batch_size : int
Batch size for **samples and features**. Computations proceed on square blocks of data.
For optimal performance, use a number of features that is equal to, or a bit less than, a multiple
of the batch size; e.g. 8912 features with a batch size of 3000.
swap_dir : str
Directory for temporary storage of Dask data that does not fit in memory. A large and fast
storage device is advised, such as a local SSD.
Attributes
----------
cluster_ : object
An instance of `dask.distributed.LocalCluster`.
client_ : object
Dask client for running computations.
"""
def __init__(self, alpha=1e-7, batch_size=2000, swap_dir=None):
self.alpha = alpha
self.batch_size = batch_size
self.swap_dir = swap_dir
def _init_dask(self):
self.cluster_ = LocalCluster(n_workers=2, local_dir=self.swap_dir)
self.client_ = Client(self.cluster_)
print("Running on:")
print(self.client_)
def fit(self, X, y):
self.W_ = da.random.normal
return self
def predict(self, X):
return None
class BBvdsnjvlsdnjhbgfndjvksdjkvlndsf(BaseEstimator, RegressorMixin):
def __init__(self, alpha=1e-7):
self.alpha = alpha
def _init_XY(self, X, y):
"""Initialize covariance matrices, including a separate bias term.
"""
d_in = X.shape[1]
self._XtX = np.eye(d_in + 1) * self.alpha
self._XtX[0, 0] = 0
if len(y.shape) == 1:
self._XtY = np.zeros((d_in + 1,))
else:
self._XtY = np.zeros((d_in + 1, y.shape[1]))
@property
def XtY_(self):
return self._XtY
@property
def XtX_(self):
return self._XtX
@XtY_.setter
def XtY_(self, value):
self._XtY = value
@XtX_.setter
def XtX_(self, value):
self._XtX = value
def _solve(self):
"""Second stage of solution (X'X)B = X'Y using Cholesky decomposition.
Sets `is_fitted_` to True.
"""
B = sp.linalg.solve(self._XtX, self._XtY, assume_a='pos', overwrite_a=False, overwrite_b=False)
self.coef_ = B[1:]
self.intercept_ = B[0]
self.is_fitted_ = True
def _reset(self):
"""Erase solution and data matrices.
"""
[delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
def fit(self, X, y):
Solve an L2-regularized linear system, as in Ridge regression, overwriting any previous solution.
"""
self._reset() # remove old solution
self.partial_fit(X, y, compute_output_weights=True)
return self
def partial_fit(self, X, y, compute_output_weights=True):
"""Update model with a new batch of data.
Output weight computation can be temporarily turned off for faster processing. This will mark the model as
not fit. Enable `compute_output_weights` in the final call to `partial_fit`.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
compute_output_weights : boolean, optional, default True
Whether to compute new output weights (coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
"""
if self.alpha < 0:
raise ValueError("Regularization parameter alpha must be non-negative.")
# solution only
if X is None and y is None and compute_output_weights:
self._solve()
return self
# validate parameters
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, y_numeric=True, ensure_2d=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = "A column-vector y was passed when a 1d array was expected.\
Please change the shape of y to (n_samples, ), for example using ravel()."
warnings.warn(msg, DataConversionWarning)
# init temporary data storage
if not hasattr(self, '_XtX'):
self._init_XY(X, y)
else:
if X.shape[1] + 1 != self._XtX.shape[0]:
n_new, n_old = X.shape[1], self._XtX.shape[0] - 1
raise ValueError("Number of features %d does not match previous data %d." % (n_new, n_old))
# compute temporary data
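# The system is augmented with a bias term: XtX has the block layout
# [[n_samples, sum(X)], [sum(X).T, X.T @ X]] and XtY stacks [sum(y); X.T @ y],
# so after solving, B[0] is the intercept and B[1:] are the coefficients.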
X_sum = safe_sparse_dot(X.T, np.ones((X.shape[0],)))
y_sum = safe_sparse_dot(y.T, np.ones((y.shape[0],)))
self._XtX[0, 0] += X.shape[0]
self._XtX[1:, 0] += X_sum
self._XtX[0, 1:] += X_sum
self._XtX[1:, 1:] += X.T @ X
self._XtY[0] += y_sum
self._XtY[1:] += X.T @ y
# solve
if not compute_output_weights:
# mark as not fitted
[delattr(self, attr) for attr in ('coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
else:
self._solve()
return self
def predict(self, X):
check_is_fitted(self, 'is_fitted_')
X = check_array(X, accept_sparse=True)
return safe_sparse_dot(X, self.coef_, dense_output=True) + self.intercept_ | scikit-elm | /scikit_elm-0.21a0-py3-none-any.whl/skelm/solver_dask.py | solver_dask.py | import numpy as np
import scipy as sp
import warnings
from sklearn.exceptions import DataConversionWarning
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils import check_X_y, check_array
from dask import distributed
from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask.array as da
class DaskCholeskySolver(BaseEstimator, RegressorMixin):
"""Out-of-core linear system solver with Dask back-end.
Parameters
----------
alpha : float, non-negative
L2 regularization parameter; a larger value means a stronger effect. The value may be
increased if the system fails to converge; the value actually used is stored in the `alpha_` attribute.
batch_size : int
Batch size for **samples and features**. Computations proceed on square blocks of data.
For optimal performance, use a number of features that is equal to, or a bit less than, a multiple
of the batch size; e.g. 8912 features with a batch size of 3000.
swap_dir : str
Directory for temporary storage of Dask data that does not fit in memory. A large and fast
storage device is advised, such as a local SSD.
Attributes
----------
cluster_ : object
An instance of `dask.distributed.LocalCluster`.
client_ : object
Dask client for running computations.
"""
def __init__(self, alpha=1e-7, batch_size=2000, swap_dir=None):
self.alpha = alpha
self.batch_size = batch_size
self.swap_dir = swap_dir
def _init_dask(self):
self.cluster_ = LocalCluster(n_workers=2, local_dir=self.swap_dir)
self.client_ = Client(self.cluster_)
print("Running on:")
print(self.client_)
def fit(self, X, y):
self.W_ = da.random.normal
return self
def predict(self, X):
return None
class BBvdsnjvlsdnjhbgfndjvksdjkvlndsf(BaseEstimator, RegressorMixin):
def __init__(self, alpha=1e-7):
self.alpha = alpha
def _init_XY(self, X, y):
"""Initialize covariance matrices, including a separate bias term.
"""
d_in = X.shape[1]
self._XtX = np.eye(d_in + 1) * self.alpha
self._XtX[0, 0] = 0
if len(y.shape) == 1:
self._XtY = np.zeros((d_in + 1,))
else:
self._XtY = np.zeros((d_in + 1, y.shape[1]))
@property
def XtY_(self):
return self._XtY
@property
def XtX_(self):
return self._XtX
@XtY_.setter
def XtY_(self, value):
self._XtY = value
@XtX_.setter
def XtX_(self, value):
self._XtX = value
def _solve(self):
"""Second stage of solution (X'X)B = X'Y using Cholesky decomposition.
Sets `is_fitted_` to True.
"""
B = sp.linalg.solve(self._XtX, self._XtY, assume_a='pos', overwrite_a=False, overwrite_b=False)
self.coef_ = B[1:]
self.intercept_ = B[0]
self.is_fitted_ = True
def _reset(self):
"""Erase solution and data matrices.
"""
[delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
def fit(self, X, y):
Solve an L2-regularized linear system, as in Ridge regression, overwriting any previous solution.
"""
self._reset() # remove old solution
self.partial_fit(X, y, compute_output_weights=True)
return self
def partial_fit(self, X, y, compute_output_weights=True):
"""Update model with a new batch of data.
Output weight computation can be temporarily turned off for faster processing. This will mark the model as
not fit. Enable `compute_output_weights` in the final call to `partial_fit`.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
Training input samples
y : array-like, shape=[n_samples, n_targets]
Training targets
compute_output_weights : boolean, optional, default True
Whether to compute new output weights (coef_, intercept_). Disable this in intermediate `partial_fit`
steps to run computations faster, then enable in the last call to compute the new solution.
.. Note::
Solution can be updated without extra data by setting `X=None` and `y=None`.
"""
if self.alpha < 0:
raise ValueError("Regularization parameter alpha must be non-negative.")
# solution only
if X is None and y is None and compute_output_weights:
self._solve()
return self
# validate parameters
X, y = check_X_y(X, y, accept_sparse=True, multi_output=True, y_numeric=True, ensure_2d=True)
if len(y.shape) > 1 and y.shape[1] == 1:
msg = "A column-vector y was passed when a 1d array was expected.\
Please change the shape of y to (n_samples, ), for example using ravel()."
warnings.warn(msg, DataConversionWarning)
# init temporary data storage
if not hasattr(self, '_XtX'):
self._init_XY(X, y)
else:
if X.shape[1] + 1 != self._XtX.shape[0]:
n_new, n_old = X.shape[1], self._XtX.shape[0] - 1
raise ValueError("Number of features %d does not match previous data %d." % (n_new, n_old))
# compute temporary data
X_sum = safe_sparse_dot(X.T, np.ones((X.shape[0],)))
y_sum = safe_sparse_dot(y.T, np.ones((y.shape[0],)))
self._XtX[0, 0] += X.shape[0]
self._XtX[1:, 0] += X_sum
self._XtX[0, 1:] += X_sum
self._XtX[1:, 1:] += X.T @ X
self._XtY[0] += y_sum
self._XtY[1:] += X.T @ y
# solve
if not compute_output_weights:
# mark as not fitted
[delattr(self, attr) for attr in ('coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]
else:
self._solve()
return self
def predict(self, X):
check_is_fitted(self, 'is_fitted_')
X = check_array(X, accept_sparse=True)
return safe_sparse_dot(X, self.coef_, dense_output=True) + self.intercept_ | 0.84075 | 0.609292 |
# scikit-embeddings
Utilites for training word, document and sentence embeddings in scikit-learn pipelines.
## Features
- Train Word, Paragraph or Sentence embeddings in scikit-learn compatible pipelines.
- Stream texts easily from disk and chunk them so you can use large datasets for training embeddings.
- spaCy tokenizers with lemmatization, stop word removal and augmentation with POS tags/morphological information, etc., for the highest-quality embeddings for literary analysis.
- Fast and performant trainable tokenizer components from `tokenizers`.
- Easy to integrate components and pipelines in your scikit-learn workflows and machine learning pipelines.
- Easy serialization and integration with the Hugging Face Hub for quickly publishing your embedding pipelines.
### What scikit-embeddings is not for:
- Using pretrained embeddings in scikit-learn pipelines (for these purposes I recommend [embetter](https://github.com/koaning/embetter/tree/main))
- Training transformer models and deep neural language models (if you want to do this, do it with [transformers](https://huggingface.co/docs/transformers/index))
## Examples
### Streams
scikit-embeddings comes with a handful of utilities for streaming data from disk or other sources,
chunking and filtering. Here's an example of how you would go about obtaining chunks of text from jsonl files with a "content" field.
```python
from skembedding.streams import Stream
# let's say you have a list of file paths
files: list[str] = [...]
# Stream text chunks from jsonl files with a 'content' field.
text_chunks = (
Stream(files)
.read_files(lines=True)
.json()
.grab("content")
.chunk(10_000)
)
```
### Word Embeddings
You can train classic vanilla word embeddings by building a pipeline that contains a `WordLevel` tokenizer and an embedding model:
```python
from skembedding.tokenizers import WordLevelTokenizer
from skembedding.models import Word2VecEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
WordLevelTokenizer(),
Word2VecEmbedding(n_components=100, algorithm="cbow")
)
embedding_pipe.fit(texts)
```
### Fasttext-like
You can train an embedding pipeline that uses subword information by choosing a tokenizer that produces subword units.
You may want to use `Unigram`, `BPE` or `WordPiece` for these purposes.
Fasttext also uses skip-gram by default, so let's switch to that.
```python
from skembedding.tokenizers import UnigramTokenizer
from skembedding.models import Word2VecEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
UnigramTokenizer(),
Word2VecEmbedding(n_components=250, algorithm="sg")
)
embedding_pipe.fit(texts)
```
### Sense2Vec
We provide a spaCy tokenizer that can lemmatize tokens and append morphological information so you can get fine-grained
semantic information even on relatively small corpora. I recommend using this for literary analysis.
```python
from skembeddings.models import Word2VecEmbedding
from skembeddings.tokenizers import SpacyTokenizer
from skembeddings.pipeline import EmbeddingPipeline
# Single token pattern that lets alphabetical tokens pass, but not stopwords
pattern = [[{"IS_ALPHA": True, "IS_STOP": False}]]
# Build tokenizer that lemmatizes and appends POS-tags to the lemmas
tokenizer = SpacyTokenizer(
"en_core_web_sm",
out_attrs=("LEMMA", "UPOS"),
patterns=pattern,
)
# Build a pipeline
embedding_pipeline = EmbeddingPipeline(
tokenizer,
Word2VecEmbedding(50, algorithm="cbow")
)
# Fitting pipeline on corpus
embedding_pipeline.fit(corpus)
```
### Paragraph Embeddings
You can train Doc2Vec paragraph embeddings with your choice of tokenization.
```python
from skembedding.tokenizers import WordPieceTokenizer
from skembedding.models import ParagraphEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
WordPieceTokenizer(),
ParagraphEmbedding(n_components=250, algorithm="dm")
)
embedding_pipe.fit(texts)
```
### Iterative training
In the case of large datasets you can train on individual chunks with `partial_fit()`.
```python
for chunk in text_chunks:
embedding_pipe.partial_fit(chunk)
```
### Serialization
Pipelines can be safely serialized to disk:
```python
embedding_pipe.to_disk("output_folder/")
embedding_pipe = EmbeddingPipeline.from_disk("output_folder/")
```
Or published to the Hugging Face Hub:
```python
from huggingface_hub import login
login()
embedding_pipe.to_hub("username/name_of_pipeline")
embedding_pipe = EmbeddingPipeline.from_hub("username/name_of_pipeline")
```
### Text Classification
You can include an embedding model in your classification pipelines by adding a classification head.
```python
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import make_pipeline
X_train, X_test, y_train, y_test = train_test_split(X, y)
cls_pipe = make_pipeline(embedding_pipe, LogisticRegression())
cls_pipe.fit(X_train, y_train)
y_pred = cls_pipe.predict(X_test)
print(classification_report(y_test, y_pred))
```
### Feature Extraction
If you intend to use the features produced by tokenizers in other text pipelines, such as topic models,
you can use `ListCountVectorizer` or `Joiner`.
Here's an example of an NMF topic model that uses lemmata enriched with POS tags.
```python
from sklearn.decomposition import NMF
from sklearn.pipeline import make_pipeline
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from skembedding.tokenizers import SpacyTokenizer
from skembedding.feature_extraction import ListCountVectorizer
from skembedding.preprocessing import Joiner
# Single token pattern that lets alphabetical tokens pass, but not stopwords
pattern = [[{"IS_ALPHA": True, "IS_STOP": False}]]
# Build tokenizer that lemmatizes and appends POS-tags to the lemmas
tokenizer = SpacyTokenizer(
"en_core_web_sm",
out_attrs=("LEMMA", "UPOS"),
patterns=pattern,
)
# Example with ListCountVectorizer
topic_pipeline = make_pipeline(
tokenizer,
ListCountVectorizer(),
TfidfTransformer(), # tf-idf weighting (optional)
NMF(15), # 15 topics in the model
)
# Alternatively you can just join the tokens together with whitespace
topic_pipeline = make_pipeline(
tokenizer,
Joiner(),
TfidfVectorizer(),
NMF(15),
)
```
| scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/README.md | README.md | from skembedding.streams import Stream
# let's say you have a list of file paths
files: list[str] = [...]
# Stream text chunks from jsonl files with a 'content' field.
text_chunks = (
Stream(files)
.read_files(lines=True)
.json()
.grab("content")
.chunk(10_000)
)
from skembedding.tokenizers import WordLevelTokenizer
from skembedding.models import Word2VecEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
WordLevelTokenizer(),
Word2VecEmbedding(n_components=100, algorithm="cbow")
)
embedding_pipe.fit(texts)
from skembedding.tokenizers import UnigramTokenizer
from skembedding.models import Word2VecEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
UnigramTokenizer(),
Word2VecEmbedding(n_components=250, algorithm="sg")
)
embedding_pipe.fit(texts)
from skembeddings.models import Word2VecEmbedding
from skembeddings.tokenizers import SpacyTokenizer
from skembeddings.pipeline import EmbeddingPipeline
# Single token pattern that lets alphabetical tokens pass, but not stopwords
pattern = [[{"IS_ALPHA": True, "IS_STOP": False}]]
# Build tokenizer that lemmatizes and appends POS-tags to the lemmas
tokenizer = SpacyTokenizer(
"en_core_web_sm",
out_attrs=("LEMMA", "UPOS"),
patterns=pattern,
)
# Build a pipeline
embedding_pipeline = EmbeddingPipeline(
tokenizer,
Word2VecEmbedding(50, algorithm="cbow")
)
# Fitting pipeline on corpus
embedding_pipeline.fit(corpus)
from skembedding.tokenizers import WordPieceTokenizer
from skembedding.models import ParagraphEmbedding
from skembeddings.pipeline import EmbeddingPipeline
embedding_pipe = EmbeddingPipeline(
WordPieceTokenizer(),
ParagraphEmbedding(n_components=250, algorithm="dm")
)
embedding_pipe.fit(texts)
for chunk in text_chunks:
embedding_pipe.partial_fit(chunk)
embedding_pipe.to_disk("output_folder/")
embedding_pipe = EmbeddingPipeline.from_disk("output_folder/")
from huggingface_hub import login
login()
embedding_pipe.to_hub("username/name_of_pipeline")
embedding_pipe = EmbeddingPipeline.from_hub("username/name_of_pipeline")
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import make_pipeline
X_train, X_test, y_train, y_test = train_test_split(X, y)
cls_pipe = make_pipeline(embedding_pipe, LogisticRegression())
cls_pipe.fit(X_train, y_train)
y_pred = cls_pipe.predict(X_test)
print(classification_report(y_test, y_pred))
from sklearn.decomposition import NMF
from sklearn.pipeline import make_pipeline
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from skembedding.tokenizers import SpacyTokenizer
from skembedding.feature_extraction import ListCountVectorizer
from skembedding.preprocessing import Joiner
# Single token pattern that lets alphabetical tokens pass, but not stopwords
pattern = [[{"IS_ALPHA": True, "IS_STOP": False}]]
# Build tokenizer that lemmatizes and appends POS-tags to the lemmas
tokenizer = SpacyTokenizer(
"en_core_web_sm",
out_attrs=("LEMMA", "UPOS"),
patterns=pattern,
)
# Example with ListCountVectorizer
topic_pipeline = make_pipeline(
tokenizer,
ListCountVectorizer(),
TfidfTransformer(), # tf-idf weighting (optional)
NMF(15), # 15 topics in the model
)
# Alternatively you can just join the tokens together with whitespace
topic_pipeline = make_pipeline(
tokenizer,
Joiner(),
TfidfVectorizer(),
NMF(15),
) | 0.762954 | 0.936576 |
import tempfile
from pathlib import Path
from typing import Union
from confection import Config, registry
from huggingface_hub import HfApi, snapshot_download
from sklearn.pipeline import Pipeline
# THIS IS IMPORTANT DO NOT REMOVE
from skembeddings import models, tokenizers
from skembeddings._hub import DEFAULT_README
from skembeddings.base import Serializable
class EmbeddingPipeline(Pipeline):
def __init__(
self,
tokenizer: Serializable,
model: Serializable,
frozen: bool = False,
):
self.tokenizer = tokenizer
self.model = model
self.frozen = frozen
steps = [("tokenizer_model", tokenizer), ("embedding_model", model)]
super().__init__(steps=steps)
def freeze(self):
self.frozen = True
return self
def unfreeze(self):
self.frozen = False
return self
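# Note: a frozen pipeline ignores fit()/partial_fit() calls and keeps its trained
# weights; pipelines loaded with from_hub() are returned frozen (see from_hub below).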
def fit(self, X, y=None, **kwargs):
if self.frozen:
return self
super().fit(X, y=y, **kwargs)
return self
def partial_fit(self, X, y=None, classes=None, **kwargs):
"""
Fit the components, allowing for batches of data.
"""
if self.frozen:
return self
for name, step in self.steps:
if not hasattr(step, "partial_fit"):
raise ValueError(
f"Step {name} is a {step} which does"
"not have `.partial_fit` implemented."
)
for name, step in self.steps:
if hasattr(step, "predict"):
step.partial_fit(X, y, classes=classes, **kwargs)
else:
step.partial_fit(X, y)
if hasattr(step, "transform"):
X = step.transform(X)
return self
@property
def config(self) -> Config:
embedding: Serializable = self["embedding_model"] # type: ignore
tokenizer: Serializable = self["tokenizer_model"] # type: ignore
return tokenizer.config.merge(embedding.config)
def to_disk(self, path: Union[str, Path]) -> None:
embedding: Serializable = self["embedding_model"] # type: ignore
tokenizer: Serializable = self["tokenizer_model"] # type: ignore
path = Path(path)
path.mkdir(exist_ok=True)
config_path = path.joinpath("config.cfg")
tokenizer_path = path.joinpath("tokenizer.bin")
embedding_path = path.joinpath("embedding.bin")
with open(embedding_path, "wb") as embedding_file:
embedding_file.write(embedding.to_bytes())
with open(tokenizer_path, "wb") as tokenizer_file:
tokenizer_file.write(tokenizer.to_bytes())
self.config.to_disk(config_path)
@classmethod
def from_disk(cls, path: Union[str, Path]) -> "EmbeddingPipeline":
path = Path(path)
config_path = path.joinpath("config.cfg")
tokenizer_path = path.joinpath("tokenizer.bin")
embedding_path = path.joinpath("embedding.bin")
config = Config().from_disk(config_path)
resolved = registry.resolve(config)
with open(tokenizer_path, "rb") as tokenizer_file:
tokenizer = resolved["tokenizer"].from_bytes(tokenizer_file.read())
with open(embedding_path, "rb") as embedding_file:
embedding = resolved["embedding"].from_bytes(embedding_file.read())
return cls(tokenizer, embedding)
def to_hub(self, repo_id: str, add_readme: bool = True) -> None:
api = HfApi()
api.create_repo(repo_id, exist_ok=True)
with tempfile.TemporaryDirectory() as tmp_dir:
self.to_disk(tmp_dir)
if add_readme:
with open(
Path(tmp_dir).joinpath("README.md"), "w"
) as readme_f:
readme_f.write(DEFAULT_README.format(repo=repo_id))
api.upload_folder(
folder_path=tmp_dir, repo_id=repo_id, repo_type="model"
)
@classmethod
def from_hub(cls, repo_id: str) -> "EmbeddingPipeline":
in_dir = snapshot_download(repo_id=repo_id)
res = cls.from_disk(in_dir)
return res.freeze() | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/pipeline.py | pipeline.py | import tempfile
from pathlib import Path
from typing import Union
from confection import Config, registry
from huggingface_hub import HfApi, snapshot_download
from sklearn.pipeline import Pipeline
# THIS IS IMPORTANT DO NOT REMOVE
from skembeddings import models, tokenizers
from skembeddings._hub import DEFAULT_README
from skembeddings.base import Serializable
class EmbeddingPipeline(Pipeline):
def __init__(
self,
tokenizer: Serializable,
model: Serializable,
frozen: bool = False,
):
self.tokenizer = tokenizer
self.model = model
self.frozen = frozen
steps = [("tokenizer_model", tokenizer), ("embedding_model", model)]
super().__init__(steps=steps)
def freeze(self):
self.frozen = True
return self
def unfreeze(self):
self.frozen = False
return self
def fit(self, X, y=None, **kwargs):
if self.frozen:
return self
super().fit(X, y=y, **kwargs)
return self
def partial_fit(self, X, y=None, classes=None, **kwargs):
"""
Fit the components, allowing for batches of data.
"""
if self.frozen:
return self
for name, step in self.steps:
if not hasattr(step, "partial_fit"):
raise ValueError(
f"Step {name} is a {step} which does"
"not have `.partial_fit` implemented."
)
for name, step in self.steps:
if hasattr(step, "predict"):
step.partial_fit(X, y, classes=classes, **kwargs)
else:
step.partial_fit(X, y)
if hasattr(step, "transform"):
X = step.transform(X)
return self
@property
def config(self) -> Config:
embedding: Serializable = self["embedding_model"] # type: ignore
tokenizer: Serializable = self["tokenizer_model"] # type: ignore
return tokenizer.config.merge(embedding.config)
def to_disk(self, path: Union[str, Path]) -> None:
embedding: Serializable = self["embedding_model"] # type: ignore
tokenizer: Serializable = self["tokenizer_model"] # type: ignore
path = Path(path)
path.mkdir(exist_ok=True)
config_path = path.joinpath("config.cfg")
tokenizer_path = path.joinpath("tokenizer.bin")
embedding_path = path.joinpath("embedding.bin")
with open(embedding_path, "wb") as embedding_file:
embedding_file.write(embedding.to_bytes())
with open(tokenizer_path, "wb") as tokenizer_file:
tokenizer_file.write(tokenizer.to_bytes())
self.config.to_disk(config_path)
@classmethod
def from_disk(cls, path: Union[str, Path]) -> "EmbeddingPipeline":
path = Path(path)
config_path = path.joinpath("config.cfg")
tokenizer_path = path.joinpath("tokenizer.bin")
embedding_path = path.joinpath("embedding.bin")
config = Config().from_disk(config_path)
resolved = registry.resolve(config)
with open(tokenizer_path, "rb") as tokenizer_file:
tokenizer = resolved["tokenizer"].from_bytes(tokenizer_file.read())
with open(embedding_path, "rb") as embedding_file:
embedding = resolved["embedding"].from_bytes(embedding_file.read())
return cls(tokenizer, embedding)
def to_hub(self, repo_id: str, add_readme: bool = True) -> None:
api = HfApi()
api.create_repo(repo_id, exist_ok=True)
with tempfile.TemporaryDirectory() as tmp_dir:
self.to_disk(tmp_dir)
if add_readme:
with open(
Path(tmp_dir).joinpath("README.md"), "w"
) as readme_f:
readme_f.write(DEFAULT_README.format(repo=repo_id))
api.upload_folder(
folder_path=tmp_dir, repo_id=repo_id, repo_type="model"
)
@classmethod
def from_hub(cls, repo_id: str) -> "EmbeddingPipeline":
in_dir = snapshot_download(repo_id=repo_id)
res = cls.from_disk(in_dir)
return res.freeze() | 0.817829 | 0.197212 |
from abc import ABC, abstractmethod
from typing import Iterable
from confection import Config, registry
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from tokenizers import Tokenizer
from tokenizers.models import BPE, Unigram, WordLevel, WordPiece
from tokenizers.normalizers import BertNormalizer, Normalizer
from tokenizers.pre_tokenizers import ByteLevel, Whitespace
from tokenizers.trainers import (
BpeTrainer,
Trainer,
UnigramTrainer,
WordLevelTrainer,
WordPieceTrainer,
)
from skembeddings.base import Serializable
class HuggingFaceTokenizerBase(
BaseEstimator, TransformerMixin, Serializable, ABC
):
def __init__(self, normalizer: Normalizer = BertNormalizer()):
self.tokenizer = None
self.trainer = None
self.normalizer = normalizer
@abstractmethod
def _init_tokenizer(self) -> Tokenizer:
pass
@abstractmethod
def _init_trainer(self) -> Trainer:
pass
def fit(self, X: Iterable[str], y=None):
self.tokenizer = self._init_tokenizer()
self.trainer = self._init_trainer()
self.tokenizer.train_from_iterator(X, self.trainer)
return self
def partial_fit(self, X: Iterable[str], y=None):
if (self.tokenizer is None) or (self.trainer is None):
self.fit(X)
else:
new_tokenizer = self._init_tokenizer()
new_tokenizer.train_from_iterator(X, self.trainer)
new_vocab = new_tokenizer.get_vocab()
self.tokenizer.add_tokens(new_vocab)
return self
def transform(self, X: Iterable[str]) -> list[list[str]]:
if self.tokenizer is None:
raise NotFittedError("Tokenizer has not been trained yet.")
if isinstance(X, str):
raise TypeError(
"str passed instead of iterable, did you mean to pass [X]?"
)
res = []
for text in X:
encoding = self.tokenizer.encode(text)
res.append(encoding.tokens)
return res
def get_feature_names_out(self, input_features=None):
return None
def to_bytes(self) -> bytes:
if self.tokenizer is None:
raise NotFittedError(
"Tokenizer has not been fitted, cannot serialize."
)
return self.tokenizer.to_str().encode("utf-8")
def from_bytes(self, data: bytes):
tokenizer = Tokenizer.from_str(data.decode("utf-8"))
self.tokenizer = tokenizer
return self
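# A hedged usage sketch of the concrete subclasses below (assumed toy corpus,
# not part of the library):
#
#   tok = WordPieceTokenizer()
#   tok.fit(["a small corpus of texts", "another document"])
#   tok.transform(["another document"])   # -> one list of tokens per input text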
class WordPieceTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return WordPieceTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "wordpiece_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "WordPieceTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class WordLevelTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return WordLevelTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "word_level_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "WordLevelTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class UnigramTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(Unigram())
tokenizer.pre_tokenizer = ByteLevel()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return UnigramTrainer(unk_token="[UNK]", special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "unigram_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "UnigramTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class BPETokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = ByteLevel()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return BpeTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "bpe_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "BPETokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"] | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/tokenizers/_huggingface.py | _huggingface.py | from abc import ABC, abstractmethod
from typing import Iterable
from confection import Config, registry
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from tokenizers import Tokenizer
from tokenizers.models import BPE, Unigram, WordLevel, WordPiece
from tokenizers.normalizers import BertNormalizer, Normalizer
from tokenizers.pre_tokenizers import ByteLevel, Whitespace
from tokenizers.trainers import (
BpeTrainer,
Trainer,
UnigramTrainer,
WordLevelTrainer,
WordPieceTrainer,
)
from skembeddings.base import Serializable
class HuggingFaceTokenizerBase(
BaseEstimator, TransformerMixin, Serializable, ABC
):
def __init__(self, normalizer: Normalizer = BertNormalizer()):
self.tokenizer = None
self.trainer = None
self.normalizer = normalizer
@abstractmethod
def _init_tokenizer(self) -> Tokenizer:
pass
@abstractmethod
def _init_trainer(self) -> Trainer:
pass
def fit(self, X: Iterable[str], y=None):
self.tokenizer = self._init_tokenizer()
self.trainer = self._init_trainer()
self.tokenizer.train_from_iterator(X, self.trainer)
return self
def partial_fit(self, X: Iterable[str], y=None):
if (self.tokenizer is None) or (self.trainer is None):
self.fit(X)
else:
new_tokenizer = self._init_tokenizer()
new_tokenizer.train_from_iterator(X, self.trainer)
new_vocab = new_tokenizer.get_vocab()
            self.tokenizer.add_tokens(list(new_vocab.keys()))
return self
def transform(self, X: Iterable[str]) -> list[list[str]]:
if self.tokenizer is None:
raise NotFittedError("Tokenizer has not been trained yet.")
if isinstance(X, str):
raise TypeError(
"str passed instead of iterable, did you mean to pass [X]?"
)
res = []
for text in X:
encoding = self.tokenizer.encode(text)
res.append(encoding.tokens)
return res
def get_feature_names_out(self, input_features=None):
return None
def to_bytes(self) -> bytes:
if self.tokenizer is None:
raise NotFittedError(
"Tokenizer has not been fitted, cannot serialize."
)
return self.tokenizer.to_str().encode("utf-8")
def from_bytes(self, data: bytes):
tokenizer = Tokenizer.from_str(data.decode("utf-8"))
self.tokenizer = tokenizer
return self
class WordPieceTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return WordPieceTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "wordpiece_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "WordPieceTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class WordLevelTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return WordLevelTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "word_level_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "WordLevelTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class UnigramTokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(Unigram())
tokenizer.pre_tokenizer = ByteLevel()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return UnigramTrainer(unk_token="[UNK]", special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "unigram_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "UnigramTokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"]
class BPETokenizer(HuggingFaceTokenizerBase):
def _init_tokenizer(self) -> Tokenizer:
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = ByteLevel()
tokenizer.normalizer = self.normalizer
return tokenizer
def _init_trainer(self) -> Trainer:
return BpeTrainer(special_tokens=["[UNK]"])
@property
def config(self) -> Config:
return Config(
{
"tokenizer": {
"@tokenizers": "bpe_tokenizer.v1",
}
}
)
@classmethod
def from_config(cls, config: Config) -> "BPETokenizer":
resolved = registry.resolve(config)
return resolved["tokenizer"] | 0.893655 | 0.183832 |
from typing import Any, Iterable, Optional, Union
import spacy
from sklearn.base import BaseEstimator, TransformerMixin
from spacy.language import Language
from spacy.matcher import Matcher
from spacy.tokens import Doc, Token
from skembeddings.base import Serializable
# We create a new extension on tokens.
if not Token.has_extension("filter_pass"):
Token.set_extension("filter_pass", default=False)
ATTRIBUTES = {
"ORTH": "orth_",
"NORM": "norm_",
"LEMMA": "lemma_",
"UPOS": "pos_",
"TAG": "tag_",
"DEP": "dep_",
"LOWER": "lower_",
"SHAPE": "shape_",
"ENT_TYPE": "ent_type_",
}
class SpacyTokenizer(BaseEstimator, TransformerMixin, Serializable):
tokenizer_type_ = "spacy_tokenizer"
def __init__(
self,
model: Union[str, Language] = "en_core_web_sm",
patterns: Optional[list[list[dict[str, Any]]]] = None,
out_attrs: Iterable[str] = ("NORM",),
):
self.model = model
if isinstance(model, Language):
self.nlp = model
elif isinstance(model, str):
self.nlp = spacy.load(model)
else:
raise TypeError(
"'model' either has to be a spaCy"
"nlp object or the name of a model."
)
self.patterns = patterns
self.out_attrs = tuple(out_attrs)
for attr in self.out_attrs:
if attr not in ATTRIBUTES:
raise ValueError(f"{attr} is not a valid out attribute.")
self.matcher = Matcher(self.nlp.vocab)
self.matcher.add(
"FILTER_PASS",
patterns=[] if self.patterns is None else self.patterns,
)
def fit(self, X, y=None):
"""Exists for compatiblity, doesn't do anything."""
return self
def partial_fit(self, X, y=None):
"""Exists for compatiblity, doesn't do anything."""
return self
def label_matching_tokens(self, docs: list[Doc]):
"""Labels tokens that match one of the given patterns."""
for doc in docs:
if self.patterns is not None:
matches = self.matcher(doc)
else:
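                # No patterns supplied: treat the whole doc as a single match so every token passes the filter.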
matches = [(None, 0, len(doc))]
for _, start, end in matches:
for token in doc[start:end]:
token._.set("filter_pass", True)
def token_to_str(self, token: Token) -> str:
"""Returns textual representation of token."""
attributes = [
getattr(token, ATTRIBUTES[attr]) for attr in self.out_attrs
]
return "|".join(attributes)
def transform(self, X: Iterable[str]) -> list[list[str]]:
if isinstance(X, str):
raise TypeError(
"str passed instead of iterable, did you mean to pass [X]?"
)
docs = list(self.nlp.pipe(X))
# Label all tokens according to the patterns.
self.label_matching_tokens(docs)
res: list[list[str]] = []
for doc in docs:
tokens = [
self.token_to_str(token)
for token in doc
if token._.filter_pass
]
res.append(tokens)
return res
def get_feature_names_out(self, input_features=None):
return None | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/tokenizers/spacy.py | spacy.py | from typing import Any, Iterable, Optional, Union
import spacy
from sklearn.base import BaseEstimator, TransformerMixin
from spacy.language import Language
from spacy.matcher import Matcher
from spacy.tokens import Doc, Token
from skembeddings.base import Serializable
# We create a new extension on tokens.
if not Token.has_extension("filter_pass"):
Token.set_extension("filter_pass", default=False)
ATTRIBUTES = {
"ORTH": "orth_",
"NORM": "norm_",
"LEMMA": "lemma_",
"UPOS": "pos_",
"TAG": "tag_",
"DEP": "dep_",
"LOWER": "lower_",
"SHAPE": "shape_",
"ENT_TYPE": "ent_type_",
}
class SpacyTokenizer(BaseEstimator, TransformerMixin, Serializable):
tokenizer_type_ = "spacy_tokenizer"
def __init__(
self,
model: Union[str, Language] = "en_core_web_sm",
patterns: Optional[list[list[dict[str, Any]]]] = None,
out_attrs: Iterable[str] = ("NORM",),
):
self.model = model
if isinstance(model, Language):
self.nlp = model
elif isinstance(model, str):
self.nlp = spacy.load(model)
else:
raise TypeError(
"'model' either has to be a spaCy"
"nlp object or the name of a model."
)
self.patterns = patterns
self.out_attrs = tuple(out_attrs)
for attr in self.out_attrs:
if attr not in ATTRIBUTES:
raise ValueError(f"{attr} is not a valid out attribute.")
self.matcher = Matcher(self.nlp.vocab)
self.matcher.add(
"FILTER_PASS",
patterns=[] if self.patterns is None else self.patterns,
)
def fit(self, X, y=None):
"""Exists for compatiblity, doesn't do anything."""
return self
def partial_fit(self, X, y=None):
"""Exists for compatiblity, doesn't do anything."""
return self
def label_matching_tokens(self, docs: list[Doc]):
"""Labels tokens that match one of the given patterns."""
for doc in docs:
if self.patterns is not None:
matches = self.matcher(doc)
else:
matches = [(None, 0, len(doc))]
for _, start, end in matches:
for token in doc[start:end]:
token._.set("filter_pass", True)
def token_to_str(self, token: Token) -> str:
"""Returns textual representation of token."""
attributes = [
getattr(token, ATTRIBUTES[attr]) for attr in self.out_attrs
]
return "|".join(attributes)
def transform(self, X: Iterable[str]) -> list[list[str]]:
if isinstance(X, str):
raise TypeError(
"str passed instead of iterable, did you mean to pass [X]?"
)
docs = list(self.nlp.pipe(X))
# Label all tokens according to the patterns.
self.label_matching_tokens(docs)
res: list[list[str]] = []
for doc in docs:
tokens = [
self.token_to_str(token)
for token in doc
if token._.filter_pass
]
res.append(tokens)
return res
def get_feature_names_out(self, input_features=None):
return None | 0.905659 | 0.204025 |
import tempfile
from typing import Iterable, Literal
import numpy as np
from confection import Config, registry
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.utils import murmurhash3_32
from skembeddings.base import Serializable
from skembeddings.streams.utils import deeplist
def _tag_enumerate(docs: Iterable[list[str]]) -> list[TaggedDocument]:
"""Tags documents with their integer positions."""
return [TaggedDocument(doc, [i]) for i, doc in enumerate(docs)]
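# Illustrative example (invented data):
#   _tag_enumerate([["a", "b"], ["c"]])
#   -> [TaggedDocument(words=["a", "b"], tags=[0]),
#       TaggedDocument(words=["c"], tags=[1])]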
class ParagraphEmbedding(BaseEstimator, TransformerMixin, Serializable):
"""Scikit-learn compatible Doc2Vec model."""
def __init__(
self,
n_components: int = 100,
window: int = 5,
algorithm: Literal["dm", "dbow"] = "dm",
tagging_scheme: Literal["hash", "closest"] = "hash",
max_docs: int = 100_000,
epochs: int = 10,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
dm_agg: Literal["mean", "sum", "concat"] = "mean",
dm_tag_count: int = 1,
dbow_words: bool = False,
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
self.model_ = None
self.loss_: list[float] = []
self.seen_docs_ = 0
if tagging_scheme not in ["hash", "closest"]:
raise ValueError(
"Tagging scheme should either be 'hash' or 'closest'"
)
self.algorithm = algorithm
self.max_docs = max_docs
self.n_components = n_components
self.n_jobs = n_jobs
self.window = window
self.tagging_scheme = tagging_scheme
self.epochs = epochs
self.random_state = random_state
self.negative = negative
self.ns_exponent = ns_exponent
self.dm_agg = dm_agg
self.dm_tag_count = dm_tag_count
self.dbow_words = dbow_words
self.sample = sample
self.hs = hs
self.batch_words = batch_words
self.shrink_windows = shrink_windows
self.learning_rate = learning_rate
self.min_learning_rate = min_learning_rate
def _tag_documents(
self, documents: list[list[str]]
) -> list[TaggedDocument]:
if self.model_ is None:
raise TypeError(
"You should not call _tag_documents"
"before model is initialised."
)
res = []
for document in documents:
# While we have available slots we just add new documents to those
if self.seen_docs_ < self.max_docs:
res.append(TaggedDocument(document, [self.seen_docs_]))
else:
# If we run out, we choose a tag based on a scheme
if self.tagging_scheme == "hash":
# Here we use murmur hash
hash = murmurhash3_32("".join(document))
id = hash % self.max_docs
res.append(TaggedDocument(document, [id]))
elif self.tagging_scheme == "closest":
# We obtain the key of the most semantically
# similar document and use that.
doc_vector = self.model_.infer_vector(document)
key, _ = self.model_.dv.similar_by_key(doc_vector, topn=1)[
0
]
res.append(TaggedDocument(document, [key]))
else:
raise ValueError(
"Tagging scheme should either be 'hash' or 'closest'"
f" but {self.tagging_scheme} was provided."
)
self.seen_docs_ += 1
return res
def _init_model(self, docs=None) -> Doc2Vec:
return Doc2Vec(
documents=docs,
vector_size=self.n_components,
min_count=0,
alpha=self.learning_rate,
window=self.window,
sample=self.sample,
seed=self.random_state,
workers=self.n_jobs,
min_alpha=self.min_learning_rate,
dm=int(self.algorithm == "dm"),
dm_mean=int(self.dm_agg == "mean"),
dm_concat=int(self.dm_agg == "concat"),
dbow_words=int(self.dbow_words),
dm_tag_count=self.dm_tag_count,
hs=int(self.hs),
negative=self.negative,
ns_exponent=self.ns_exponent,
epochs=self.epochs,
trim_rule=None,
batch_words=self.batch_words,
compute_loss=True,
shrink_windows=self.shrink_windows,
)
def _append_loss(self):
self.loss_.append(self.model_.get_latest_training_loss()) # type: ignore
def fit(self, X: Iterable[Iterable[str]], y=None):
"""Fits a new doc2vec model to the given documents."""
self.seen_docs_ = 0
# Forcing evaluation
X_eval: list[list[str]] = deeplist(X)
n_docs = len(X_eval)
if self.max_docs < n_docs:
init_batch = _tag_enumerate(X_eval[: self.max_docs])
self.model_ = self._init_model(init_batch)
self._append_loss()
self.partial_fit(X_eval[self.max_docs :])
return self
docs = _tag_enumerate(X_eval)
self.model_ = self._init_model(docs)
self._append_loss()
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
"""Partially fits doc2vec model (online fitting)."""
# Force evaluation on iterable
X_eval: list[list[str]] = deeplist(X)
if self.model_ is None:
self.fit(X_eval)
return self
        # We obtain the tagged documents
tagged_docs = self._tag_documents(X_eval)
# Then build vocabulary
self.model_.build_vocab(tagged_docs, update=True)
self.model_.train(
tagged_docs,
total_examples=self.model_.corpus_count,
epochs=1,
compute_loss=True,
)
self._append_loss()
return self
def transform(self, X: Iterable[Iterable[str]]) -> np.ndarray:
"""Infers vectors for all of the given documents."""
if self.model_ is None:
raise NotFittedError(
"Model ha been not fitted, please fit before inference."
)
vectors = [self.model_.infer_vector(list(doc)) for doc in X]
return np.stack(vectors)
@property
def components_(self) -> np.ndarray:
if self.model_ is None:
raise NotFittedError("Model has not been fitted yet.")
return np.array(self.model_.dv.vectors).T
def to_bytes(self) -> bytes:
if self.model_ is None:
raise NotFittedError(
"Can't save model if it hasn't been fitted yet."
)
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
temporary_filepath = tmp.name
self.model_.save(temporary_filepath)
with open(temporary_filepath, "rb") as temp_buffer:
return temp_buffer.read()
def from_bytes(self, data: bytes) -> "ParagraphEmbedding":
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
tmp.write(data)
model = Doc2Vec.load(tmp.name)
self.model_ = model
return self
@property
def config(self) -> Config:
return Config(
{
"embedding": {
"@models": "paragraph_embedding.v1",
**self.get_params(),
}
}
)
@classmethod
def from_config(cls, config: Config) -> "ParagraphEmbedding":
resolved = registry.resolve(config)
return resolved["embedding"] | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/doc2vec.py | doc2vec.py | import tempfile
from typing import Iterable, Literal
import numpy as np
from confection import Config, registry
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.utils import murmurhash3_32
from skembeddings.base import Serializable
from skembeddings.streams.utils import deeplist
def _tag_enumerate(docs: Iterable[list[str]]) -> list[TaggedDocument]:
"""Tags documents with their integer positions."""
return [TaggedDocument(doc, [i]) for i, doc in enumerate(docs)]
class ParagraphEmbedding(BaseEstimator, TransformerMixin, Serializable):
"""Scikit-learn compatible Doc2Vec model."""
def __init__(
self,
n_components: int = 100,
window: int = 5,
algorithm: Literal["dm", "dbow"] = "dm",
tagging_scheme: Literal["hash", "closest"] = "hash",
max_docs: int = 100_000,
epochs: int = 10,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
dm_agg: Literal["mean", "sum", "concat"] = "mean",
dm_tag_count: int = 1,
dbow_words: bool = False,
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
self.model_ = None
self.loss_: list[float] = []
self.seen_docs_ = 0
if tagging_scheme not in ["hash", "closest"]:
raise ValueError(
"Tagging scheme should either be 'hash' or 'closest'"
)
self.algorithm = algorithm
self.max_docs = max_docs
self.n_components = n_components
self.n_jobs = n_jobs
self.window = window
self.tagging_scheme = tagging_scheme
self.epochs = epochs
self.random_state = random_state
self.negative = negative
self.ns_exponent = ns_exponent
self.dm_agg = dm_agg
self.dm_tag_count = dm_tag_count
self.dbow_words = dbow_words
self.sample = sample
self.hs = hs
self.batch_words = batch_words
self.shrink_windows = shrink_windows
self.learning_rate = learning_rate
self.min_learning_rate = min_learning_rate
def _tag_documents(
self, documents: list[list[str]]
) -> list[TaggedDocument]:
if self.model_ is None:
raise TypeError(
"You should not call _tag_documents"
"before model is initialised."
)
res = []
for document in documents:
# While we have available slots we just add new documents to those
if self.seen_docs_ < self.max_docs:
res.append(TaggedDocument(document, [self.seen_docs_]))
else:
# If we run out, we choose a tag based on a scheme
if self.tagging_scheme == "hash":
# Here we use murmur hash
hash = murmurhash3_32("".join(document))
id = hash % self.max_docs
res.append(TaggedDocument(document, [id]))
elif self.tagging_scheme == "closest":
# We obtain the key of the most semantically
# similar document and use that.
doc_vector = self.model_.infer_vector(document)
key, _ = self.model_.dv.similar_by_key(doc_vector, topn=1)[
0
]
res.append(TaggedDocument(document, [key]))
else:
raise ValueError(
"Tagging scheme should either be 'hash' or 'closest'"
f" but {self.tagging_scheme} was provided."
)
self.seen_docs_ += 1
return res
def _init_model(self, docs=None) -> Doc2Vec:
return Doc2Vec(
documents=docs,
vector_size=self.n_components,
min_count=0,
alpha=self.learning_rate,
window=self.window,
sample=self.sample,
seed=self.random_state,
workers=self.n_jobs,
min_alpha=self.min_learning_rate,
dm=int(self.algorithm == "dm"),
dm_mean=int(self.dm_agg == "mean"),
dm_concat=int(self.dm_agg == "concat"),
dbow_words=int(self.dbow_words),
dm_tag_count=self.dm_tag_count,
hs=int(self.hs),
negative=self.negative,
ns_exponent=self.ns_exponent,
epochs=self.epochs,
trim_rule=None,
batch_words=self.batch_words,
compute_loss=True,
shrink_windows=self.shrink_windows,
)
def _append_loss(self):
self.loss_.append(self.model_.get_latest_training_loss()) # type: ignore
def fit(self, X: Iterable[Iterable[str]], y=None):
"""Fits a new doc2vec model to the given documents."""
self.seen_docs_ = 0
# Forcing evaluation
X_eval: list[list[str]] = deeplist(X)
n_docs = len(X_eval)
if self.max_docs < n_docs:
init_batch = _tag_enumerate(X_eval[: self.max_docs])
self.model_ = self._init_model(init_batch)
self._append_loss()
self.partial_fit(X_eval[self.max_docs :])
return self
docs = _tag_enumerate(X_eval)
self.model_ = self._init_model(docs)
self._append_loss()
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
"""Partially fits doc2vec model (online fitting)."""
# Force evaluation on iterable
X_eval: list[list[str]] = deeplist(X)
if self.model_ is None:
self.fit(X_eval)
return self
        # We obtain the tagged documents
tagged_docs = self._tag_documents(X_eval)
# Then build vocabulary
self.model_.build_vocab(tagged_docs, update=True)
self.model_.train(
tagged_docs,
total_examples=self.model_.corpus_count,
epochs=1,
compute_loss=True,
)
self._append_loss()
return self
def transform(self, X: Iterable[Iterable[str]]) -> np.ndarray:
"""Infers vectors for all of the given documents."""
if self.model_ is None:
raise NotFittedError(
"Model ha been not fitted, please fit before inference."
)
vectors = [self.model_.infer_vector(list(doc)) for doc in X]
return np.stack(vectors)
@property
def components_(self) -> np.ndarray:
if self.model_ is None:
raise NotFittedError("Model has not been fitted yet.")
return np.array(self.model_.dv.vectors).T
def to_bytes(self) -> bytes:
if self.model_ is None:
raise NotFittedError(
"Can't save model if it hasn't been fitted yet."
)
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
temporary_filepath = tmp.name
self.model_.save(temporary_filepath)
with open(temporary_filepath, "rb") as temp_buffer:
return temp_buffer.read()
def from_bytes(self, data: bytes) -> "ParagraphEmbedding":
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
tmp.write(data)
model = Doc2Vec.load(tmp.name)
self.model_ = model
return self
@property
def config(self) -> Config:
return Config(
{
"embedding": {
"@models": "paragraph_embedding.v1",
**self.get_params(),
}
}
)
@classmethod
def from_config(cls, config: Config) -> "ParagraphEmbedding":
resolved = registry.resolve(config)
return resolved["embedding"] | 0.863147 | 0.212784 |
import tempfile
from typing import Iterable, Literal
import numpy as np
from confection import Config, registry
from gensim.models import KeyedVectors, Word2Vec
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from skembeddings.base import Serializable
from skembeddings.streams.utils import deeplist
class Word2VecEmbedding(BaseEstimator, TransformerMixin, Serializable):
def __init__(
self,
n_components: int = 100,
window: int = 5,
algorithm: Literal["cbow", "sg"] = "cbow",
agg: Literal["mean", "max", "both"] = "mean",
epochs: int = 5,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
cbow_agg: Literal["mean", "sum"] = "mean",
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
self.agg = agg
self.n_components = n_components
self.n_jobs = n_jobs
self.window = window
self.algorithm = algorithm
self.random_state = random_state
self.learning_rate = learning_rate
self.min_learning_rate = min_learning_rate
self.negative = negative
self.ns_exponent = ns_exponent
self.cbow_agg = cbow_agg
self.sample = sample
self.hs = hs
self.batch_words = batch_words
self.shrink_windows = shrink_windows
self.epochs = epochs
self.model_ = None
self.loss_: list[float] = []
self.n_features_out = (
self.n_components if agg != "both" else self.n_components * 2
)
def _init_model(self, sentences=None) -> Word2Vec:
return Word2Vec(
sentences=sentences,
vector_size=self.n_components,
min_count=0,
alpha=self.learning_rate,
window=self.window,
sample=self.sample,
seed=self.random_state,
workers=self.n_jobs,
min_alpha=self.min_learning_rate,
sg=int(self.algorithm == "sg"),
hs=int(self.hs),
negative=self.negative,
ns_exponent=self.ns_exponent,
cbow_mean=int(self.cbow_agg == "mean"),
epochs=self.epochs,
trim_rule=None,
batch_words=self.batch_words,
compute_loss=True,
shrink_windows=self.shrink_windows,
)
def fit(self, X: Iterable[Iterable[str]], y=None):
self._check_inputs(X)
X = deeplist(X)
self.loss_ = []
self.model_ = self._init_model(sentences=X)
self.loss_.append(self.model_.get_latest_training_loss())
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
self._check_inputs(X)
X = deeplist(X)
if self.model_ is None:
self.fit(X, y)
else:
self.model_.build_vocab(X, update=True)
self.model_.train(
X,
total_examples=self.model_.corpus_count,
epochs=self.model_.epochs,
                compute_loss=True,
)
self.loss_.append(self.model_.get_latest_training_loss())
return self
def _check_inputs(self, X):
options = ["mean", "max", "both"]
if self.agg not in options:
raise ValueError(
f"The `agg` value must be in {options}. Got {self.agg}."
)
def _collect_vectors_single(self, tokens: list[str]) -> np.ndarray:
embeddings = []
for token in tokens:
try:
embeddings.append(self.model_.wv[token]) # type: ignore
except KeyError:
continue
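        # If every token was out of vocabulary, fall back to a single NaN row so the aggregation below yields NaN.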
if not embeddings:
return np.full((1, self.n_features_out), np.nan)
return np.stack(embeddings)
def transform(self, X: Iterable[Iterable[str]], y=None):
"""Transforms the phrase text into a numeric
representation using word embeddings."""
self._check_inputs(X)
X: list[list[str]] = deeplist(X)
embeddings = np.empty((len(X), self.n_features_out))
for i_doc, doc in enumerate(X):
            if not len(doc):
                embeddings[i_doc, :] = np.nan
                continue
doc_vectors = self._collect_vectors_single(doc)
if self.agg == "mean":
embeddings[i_doc, :] = np.mean(doc_vectors, axis=0)
elif self.agg == "max":
embeddings[i_doc, :] = np.max(doc_vectors, axis=0)
elif self.agg == "both":
mean_vector = np.mean(doc_vectors, axis=0)
max_vector = np.max(doc_vectors, axis=0)
embeddings[i_doc, :] = np.concatenate(
(mean_vector, max_vector)
)
return embeddings
@property
def keyed_vectors(self) -> KeyedVectors:
if self.model_ is None:
raise NotFittedError(
"Can't access keyed vectors, model has not been fitted yet."
)
return self.model_.wv
def to_bytes(self) -> bytes:
if self.model_ is None:
raise NotFittedError(
"Can't save model if it hasn't been fitted yet."
)
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
temporary_filepath = tmp.name
self.model_.save(temporary_filepath)
with open(temporary_filepath, "rb") as temp_buffer:
return temp_buffer.read()
def from_bytes(self, data: bytes) -> "Word2VecEmbedding":
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
tmp.write(data)
model = Word2Vec.load(tmp.name)
self.model_ = model
return self
@property
def config(self) -> Config:
return Config(
{
"embedding": {
"@models": "word2vec_embedding.v1",
**self.get_params(),
}
}
)
@classmethod
def from_config(cls, config: Config) -> "Word2VecEmbedding":
resolved = registry.resolve(config)
return resolved["embedding"] | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/word2vec.py | word2vec.py | import tempfile
from typing import Iterable, Literal
import numpy as np
from confection import Config, registry
from gensim.models import KeyedVectors, Word2Vec
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from skembeddings.base import Serializable
from skembeddings.streams.utils import deeplist
class Word2VecEmbedding(BaseEstimator, TransformerMixin, Serializable):
def __init__(
self,
n_components: int = 100,
window: int = 5,
algorithm: Literal["cbow", "sg"] = "cbow",
agg: Literal["mean", "max", "both"] = "mean",
epochs: int = 5,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
cbow_agg: Literal["mean", "sum"] = "mean",
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
self.agg = agg
self.n_components = n_components
self.n_jobs = n_jobs
self.window = window
self.algorithm = algorithm
self.random_state = random_state
self.learning_rate = learning_rate
self.min_learning_rate = min_learning_rate
self.negative = negative
self.ns_exponent = ns_exponent
self.cbow_agg = cbow_agg
self.sample = sample
self.hs = hs
self.batch_words = batch_words
self.shrink_windows = shrink_windows
self.epochs = epochs
self.model_ = None
self.loss_: list[float] = []
self.n_features_out = (
self.n_components if agg != "both" else self.n_components * 2
)
def _init_model(self, sentences=None) -> Word2Vec:
return Word2Vec(
sentences=sentences,
vector_size=self.n_components,
min_count=0,
alpha=self.learning_rate,
window=self.window,
sample=self.sample,
seed=self.random_state,
workers=self.n_jobs,
min_alpha=self.min_learning_rate,
sg=int(self.algorithm == "sg"),
hs=int(self.hs),
negative=self.negative,
ns_exponent=self.ns_exponent,
cbow_mean=int(self.cbow_agg == "mean"),
epochs=self.epochs,
trim_rule=None,
batch_words=self.batch_words,
compute_loss=True,
shrink_windows=self.shrink_windows,
)
def fit(self, X: Iterable[Iterable[str]], y=None):
self._check_inputs(X)
X = deeplist(X)
self.loss_ = []
self.model_ = self._init_model(sentences=X)
self.loss_.append(self.model_.get_latest_training_loss())
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
self._check_inputs(X)
X = deeplist(X)
if self.model_ is None:
self.fit(X, y)
else:
self.model_.build_vocab(X, update=True)
self.model_.train(
X,
total_examples=self.model_.corpus_count,
epochs=self.model_.epochs,
                compute_loss=True,
)
self.loss_.append(self.model_.get_latest_training_loss())
return self
def _check_inputs(self, X):
options = ["mean", "max", "both"]
if self.agg not in options:
raise ValueError(
f"The `agg` value must be in {options}. Got {self.agg}."
)
def _collect_vectors_single(self, tokens: list[str]) -> np.ndarray:
embeddings = []
for token in tokens:
try:
embeddings.append(self.model_.wv[token]) # type: ignore
except KeyError:
continue
if not embeddings:
return np.full((1, self.n_features_out), np.nan)
return np.stack(embeddings)
def transform(self, X: Iterable[Iterable[str]], y=None):
"""Transforms the phrase text into a numeric
representation using word embeddings."""
self._check_inputs(X)
X: list[list[str]] = deeplist(X)
embeddings = np.empty((len(X), self.n_features_out))
for i_doc, doc in enumerate(X):
            if not len(doc):
                embeddings[i_doc, :] = np.nan
                continue
doc_vectors = self._collect_vectors_single(doc)
if self.agg == "mean":
embeddings[i_doc, :] = np.mean(doc_vectors, axis=0)
elif self.agg == "max":
embeddings[i_doc, :] = np.max(doc_vectors, axis=0)
elif self.agg == "both":
mean_vector = np.mean(doc_vectors, axis=0)
max_vector = np.max(doc_vectors, axis=0)
embeddings[i_doc, :] = np.concatenate(
(mean_vector, max_vector)
)
return embeddings
@property
def keyed_vectors(self) -> KeyedVectors:
if self.model_ is None:
raise NotFittedError(
"Can't access keyed vectors, model has not been fitted yet."
)
return self.model_.wv
def to_bytes(self) -> bytes:
if self.model_ is None:
raise NotFittedError(
"Can't save model if it hasn't been fitted yet."
)
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
temporary_filepath = tmp.name
self.model_.save(temporary_filepath)
with open(temporary_filepath, "rb") as temp_buffer:
return temp_buffer.read()
def from_bytes(self, data: bytes) -> "Word2VecEmbedding":
with tempfile.NamedTemporaryFile(prefix="gensim-model-") as tmp:
tmp.write(data)
model = Word2Vec.load(tmp.name)
self.model_ = model
return self
@property
def config(self) -> Config:
return Config(
{
"embedding": {
"@models": "word2vec_embedding.v1",
**self.get_params(),
}
}
)
@classmethod
def from_config(cls, config: Config) -> "Word2VecEmbedding":
resolved = registry.resolve(config)
return resolved["embedding"] | 0.828973 | 0.182753 |
import collections
from itertools import islice
from typing import Iterable
import mmh3
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from thinc.api import Adam, CategoricalCrossentropy, Relu, Softmax, chain
from thinc.types import Floats2d
from tqdm import tqdm
from skembeddings.streams.utils import deeplist
def sliding_window(iterable, n):
# sliding_window('ABCDEFG', 4) --> ABCD BCDE CDEF DEFG
it = iter(iterable)
window = collections.deque(islice(it, n), maxlen=n)
if len(window) == n:
yield tuple(window)
for x in it:
window.append(x)
yield tuple(window)
def hash_embed(
tokens: list[str], n_buckets: int, seeds: tuple[int]
) -> np.ndarray:
"""Embeds ids with the bloom hashing trick."""
embedding = np.zeros((len(tokens), n_buckets), dtype=np.float16)
n_seeds = len(seeds)
prob = 1 / n_seeds
for i_token, token in enumerate(tokens):
for seed in seeds:
i_bucket = mmh3.hash(token, seed=seed) % n_buckets
embedding[i_token, i_bucket] = prob
return embedding
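# Illustrative example (invented values): with two seeds each token row has at most
# two buckets set to 0.5, e.g. hash_embed(["hello", "world"], 16, (0, 1)) has shape (2, 16).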
class BloomWordEmbedding(BaseEstimator, TransformerMixin):
def __init__(
self,
vector_size: int = 100,
window_size: int = 5,
n_buckets: int = 1000,
n_seeds: int = 4,
epochs: int = 5,
):
self.vector_size = vector_size
self.n_buckets = n_buckets
self.window_size = window_size
self.epochs = epochs
self.encoder = None
self.seeds = tuple(range(n_seeds))
self.n_seeds = n_seeds
def _extract_target_context(
self, docs: list[list[str]]
) -> tuple[list[str], list[str]]:
target: list[str] = []
context: list[str] = []
for doc in docs:
for window in sliding_window(doc, n=self.window_size * 2 + 1):
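                # The middle token of each window is the target; all other tokens in the window are its context.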
middle_index = (len(window) - 1) // 2
_target = window[middle_index]
_context = [
token
for i, token in enumerate(window)
if i != middle_index
]
target.extend([_target] * len(_context))
context.extend(_context)
return target, context
def _init_model(self):
self.encoder = Relu(self.vector_size)
self.context_predictor = chain(self.encoder, Softmax(self.n_buckets))
self.loss_calc = CategoricalCrossentropy()
self.optimizer = Adam(
learn_rate=0.001,
beta1=0.9,
beta2=0.999,
eps=1e-08,
L2=1e-6,
grad_clip=1.0,
use_averages=True,
L2_is_weight_decay=True,
)
def _hash_embed(self, tokens: list[str]) -> Floats2d:
ops = self.context_predictor.ops
emb = hash_embed(tokens, self.n_buckets, self.seeds)
return ops.asarray2f(emb)
def _train_batch(self, batch: tuple[list[str], list[str]]):
targets, contexts = batch
_targets = self._hash_embed(targets)
_contexts = self._hash_embed(contexts)
try:
Yh, backprop = self.context_predictor.begin_update(_targets)
except KeyError:
self.context_predictor.initialize(_targets, _contexts)
Yh, backprop = self.context_predictor.begin_update(_targets)
dYh = self.loss_calc.get_grad(Yh, _contexts)
backprop(dYh)
self.context_predictor.finish_update(self.optimizer)
def fit(self, X: Iterable[Iterable[str]], y=None):
X_eval = deeplist(X)
self._init_model()
ops = self.context_predictor.ops
targets, contexts = self._extract_target_context(X_eval)
batches = ops.multibatch(128, targets, contexts, shuffle=True)
for batch in tqdm(batches):
self._train_batch(batch)
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
if self.encoder is None:
return self.fit(X)
X_eval = deeplist(X)
targets, contexts = self._extract_target_context(X_eval)
ops = self.context_predictor.ops
batches = ops.multibatch(128, targets, contexts, shuffle=True)
for batch in batches:
self._train_batch(batch)
return self
def transform(self, X: Iterable[Iterable[str]], y=None) -> np.ndarray:
"""Transforms the phrase text into a numeric
representation using word embeddings."""
if self.encoder is None:
raise NotFittedError(
"Model has not been trained yet, can't transform."
)
ops = self.encoder.ops
X_eval = deeplist(X)
X_new = []
for doc in X_eval:
doc_emb = hash_embed(doc, self.n_buckets, self.seeds)
doc_emb = ops.asarray2f(doc_emb) # type: ignore
doc_vecs = ops.to_numpy(self.encoder.predict(doc_emb))
X_new.append(np.nanmean(doc_vecs, axis=0))
return np.stack(X_new) | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/bloom.py | bloom.py | import collections
from itertools import islice
from typing import Iterable
import mmh3
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from thinc.api import Adam, CategoricalCrossentropy, Relu, Softmax, chain
from thinc.types import Floats2d
from tqdm import tqdm
from skembeddings.streams.utils import deeplist
def sliding_window(iterable, n):
# sliding_window('ABCDEFG', 4) --> ABCD BCDE CDEF DEFG
it = iter(iterable)
window = collections.deque(islice(it, n), maxlen=n)
if len(window) == n:
yield tuple(window)
for x in it:
window.append(x)
yield tuple(window)
def hash_embed(
tokens: list[str], n_buckets: int, seeds: tuple[int]
) -> np.ndarray:
"""Embeds ids with the bloom hashing trick."""
embedding = np.zeros((len(tokens), n_buckets), dtype=np.float16)
n_seeds = len(seeds)
prob = 1 / n_seeds
for i_token, token in enumerate(tokens):
for seed in seeds:
i_bucket = mmh3.hash(token, seed=seed) % n_buckets
embedding[i_token, i_bucket] = prob
return embedding
class BloomWordEmbedding(BaseEstimator, TransformerMixin):
def __init__(
self,
vector_size: int = 100,
window_size: int = 5,
n_buckets: int = 1000,
n_seeds: int = 4,
epochs: int = 5,
):
self.vector_size = vector_size
self.n_buckets = n_buckets
self.window_size = window_size
self.epochs = epochs
self.encoder = None
self.seeds = tuple(range(n_seeds))
self.n_seeds = n_seeds
def _extract_target_context(
self, docs: list[list[str]]
) -> tuple[list[str], list[str]]:
target: list[str] = []
context: list[str] = []
for doc in docs:
for window in sliding_window(doc, n=self.window_size * 2 + 1):
middle_index = (len(window) - 1) // 2
_target = window[middle_index]
_context = [
token
for i, token in enumerate(window)
if i != middle_index
]
target.extend([_target] * len(_context))
context.extend(_context)
return target, context
def _init_model(self):
self.encoder = Relu(self.vector_size)
self.context_predictor = chain(self.encoder, Softmax(self.n_buckets))
self.loss_calc = CategoricalCrossentropy()
self.optimizer = Adam(
learn_rate=0.001,
beta1=0.9,
beta2=0.999,
eps=1e-08,
L2=1e-6,
grad_clip=1.0,
use_averages=True,
L2_is_weight_decay=True,
)
def _hash_embed(self, tokens: list[str]) -> Floats2d:
ops = self.context_predictor.ops
emb = hash_embed(tokens, self.n_buckets, self.seeds)
return ops.asarray2f(emb)
def _train_batch(self, batch: tuple[list[str], list[str]]):
targets, contexts = batch
_targets = self._hash_embed(targets)
_contexts = self._hash_embed(contexts)
try:
Yh, backprop = self.context_predictor.begin_update(_targets)
except KeyError:
self.context_predictor.initialize(_targets, _contexts)
Yh, backprop = self.context_predictor.begin_update(_targets)
dYh = self.loss_calc.get_grad(Yh, _contexts)
backprop(dYh)
self.context_predictor.finish_update(self.optimizer)
def fit(self, X: Iterable[Iterable[str]], y=None):
X_eval = deeplist(X)
self._init_model()
ops = self.context_predictor.ops
targets, contexts = self._extract_target_context(X_eval)
batches = ops.multibatch(128, targets, contexts, shuffle=True)
for batch in tqdm(batches):
self._train_batch(batch)
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
if self.encoder is None:
return self.fit(X)
X_eval = deeplist(X)
targets, contexts = self._extract_target_context(X_eval)
ops = self.context_predictor.ops
batches = ops.multibatch(128, targets, contexts, shuffle=True)
for batch in batches:
self._train_batch(batch)
return self
def transform(self, X: Iterable[Iterable[str]], y=None) -> np.ndarray:
"""Transforms the phrase text into a numeric
representation using word embeddings."""
if self.encoder is None:
raise NotFittedError(
"Model has not been trained yet, can't transform."
)
ops = self.encoder.ops
X_eval = deeplist(X)
X_new = []
for doc in X_eval:
doc_emb = hash_embed(doc, self.n_buckets, self.seeds)
doc_emb = ops.asarray2f(doc_emb) # type: ignore
doc_vecs = ops.to_numpy(self.encoder.predict(doc_emb))
X_new.append(np.nanmean(doc_vecs, axis=0))
return np.stack(X_new) | 0.855791 | 0.228028 |
from typing import Literal
from confection import registry
from skembeddings.error import NotInstalled
try:
from skembeddings.models.word2vec import Word2VecEmbedding
except ModuleNotFoundError:
Word2VecEmbedding = NotInstalled("Word2VecEmbedding", "gensim")
try:
from skembeddings.models.doc2vec import ParagraphEmbedding
except ModuleNotFoundError:
ParagraphEmbedding = NotInstalled("ParagraphEmbedding", "gensim")
@registry.models.register("word2vec_embedding.v1")
def make_word2vec_embedding(
n_components: int = 100,
window: int = 5,
algorithm: Literal["cbow", "sg"] = "cbow",
agg: Literal["mean", "max", "both"] = "mean",
epochs: int = 5,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
cbow_agg: Literal["mean", "sum"] = "mean",
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
return Word2VecEmbedding(
n_components=n_components,
window=window,
algorithm=algorithm,
agg=agg,
epochs=epochs,
random_state=random_state,
negative=negative,
ns_exponent=ns_exponent,
cbow_agg=cbow_agg,
sample=sample,
hs=hs,
batch_words=batch_words,
shrink_windows=shrink_windows,
learning_rate=learning_rate,
min_learning_rate=min_learning_rate,
n_jobs=n_jobs,
)
@registry.models.register("paragraph_embedding.v1")
def make_paragraph_embedding(
n_components: int = 100,
window: int = 5,
algorithm: Literal["dm", "dbow"] = "dm",
tagging_scheme: Literal["hash", "closest"] = "hash",
max_docs: int = 100_000,
epochs: int = 10,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
dm_agg: Literal["mean", "sum", "concat"] = "mean",
dm_tag_count: int = 1,
dbow_words: bool = False,
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
return ParagraphEmbedding(
n_components=n_components,
window=window,
algorithm=algorithm,
tagging_scheme=tagging_scheme,
max_docs=max_docs,
epochs=epochs,
random_state=random_state,
negative=negative,
ns_exponent=ns_exponent,
dm_agg=dm_agg,
dm_tag_count=dm_tag_count,
dbow_words=dbow_words,
sample=sample,
hs=hs,
batch_words=batch_words,
shrink_windows=shrink_windows,
learning_rate=learning_rate,
min_learning_rate=min_learning_rate,
n_jobs=n_jobs,
)
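# Hedged illustration (invented values): the names registered above can be referenced
# from a confection config and resolved back into estimators, e.g.
#
#   [embedding]
#   @models = "word2vec_embedding.v1"
#   n_components = 50
#   algorithm = "sg"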
__all__ = ["Word2VecEmbedding", "ParagraphEmbedding"] | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/__init__.py | __init__.py | from typing import Literal
from confection import registry
from skembeddings.error import NotInstalled
try:
from skembeddings.models.word2vec import Word2VecEmbedding
except ModuleNotFoundError:
Word2VecEmbedding = NotInstalled("Word2VecEmbedding", "gensim")
try:
from skembeddings.models.doc2vec import ParagraphEmbedding
except ModuleNotFoundError:
ParagraphEmbedding = NotInstalled("ParagraphEmbedding", "gensim")
@registry.models.register("word2vec_embedding.v1")
def make_word2vec_embedding(
n_components: int = 100,
window: int = 5,
algorithm: Literal["cbow", "sg"] = "cbow",
agg: Literal["mean", "max", "both"] = "mean",
epochs: int = 5,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
cbow_agg: Literal["mean", "sum"] = "mean",
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
return Word2VecEmbedding(
n_components=n_components,
window=window,
algorithm=algorithm,
agg=agg,
epochs=epochs,
random_state=random_state,
negative=negative,
ns_exponent=ns_exponent,
cbow_agg=cbow_agg,
sample=sample,
hs=hs,
batch_words=batch_words,
shrink_windows=shrink_windows,
learning_rate=learning_rate,
min_learning_rate=min_learning_rate,
n_jobs=n_jobs,
)
@registry.models.register("paragraph_embedding.v1")
def make_paragraph_embedding(
n_components: int = 100,
window: int = 5,
algorithm: Literal["dm", "dbow"] = "dm",
tagging_scheme: Literal["hash", "closest"] = "hash",
max_docs: int = 100_000,
epochs: int = 10,
random_state: int = 0,
negative: int = 5,
ns_exponent: float = 0.75,
dm_agg: Literal["mean", "sum", "concat"] = "mean",
dm_tag_count: int = 1,
dbow_words: bool = False,
sample: float = 0.001,
hs: bool = False,
batch_words: int = 10000,
shrink_windows: bool = True,
learning_rate: float = 0.025,
min_learning_rate: float = 0.0001,
n_jobs: int = 1,
):
return ParagraphEmbedding(
n_components=n_components,
window=window,
algorithm=algorithm,
tagging_scheme=tagging_scheme,
max_docs=max_docs,
epochs=epochs,
random_state=random_state,
negative=negative,
ns_exponent=ns_exponent,
dm_agg=dm_agg,
dm_tag_count=dm_tag_count,
dbow_words=dbow_words,
sample=sample,
hs=hs,
batch_words=batch_words,
shrink_windows=shrink_windows,
learning_rate=learning_rate,
min_learning_rate=min_learning_rate,
n_jobs=n_jobs,
)
__all__ = ["Word2VecEmbedding", "ParagraphEmbedding"] | 0.791781 | 0.164315 |
from typing import Iterable, Literal, Union
import numpy as np
from gensim.models import KeyedVectors
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.exceptions import NotFittedError
from tqdm import tqdm
from skembeddings.streams.utils import deeplist
class VlaweEmbedding(BaseEstimator, TransformerMixin):
"""Scikit-learn compatible VLAWE model."""
def __init__(
self,
word_embeddings: Union[TransformerMixin, KeyedVectors],
prefit: bool = False,
n_clusters: int = 10,
):
self.word_embeddings = word_embeddings
self.prefit = prefit
self.kmeans = None
self.n_clusters = n_clusters
def _collect_vectors_single(self, tokens: list[str]) -> np.ndarray:
if isinstance(self.word_embeddings, KeyedVectors):
kv = self.word_embeddings
embeddings = []
for token in tokens:
try:
embeddings.append(kv[token]) # type: ignore
except KeyError:
continue
if not embeddings:
return np.full((1, kv.vector_size), np.nan)
return np.stack(embeddings)
else:
return self.word_embeddings.transform(tokens)
def _infer_single(self, doc: list[str]) -> np.ndarray:
if self.kmeans is None:
raise NotFittedError(
"Embeddings have not been fitted yet, can't infer."
)
vectors = self._collect_vectors_single(doc)
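        # VLAWE: for every k-means centroid, sum the residuals (word vector minus centroid) and concatenate them into one document vector.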
residuals = []
for centroid in self.kmeans.cluster_centers_:
residual = np.sum(vectors - centroid, axis=0)
residuals.append(residual)
return np.concatenate(residuals)
def fit(self, X: Iterable[Iterable[str]], y=None):
"""Fits a model to the given documents."""
X_eval = deeplist(X)
if (
not isinstance(self.word_embeddings, KeyedVectors)
and not self.prefit
):
print("Fitting word embeddings")
self.word_embeddings.fit(X_eval)
print("Collecting vectors")
all_vecs = np.concatenate(
[self._collect_vectors_single(doc) for doc in X_eval]
)
print("Fitting Kmeans")
self.kmeans = MiniBatchKMeans(n_clusters=self.n_clusters)
self.kmeans.fit(all_vecs)
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
"""Partially fits model (online fitting)."""
if self.kmeans is None:
return self.fit(X)
X_eval = deeplist(X)
if (
not isinstance(self.word_embeddings, KeyedVectors)
and not self.prefit
):
self.word_embeddings.partial_fit(X_eval)
all_vecs = np.concatenate(
[self._collect_vectors_single(doc) for doc in X_eval]
)
self.kmeans.partial_fit(all_vecs)
return self
def transform(self, X: Iterable[Iterable[str]]) -> np.ndarray:
"""Infers vectors for all of the given documents."""
vectors = [self._infer_single(doc) for doc in tqdm(deeplist(X))]
return np.stack(vectors) | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/models/vlawe.py | vlawe.py | from typing import Iterable, Literal, Union
import numpy as np
from gensim.models import KeyedVectors
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import MiniBatchKMeans
from sklearn.exceptions import NotFittedError
from tqdm import tqdm
from skembeddings.streams.utils import deeplist
class VlaweEmbedding(BaseEstimator, TransformerMixin):
"""Scikit-learn compatible VLAWE model."""
def __init__(
self,
word_embeddings: Union[TransformerMixin, KeyedVectors],
prefit: bool = False,
n_clusters: int = 10,
):
self.word_embeddings = word_embeddings
self.prefit = prefit
self.kmeans = None
self.n_clusters = n_clusters
def _collect_vectors_single(self, tokens: list[str]) -> np.ndarray:
if isinstance(self.word_embeddings, KeyedVectors):
kv = self.word_embeddings
embeddings = []
for token in tokens:
try:
embeddings.append(kv[token]) # type: ignore
except KeyError:
continue
if not embeddings:
return np.full((1, kv.vector_size), np.nan)
return np.stack(embeddings)
else:
return self.word_embeddings.transform(tokens)
def _infer_single(self, doc: list[str]) -> np.ndarray:
if self.kmeans is None:
raise NotFittedError(
"Embeddings have not been fitted yet, can't infer."
)
vectors = self._collect_vectors_single(doc)
residuals = []
for centroid in self.kmeans.cluster_centers_:
residual = np.sum(vectors - centroid, axis=0)
residuals.append(residual)
return np.concatenate(residuals)
def fit(self, X: Iterable[Iterable[str]], y=None):
"""Fits a model to the given documents."""
X_eval = deeplist(X)
if (
not isinstance(self.word_embeddings, KeyedVectors)
and not self.prefit
):
print("Fitting word embeddings")
self.word_embeddings.fit(X_eval)
print("Collecting vectors")
all_vecs = np.concatenate(
[self._collect_vectors_single(doc) for doc in X_eval]
)
print("Fitting Kmeans")
self.kmeans = MiniBatchKMeans(n_clusters=self.n_clusters)
self.kmeans.fit(all_vecs)
return self
def partial_fit(self, X: Iterable[Iterable[str]], y=None):
"""Partially fits model (online fitting)."""
if self.kmeans is None:
return self.fit(X)
X_eval = deeplist(X)
if (
not isinstance(self.word_embeddings, KeyedVectors)
and not self.prefit
):
self.word_embeddings.partial_fit(X_eval)
all_vecs = np.concatenate(
[self._collect_vectors_single(doc) for doc in X_eval]
)
self.kmeans.partial_fit(all_vecs)
return self
def transform(self, X: Iterable[Iterable[str]]) -> np.ndarray:
"""Infers vectors for all of the given documents."""
vectors = [self._infer_single(doc) for doc in tqdm(deeplist(X))]
return np.stack(vectors) | 0.883808 | 0.287893 |
import functools
import random
from itertools import islice
from typing import Callable, Iterable, List, Literal, Optional, TypeVar
from sklearn.base import BaseEstimator
def filter_batches(
chunks: Iterable[list], estimator: BaseEstimator, prefit: bool
) -> Iterable[list]:
for chunk in chunks:
if prefit:
predictions = estimator.predict(chunk) # type: ignore
else:
predictions = estimator.fit_predict(chunk) # type: ignore
passes = predictions != -1
filtered_chunk = [elem for elem, _pass in zip(chunk, passes) if _pass]
yield filtered_chunk
def pipe_streams(*transforms: Callable) -> Callable:
"""Pipes iterator transformations together.
Parameters
----------
*transforms: Callable
        Generator functions that transform an iterable into another iterable.
Returns
-------
Callable
Generator function composing all of the other ones.
"""
def _pipe(x: Iterable) -> Iterable:
for f in transforms:
x = f(x)
return x
return _pipe
def reusable(gen_func: Callable) -> Callable:
"""
Function decorator that turns your generator function into an
iterator, thereby making it reusable.
Parameters
----------
gen_func: Callable
Generator function, that you want to be reusable
Returns
----------
_multigen: Callable
Sneakily created iterator class wrapping the generator function
"""
@functools.wraps(gen_func, updated=())
class _multigen:
def __init__(self, *args, limit=None, **kwargs):
self.__args = args
self.__kwargs = kwargs
self.limit = limit
# functools.update_wrapper(self, gen_func)
def __iter__(self):
if self.limit is not None:
return islice(
gen_func(*self.__args, **self.__kwargs), self.limit
)
return gen_func(*self.__args, **self.__kwargs)
return _multigen
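# Illustrative example (invented):
#   @reusable
#   def squares(n):
#       yield from (i * i for i in range(n))
#   s = squares(3)
#   list(s), list(s)  # -> ([0, 1, 4], [0, 1, 4]); the stream can be replayed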
U = TypeVar("U")
def chunk(
iterable: Iterable[U], chunk_size: int, sample_size: Optional[int] = None
) -> Iterable[List[U]]:
"""
Generator function that chunks an iterable for you.
Parameters
----------
iterable: Iterable of T
The iterable you'd like to chunk.
chunk_size: int
The size of chunks you would like to get back
sample_size: int or None, default None
        If specified, the yielded lists will be randomly sampled from the buffer
with replacement. Sample size determines how big you want
those lists to be.
Yields
------
buffer: list of T
sample_size or chunk_size sized lists chunked from
the original iterable.
"""
buffer = []
for index, elem in enumerate(iterable):
buffer.append(elem)
if (index % chunk_size == (chunk_size - 1)) and (index != 0):
if sample_size is None:
yield buffer
else:
yield random.choices(buffer, k=sample_size)
buffer = []
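# Illustrative example: list(chunk(range(10), 4)) -> [[0, 1, 2, 3], [4, 5, 6, 7]]
# (a trailing partial buffer is never yielded).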
def stream_files(
paths: Iterable[str],
lines: bool = False,
not_found_action: Literal["exception", "none", "drop"] = "exception",
) -> Iterable[Optional[str]]:
"""Streams text contents from files on disk.
Parameters
----------
paths: iterable of str
Iterable of file paths on disk.
lines: bool, default False
Indicates whether you want to get a stream over lines
or file contents.
not_found_action: {'exception', 'none', 'drop'}, default 'exception'
Indicates what should happen if a file was not found.
'exception' propagates the exception to top level, 'none' yields
None for each file that fails, 'drop' ignores them completely.
Yields
------
str or None
File contents or lines in files if lines is True.
Can only yield None if not_found_action is 'none'.
"""
for path in paths:
try:
with open(path) as in_file:
if lines:
for line in in_file:
yield line
else:
yield in_file.read()
except FileNotFoundError as e:
if not_found_action == "exception":
raise FileNotFoundError(
f"Streaming failed as file {path} could not be found"
) from e
elif not_found_action == "none":
yield None
elif not_found_action == "drop":
continue
else:
raise ValueError(
"""Unrecognized `not_found_action`.
                    Please choose one of `"exception", "none", "drop"`"""
)
def flatten_stream(nested: Iterable, axis: int = 1) -> Iterable:
"""Turns nested stream into a flat stream.
    If multiple levels are nested, the iterable will be flattened along
the given axis.
To match the behaviour of Awkward Array flattening, axis=0 only
removes None elements from the array along the outermost axis.
Negative axis values are not yet supported.
Parameters
----------
nested: iterable
Iterable of iterables of unknown depth.
axis: int, default 1
Axis/level of depth at which the iterable should be flattened.
Returns
-------
iterable
Iterable with one lower level of nesting.
"""
if not isinstance(nested, Iterable):
raise ValueError(
f"Nesting is too deep, values at level {axis} are not iterables"
)
if axis == 0:
return (elem for elem in nested if elem is not None and (elem != []))
if axis == 1:
for sub in nested:
for elem in sub:
yield elem
elif axis > 1:
for sub in nested:
yield flatten_stream(sub, axis=axis - 1)
else:
raise ValueError("Flattening axis needs to be greater than 0.")
def deeplist(nested) -> list:
"""Recursively turns nested iterable to list.
Parameters
----------
nested: iterable
Nested iterable.
Returns
-------
list
Nested list.
"""
if not isinstance(nested, Iterable) or isinstance(nested, str):
return nested # type: ignore
else:
        return [deeplist(sub) for sub in nested]
 | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/streams/utils.py | utils.py |
import functools
import json
from dataclasses import dataclass
from itertools import islice
from typing import Callable, Iterable, Literal
from sklearn.base import BaseEstimator
from skembeddings.streams.utils import (chunk, deeplist, filter_batches,
flatten_stream, reusable, stream_files)
@dataclass
class Stream:
"""Utility class for streaming, batching and filtering texts
from an external source.
Parameters
----------
iterable: Iterable
Core iterable object in the stream.
"""
iterable: Iterable
def __iter__(self):
return iter(self.iterable)
def filter(self, func: Callable, *args, **kwargs):
"""Filters the stream given a function that returns a bool."""
@functools.wraps(func)
def _func(elem):
return func(elem, *args, **kwargs)
_iterable = reusable(filter)(_func, self.iterable)
return Stream(_iterable)
def map(self, func: Callable, *args, **kwargs):
"""Maps a function over the stream."""
@functools.wraps(func)
def _func(elem):
return func(elem, *args, **kwargs)
_iterable = reusable(map)(_func, self.iterable)
return Stream(_iterable)
def pipe(self, func: Callable, *args, **kwargs):
"""Pipes the stream into a function that takes
the whole stream and returns a new one."""
@functools.wraps(func)
def _func(iterable):
return func(iterable, *args, **kwargs)
_iterable = reusable(_func)(self.iterable)
return Stream(_iterable)
def islice(self, *args):
"""Equivalent to itertools.islice()."""
return self.pipe(islice, *args)
def evaluate(self, deep: bool = False):
"""Evaluates the entire iterable and collects it into
a list.
Parameters
----------
deep: bool, default False
Indicates whether nested iterables should be deeply
evaluated. Uses deeplist() internally.
"""
if deep:
_iterable = deeplist(self.iterable)
else:
_iterable = list(self.iterable)
return Stream(_iterable)
def read_files(
self,
lines: bool = True,
not_found_action: Literal["exception", "none", "drop"] = "exception",
):
"""Reads a stream of file paths from disk.
Parameters
----------
lines: bool, default True
Indicates whether lines should be streamed or not.
not_found_action: str, default 'exception'
Indicates what should be done if a given file is not found.
'exception' raises an exception,
'drop' ignores it,
'none' returns a None for each nonexistent file.
"""
return self.pipe(
stream_files,
lines=lines,
not_found_action=not_found_action,
)
def json(self):
"""Parses a stream of texts into JSON objects."""
return self.map(json.loads)
def grab(self, field: str):
"""Grabs one field from a stream of records."""
return self.map(lambda record: record[field])
def flatten(self, axis=1):
"""Flattens a nested stream along a given axis."""
return self.pipe(flatten_stream, axis=axis)
def chunk(self, size: int):
"""Chunks stream with the given batch size."""
return self.pipe(chunk, chunk_size=size)
def filter_batches(self, estimator: BaseEstimator, prefit: bool = True):
"""Filters batches with a scikit-learn compatible
estimator.
Parameters
----------
estimator: BaseEstimator
Scikit-learn estimator to use for filtering the batches.
Either needs a .predict() or .fit_predict() method.
Every sample that gets labeled -1 will be removed from the
batch.
prefit: bool, default True
Indicates whether the estimator is prefit.
            If it is, .predict() will be used (novelty detection); otherwise
.fit_predict() will be used (outlier detection).
"""
return self.pipe(filter_batches, estimator=estimator, prefit=prefit)
def collect(self, deep: bool = False):
"""Does the same as evaluate()."""
        return self.evaluate(deep)
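# Illustrative sketch (the file paths and the "text" field are hypothetical): a
# typical lazy pipeline built from the methods above.
#
#   >>> stream = (
#   ...     Stream(["corpus/part1.jsonl", "corpus/part2.jsonl"])
#   ...     .read_files(lines=True)
#   ...     .json()
#   ...     .grab("text")
#   ...     .chunk(64)
#   ... )
#   >>> for batch in stream:   # lists of 64 texts; a trailing partial batch is dropped
#   ...     ...
#
# Nothing is read or parsed until the stream is iterated (or evaluate() is called),
# and because the generator functions are wrapped with reusable(), the resulting
# Stream can typically be iterated more than once.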
 | scikit-embeddings | /scikit_embeddings-0.2.0.tar.gz/scikit_embeddings-0.2.0/skembeddings/streams/_stream.py | _stream.py |
.. figure:: https://github.com/Ibotta/pure-predict/blob/master/doc/images/pure-predict.png
:alt: pure-predict
pure-predict: Machine learning prediction in pure Python
========================================================
|License| |Build Status| |PyPI Package| |Downloads| |Python Versions|
``pure-predict`` speeds up and slims down machine learning prediction applications. It is
a foundational tool for serverless inference or small batch prediction with popular machine
learning frameworks like `scikit-learn <https://scikit-learn.org/stable/>`__ and `fasttext <https://fasttext.cc/>`__.
It implements the predict methods of these frameworks in pure Python.
Primary Use Cases
-----------------
The primary use case for ``pure-predict`` is the following scenario:
#. A model is trained in an environment without strong container footprint constraints. Perhaps a long running "offline" job on one or many machines where installing a number of python packages from PyPI is not at all problematic.
#. At prediction time the model needs to be served behind an API. Typical access patterns are to request a prediction for one "record" (one "row" in a ``numpy`` array or one string of text to classify) per request or a mini-batch of records per request.
#. Preferred infrastructure for the prediction service is either serverless (`AWS Lambda <https://aws.amazon.com/lambda/>`__) or a container service where the memory footprint of the container is constrained.
#. The fitted model object's artifacts needed for prediction (coefficients, weights, vocabulary, decision tree artifacts, etc.) are relatively small (10s to 100s of MBs).
.. figure:: https://github.com/Ibotta/pure-predict/blob/master/doc/images/diagram.png
:alt: diagram
In this scenario, a container service with a large dependency footprint can be overkill for a microservice, particularly if the access patterns favor the pricing model of a serverless application. Additionally, for smaller models and single record predictions per request, the ``numpy`` and ``scipy`` functionality in the prediction methods of popular machine learning frameworks work against the application in terms of latency, `underperforming pure python <https://github.com/Ibotta/pure-predict/blob/master/examples/performance_rf.py>`__ in some cases.
Check out the `blog post <https://medium.com/building-ibotta/predict-with-sklearn-20x-faster-9f2803944446>`__
for more information on the motivation and use cases of ``pure-predict``.
Package Details
---------------
It is a Python package for machine learning prediction distributed under
the `Apache 2.0 software license <https://github.com/Ibotta/sk-dist/blob/master/LICENSE>`__.
It contains multiple subpackages which mirror their open source
counterpart (``scikit-learn``, ``fasttext``, etc.). Each subpackage has utilities to
convert a fitted machine learning model into a custom object containing prediction methods
that mirror their native counterparts, but converted to pure python. Additionally, all
relevant model artifacts needed for prediction are converted to pure python.
A ``pure-predict`` model object can then be pickled and later
unpickled without any 3rd party dependencies other than ``pure-predict``.
This eliminates the need to have large dependency packages installed in order to
make predictions with fitted machine learning models using popular open source packages for
training models. These dependencies (``numpy``, ``scipy``, ``scikit-learn``, ``fasttext``, etc.)
are large in size and `not always necessary to make fast and accurate
predictions <https://github.com/Ibotta/pure-predict/blob/master/examples/performance_rf.py>`__.
Additionally, they rely on C extensions that may not be ideal for serverless applications with a python runtime.
Quick Start Example
-------------------
In a Python environment with ``scikit-learn`` and its dependencies installed:
.. code-block:: python
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris
from pure_sklearn.map import convert_estimator
# fit sklearn estimator
X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier()
clf.fit(X, y)
# convert to pure python estimator
clf_pure_predict = convert_estimator(clf)
with open("model.pkl", "wb") as f:
pickle.dump(clf_pure_predict, f)
# make prediction with sklearn estimator
y_pred = clf.predict([[0.25, 2.0, 8.3, 1.0]])
print(y_pred)
[2]
In a Python environment with only ``pure-predict`` installed:
.. code-block:: python
import pickle
# load pickled model
with open("model.pkl", "rb") as f:
clf = pickle.load(f)
# make prediction with pure-predict object
y_pred = clf.predict([[0.25, 2.0, 8.3, 1.0]])
print(y_pred)
[2]
Subpackages
-----------
`pure_sklearn <https://github.com/Ibotta/pure-predict/tree/master/pure_sklearn>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Prediction in pure python for a subset of ``scikit-learn`` estimators and transformers.
- **estimators**
- **linear models** - supports the majority of linear models for classification
- **trees** - decision trees, random forests, gradient boosting and xgboost
- **naive bayes** - a number of popular naive bayes classifiers
- **svm** - linear SVC
- **transformers**
- **preprocessing** - normalization and onehot/ordinal encoders
- **impute** - simple imputation
- **feature extraction** - text (tfidf, count vectorizer, hashing vectorizer) and dictionary vectorization
- **pipeline** - pipelines and feature unions
Sparse data - supports a custom pure python sparse data object; sparse data is handled as would be expected by the relevant transformers and estimators (see the conversion sketch below)
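As a hypothetical sketch (the toy pipeline and training data below are illustrative only), a fitted text classification ``Pipeline`` is converted the same way as a single estimator:

.. code-block:: python

    from sklearn.pipeline import Pipeline
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression
    from pure_sklearn.map import convert_estimator

    # fit a tiny sklearn text classification pipeline
    pipe = Pipeline([("tfidf", TfidfVectorizer()), ("clf", LogisticRegression())])
    pipe.fit(["spam spam spam", "ham and eggs"], [1, 0])

    # convert the whole pipeline so that prediction only requires pure-predict
    pipe_pure = convert_estimator(pipe)
    print(pipe_pure.predict(["spam and eggs"]))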
`pure_fasttext <https://github.com/Ibotta/pure-predict/tree/master/pure_fasttext>`__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Prediction in pure python for ``fasttext``.
- **supervised** - predicts labels for supervised models; no support for quantized models (blocked by `this issue <https://github.com/facebookresearch/fastText/issues/984>`__)
- **unsupervised** - lookup of word or sentence embeddings given input text
Installation
------------
Dependencies
~~~~~~~~~~~~
``pure-predict`` requires:
- `Python <https://www.python.org/>`__ (>= 3.6)
Dependency Notes
~~~~~~~~~~~~~~~~
- ``pure_sklearn`` has been tested with ``scikit-learn`` versions >= 0.20 -- certain functionality may work with lower versions but is not guaranteed. Some functionality is explicitly not supported for certain ``scikit-learn`` versions and exceptions will be raised as appropriate.
- ``xgboost`` requires version >= 0.82 for support with ``pure_sklearn``.
- ``pure-predict`` is not supported with Python 2.
- ``fasttext`` versions <= 0.9.1 have been tested.
User Installation
~~~~~~~~~~~~~~~~~
The easiest way to install ``pure-predict`` is with ``pip``:
::
pip install --upgrade pure-predict
You can also download the source code:
::
git clone https://github.com/Ibotta/pure-predict.git
Testing
~~~~~~~
With ``pytest`` installed, you can run tests locally:
::
pytest pure-predict
Examples
--------
The package contains `examples <https://github.com/Ibotta/pure-predict/tree/master/examples>`__
on how to use ``pure-predict`` in practice.
Calls for Contributors
----------------------
Contributions to ``pure-predict`` are `welcome from all contributors <https://github.com/Ibotta/pure-predict/blob/master/CONTRIBUTING.md>`__. Specific calls for contribution are as follows:
#. Examples, tests and documentation -- particularly more detailed examples with performance testing of various estimators under various constraints.
#. Adding more ``pure_sklearn`` estimators. The ``scikit-learn`` package is extensive and only partially covered by ``pure_sklearn``. `Regression <https://scikit-learn.org/stable/supervised_learning.html#supervised-learning>`__ tasks in particular are missing from ``pure_sklearn``. `Clustering <https://scikit-learn.org/stable/modules/clustering.html#clustering>`__, `dimensionality reduction <https://scikit-learn.org/stable/modules/decomposition.html#decompositions>`__, `nearest neighbors <https://scikit-learn.org/stable/modules/neighbors.html>`__, `feature selection <https://scikit-learn.org/stable/modules/feature_selection.html>`__, non-linear `SVM <https://scikit-learn.org/stable/modules/svm.html>`__, and more are also omitted and would be good candidates for extending ``pure_sklearn``.
#. General efficiency. There is likely low hanging fruit for improving the efficiency of the ``numpy`` and ``scipy`` functionality that has been ported to ``pure-predict``.
#. `Threading <https://docs.python.org/3/library/threading.html>`__ could be considered to improve performance -- particularly for making predictions with multiple records.
#. A public `AWS lambda layer <https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html>`__ containing ``pure-predict``.
Background
----------
The project was started at `Ibotta
Inc. <https://medium.com/building-ibotta>`__ on the machine learning
team and open sourced in 2020. It is currently maintained by the machine
learning team at Ibotta.
Acknowledgements
~~~~~~~~~~~~~~~~
Thanks to `David Mitchell <https://github.com/dlmitchell>`__ and `Andrew Tilley <https://github.com/tilleyand>`__ for internal review before open source. Thanks to `James Foley <https://github.com/chadfoley36>`__ for logo artwork.
.. figure:: https://github.com/Ibotta/pure-predict/blob/master/doc/images/ibottaml.png
:alt: IbottaML
.. |License| image:: https://img.shields.io/badge/License-Apache%202.0-blue.svg
:target: https://opensource.org/licenses/Apache-2.0
.. |Build Status| image:: https://travis-ci.com/Ibotta/pure-predict.png?branch=master
:target: https://travis-ci.com/Ibotta/pure-predict
.. |PyPI Package| image:: https://badge.fury.io/py/pure-predict.svg
:target: https://pypi.org/project/pure-predict/
.. |Downloads| image:: https://pepy.tech/badge/pure-predict
:target: https://pepy.tech/project/pure-predict
.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pure-predict
:target: https://pypi.org/project/pure-predict/
 | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/README.rst | README.rst |
MAPPING = {
"LogisticRegression": "scikit_endpoint.linear_model.LogisticRegressionPure",
"RidgeClassifier": "scikit_endpoint.linear_model.RidgeClassifierPure",
"SGDClassifier": "scikit_endpoint.linear_model.SGDClassifierPure",
"Perceptron": "scikit_endpoint.linear_model.PerceptronPure",
"PassiveAggressiveClassifier": "scikit_endpoint.linear_model.PassiveAggressiveClassifierPure",
"LinearSVC": "scikit_endpoint.svm.LinearSVCPure",
"DecisionTreeClassifier": "scikit_endpoint.tree.DecisionTreeClassifierPure",
"DecisionTreeRegressor": "scikit_endpoint.tree.DecisionTreeRegressorPure",
"ExtraTreeClassifier": "scikit_endpoint.tree.ExtraTreeClassifierPure",
"ExtraTreeRegressor": "scikit_endpoint.tree.ExtraTreeRegressorPure",
"RandomForestClassifier": "scikit_endpoint.ensemble.RandomForestClassifierPure",
"BaggingClassifier": "scikit_endpoint.ensemble.BaggingClassifierPure",
"GradientBoostingClassifier": "scikit_endpoint.ensemble.GradientBoostingClassifierPure",
"XGBClassifier": "scikit_endpoint.xgboost.XGBClassifierPure",
"ExtraTreesClassifier": "scikit_endpoint.ensemble.ExtraTreesClassifierPure",
"GaussianNB": "scikit_endpoint.naive_bayes.GaussianNBPure",
"MultinomialNB": "scikit_endpoint.naive_bayes.MultinomialNBPure",
"ComplementNB": "scikit_endpoint.naive_bayes.ComplementNBPure",
"SimpleImputer": "scikit_endpoint.impute.SimpleImputerPure",
"MissingIndicator": "scikit_endpoint.impute.MissingIndicatorPure",
"DummyClassifier": "scikit_endpoint.dummy.DummyClassifierPure",
"Pipeline": "scikit_endpoint.pipeline.PipelinePure",
"FeatureUnion": "scikit_endpoint.pipeline.FeatureUnionPure",
"OneHotEncoder": "scikit_endpoint.preprocessing.OneHotEncoderPure",
"OrdinalEncoder": "scikit_endpoint.preprocessing.OrdinalEncoderPure",
"StandardScaler": "scikit_endpoint.preprocessing.StandardScalerPure",
"MinMaxScaler": "scikit_endpoint.preprocessing.MinMaxScalerPure",
"MaxAbsScaler": "scikit_endpoint.preprocessing.MaxAbsScalerPure",
"Normalizer": "scikit_endpoint.preprocessing.NormalizerPure",
"DictVectorizer": "scikit_endpoint.feature_extraction.DictVectorizerPure",
"TfidfVectorizer": "scikit_endpoint.feature_extraction.text.TfidfVectorizerPure",
"CountVectorizer": "scikit_endpoint.feature_extraction.text.CountVectorizerPure",
"TfidfTransformer": "scikit_endpoint.feature_extraction.text.TfidfTransformerPure",
"HashingVectorizer": "scikit_endpoint.feature_extraction.text.HashingVectorizerPure",
"VarianceThreshold": "scikit_endpoint.feature_selection.VarianceThresholdPure",
}
def _instantiate_class(module, name):
module = __import__(module, fromlist=[name])
return getattr(module, name)
def convert_estimator(est, min_version=None):
"""Convert scikit-learn estimator to its scikit_endpoint counterpart"""
est_name = est.__class__.__name__
pure_est_name = MAPPING.get(est_name)
if pure_est_name is None:
raise ValueError(
"Cannot find 'scikit_endpoint' counterpart for {}".format(est_name)
)
module = ".".join(pure_est_name.split(".")[:-1])
name = pure_est_name.split(".")[-1]
    return _instantiate_class(module, name)(est)
 | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/map.py | map.py |
from math import exp, log
from operator import mul
from .utils import shape, sparse_list, issparse
def dot(A, B):
"""
Dot product between two arrays.
A -> n_dim = 1
B -> n_dim = 2
"""
arr = []
for i in range(len(B)):
if isinstance(A, dict):
val = sum([v * B[i][k] for k, v in A.items()])
else:
val = sum(map(mul, A, B[i]))
arr.append(val)
return arr
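# Illustrative sketch (hypothetical usage, derived from the implementation above):
# dot() multiplies one sample A (a 1-D list, or a dict for a sparse row) with every
# row of the 2-D list B, returning [sum_k A[k] * B[i][k] for each row i].
#
#   >>> dot([1.0, 2.0], [[3.0, 4.0], [5.0, 6.0]])
#   [11.0, 17.0]
#   >>> dot({1: 2.0}, [[3.0, 4.0], [5.0, 6.0]])  # only index 1 is non-zero
#   [8.0, 12.0]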
def dot_2d(A, B):
"""
Dot product between two arrays.
A -> n_dim = 2
B -> n_dim = 2
"""
return [dot(a, B) for a in A]
def matmult_same_dim(A, B):
"""Multiply two matrices of the same dimension"""
shape_A = shape(A)
issparse_A = issparse(A)
issparse_B = issparse(B)
if shape_A != shape(B):
raise ValueError("Shape A must equal shape B.")
if not (issparse_A == issparse_B):
raise ValueError("Both A and B must be sparse or dense.")
X = []
if not issparse_A:
for i in range(shape_A[0]):
X.append([(A[i][j] * B[i][j]) for j in range(shape_A[1])])
else:
for i in range(shape_A[0]):
nested_res = [
[(k_b, v_a * v_b) for k_b, v_b in B[i].items() if k_b == k_a]
for k_a, v_a in A[i].items()
]
X.append(dict([item for sublist in nested_res for item in sublist]))
X = sparse_list(X, size=A.size, dtype=A.dtype)
return X
def transpose(A):
"""Transpose 2-D list"""
if issparse(A):
raise ValueError("Sparse input not supported.")
return list(map(list, [*zip(*A)]))
def expit(x):
"""Expit function for scaler input"""
return 1.0 / (1.0 + safe_exp(-x))
def sfmax(arr):
"""Softmax function for 1-D list or a single sparse_list element"""
if isinstance(arr, dict):
expons = {k: safe_exp(v) for k, v in arr.items()}
denom = sum(expons.values())
out = {k: (v / float(denom)) for k, v in expons.items()}
else:
expons = list(map(safe_exp, arr))
out = list(map(lambda x: x / float(sum(expons)), expons))
return out
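# Illustrative sketch (hypothetical usage): sfmax() implements
# softmax(x)_i = exp(x_i) / sum_j exp(x_j), applied to a dense row or to the
# stored entries of a sparse (dict) row.
#
#   >>> sfmax([0.0, 0.0])
#   [0.5, 0.5]
#   >>> sfmax({0: 0.0, 3: 0.0})
#   {0: 0.5, 3: 0.5}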
def safe_log(x):
"""Equivalent to numpy log with scalar input"""
if x == 0:
return -float("Inf")
elif x < 0:
return float("Nan")
else:
return log(x)
def safe_exp(x):
"""Equivalent to numpy exp with scalar input"""
try:
return exp(x)
except OverflowError:
return float("Inf")
def operate_2d(A, B, func):
"""Apply elementwise function to 2-D lists"""
if issparse(A) or issparse(B):
raise ValueError("Sparse input not supported.")
if shape(A) != shape(B):
raise ValueError("'A' and 'B' must have the same shape")
return [list(map(func, A[index], B[index])) for index in range(len(A))]
def apply_2d(A, func):
"""Apply function to every element of 2-D list"""
if issparse(A):
raise ValueError("Sparse input not supported.")
return [list(map(func, a)) for a in A]
def apply_2d_sparse(A, func):
"""Apply function to every non-zero element of sparse_list"""
if not issparse(A):
raise ValueError("Dense input not supported.")
A_ = [{k: func(v) for k, v in a.items()} for a in A]
return sparse_list(A_, size=A.size, dtype=A.dtype)
def apply_axis_2d(A, func, axis=1):
"""
Apply function along axis of 2-D list or non-zero
elements of sparse_list.
"""
if issparse(A) and (axis == 0):
raise ValueError("Sparse input not supported when axis=0.")
if axis == 1:
if issparse(A):
return [func(a.values()) for a in A]
else:
return [func(a) for a in A]
elif axis == 0:
return [func(a) for a in transpose(A)]
else:
raise ValueError("Input 'axis' must be 0 or 1")
def ravel(A):
"""Equivalent of numpy ravel on 2-D list"""
if issparse(A):
raise ValueError("Sparse input not supported.")
return list(transpose(A)[0])
def slice_column(A, idx):
"""Slice columns from 2-D list A. Handles sparse data"""
if isinstance(idx, int):
if issparse(A):
return [a.get(idx, A.dtype(0)) for a in A]
else:
return [a[idx] for a in A]
if isinstance(idx, (list, tuple)):
if issparse(A):
A_ = [{k: v for k, v in a.items() if k in idx} for a in A]
return sparse_list(A_, size=A.size, dtype=A.dtype)
else:
return [[a[i] for i in idx] for a in A]
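# Illustrative sketch (hypothetical usage): selecting columns from a dense 2-D list.
#
#   >>> A = [[1, 2, 3], [4, 5, 6]]
#   >>> slice_column(A, 1)
#   [2, 5]
#   >>> slice_column(A, [0, 2])
#   [[1, 3], [4, 6]]
#
# For sparse_list input, an int index returns a dense column (missing keys become
# 0), while a list of indices returns another sparse_list restricted to those keys.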
def accumu(lis):
"""Cumulative sum of list"""
total = 0
for x in lis:
total += x
        yield total
 | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/base.py | base.py |
import pickle
import time
from warnings import warn
from distutils.version import LooseVersion
CONTAINERS = (list, dict, tuple)
TYPES = (int, float, str, bool, type)
MIN_VERSION = "0.20"
def check_types(obj, containers=CONTAINERS, types=TYPES):
"""
Checks if input object is an allowed type. Objects can be
acceptable containers or acceptable types themselves.
Containers are checked recursively to ensure all contained
types are valid. If object is a `scikit_endpoint` type, its
attributes are all recursively checked.
"""
if isinstance(obj, containers):
if isinstance(obj, (list, tuple)):
for ob in obj:
check_types(ob)
else:
for k, v in obj.items():
check_types(k)
check_types(v)
elif isinstance(obj, types):
pass
elif "scikit_endpoint" in str(type(obj)):
for attr in vars(obj):
check_types(getattr(obj, attr))
elif obj is None:
pass
else:
raise ValueError("Object contains invalid type: {}".format(type(obj)))
def check_version(estimator, min_version=None):
"""Checks the version of the scikit-learn estimator"""
warning_str = (
"Estimators fitted with sklearn version < {} are not guaranteed to work".format(
MIN_VERSION
)
)
try:
version_ = estimator.__getstate__()["_sklearn_version"]
except: # noqa E722
warn(warning_str)
return
if (min_version is not None) and (
LooseVersion(version_) < LooseVersion(min_version)
):
raise Exception(
"The sklearn version is too low for this estimator; must be >= {}".format(
min_version
)
)
elif LooseVersion(version_) < LooseVersion(MIN_VERSION):
warn(warning_str)
def convert_type(dtype):
"""Converts a datatype to its pure python equivalent"""
val = dtype(0)
if hasattr(val, "item"):
return type(val.item())
else:
return dtype
def check_array(X, handle_sparse="error"):
"""
Checks if array is compatible for prediction with
`scikit_endpoint` classes. Input 'X' should be a non-empty
`list` or `sparse_list`. If 'X' is sparse, flexible
sparse handling is applied, allowing sparse by default,
or optionally erroring on sparse input.
"""
if issparse(X):
if handle_sparse == "allow":
return X
elif handle_sparse == "error":
raise ValueError("Sparse input is not supported " "for this estimator")
else:
raise ValueError(
"Invalid value for 'handle_sparse' "
"input. Acceptable values are 'allow' or 'error'"
)
if not isinstance(X, list):
raise TypeError("Input 'X' must be a list")
if len(X) == 0:
return ValueError("Input 'X' must not be empty")
return X
def shape(X):
"""
Checks the shape of input list. Similar to
numpy `ndarray.shape()`. Handles `list` or
`sparse_list` input.
"""
if ndim(X) == 1:
return (len(X),)
elif ndim(X) == 2:
if issparse(X):
return (len(X), X.size)
else:
return (len(X), len(X[0]))
def ndim(X):
"""Computes the dimension of input list"""
if isinstance(X[0], (list, dict)):
return 2
else:
return 1
def tosparse(A):
"""Converts input dense list to a `sparse_list`"""
return sparse_list(A)
def todense(A):
"""Converts input `sparse_list` to a dense list"""
return A.todense()
def issparse(A):
"""Checks if input list is a `sparse_list`"""
return isinstance(A, sparse_list)
class sparse_list(list):
"""
Pure python implementation of a 2-D sparse data structure.
The data structure is a list of dictionaries. Each dictionary
represents a 'row' of data. The dictionary keys correspond to the
indices of 'columns' and the dictionary values correspond to the
data value associated with that index. Missing keys are assumed
to have values of 0.
Args:
A (list): 2-D list of lists or list of dicts
size (int): Number of 'columns' of the data structure
dtype (type): Data type of data values
Examples:
>>> A = [[0,1,0], [0,1,1]]
>>> print(sparse_list(A))
        ... [{1:1}, {1:1, 2:1}]
>>>
>>> B = [{3:0.5}, {1:0.9, 10:0.2}]
>>> print(sparse_list(B, size=11, dtype=float))
... [{3:0.5}, {1:0.9, 10:0.2}]
"""
def __init__(self, A, size=None, dtype=None):
if isinstance(A[0], dict):
self.dtype = float if dtype is None else dtype
self.size = size
for row in A:
self.append(row)
else:
A = check_array(A)
self.size = shape(A)[1]
self.dtype = type(A[0][0])
for row in A:
self.append(
dict([(i, row[i]) for i in range(self.size) if row[i] != 0])
)
def todense(self):
"""Converts `sparse_list` instance to a dense list"""
A_dense = []
zero_val = self.dtype(0)
for row in self:
A_dense.append([row.get(i, zero_val) for i in range(self.size)])
return A_dense
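# Illustrative sketch (hypothetical usage): round-tripping between a dense list and
# the sparse_list structure defined above.
#
#   >>> A = tosparse([[0, 1, 0], [2, 0, 3]])
#   >>> issparse(A), shape(A)
#   (True, (2, 3))
#   >>> todense(A)
#   [[0, 1, 0], [2, 0, 3]]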
def performance_comparison(sklearn_estimator, pure_sklearn_estimator, X):
"""
Profile performance characteristics between sklearn estimator and
corresponding pure-predict estimator.
Args:
sklearn_estimator (object)
pure_sklearn_estimator (object)
X (numpy ndarray): features for prediction
"""
# -- profile pickled object size: sklearn vs pure-predict
pickled = pickle.dumps(sklearn_estimator)
pickled_ = pickle.dumps(pure_sklearn_estimator)
print("Pickle Size sklearn: {}".format(len(pickled)))
print("Pickle Size pure-predict: {}".format(len(pickled_)))
print("Difference: {}".format(len(pickled_) / float(len(pickled))))
# -- profile unpickle time: sklearn vs pure-predict
start = time.time()
_ = pickle.loads(pickled)
pickle_t = time.time() - start
print("Unpickle time sklearn: {}".format(pickle_t))
start = time.time()
_ = pickle.loads(pickled_)
pickle_t_ = time.time() - start
print("Unpickle time pure-predict: {}".format(pickle_t_))
print("Difference: {}".format(pickle_t_ / pickle_t))
# -- profile single record predict latency: sklearn vs pure-predict
X_pred = X[:1]
X_pred_ = X_pred if isinstance(X_pred, list) else X_pred.tolist()
start = time.time()
_ = sklearn_estimator.predict(X_pred)
pred_t = time.time() - start
print("Predict 1 record sklearn: {}".format(pred_t))
start = time.time()
_ = pure_sklearn_estimator.predict(X_pred_)
pred_t_ = time.time() - start
print("Predict 1 record pure-predict: {}".format(pred_t_))
print("Difference: {}".format(pred_t_ / pred_t)) | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/utils.py | utils.py | import pickle
import time
from warnings import warn
from distutils.version import LooseVersion
CONTAINERS = (list, dict, tuple)
TYPES = (int, float, str, bool, type)
MIN_VERSION = "0.20"
def check_types(obj, containers=CONTAINERS, types=TYPES):
"""
Checks if input object is an allowed type. Objects can be
acceptable containers or acceptable types themselves.
Containers are checked recursively to ensure all contained
types are valid. If object is a `scikit_endpoint` type, its
attributes are all recursively checked.
"""
if isinstance(obj, containers):
if isinstance(obj, (list, tuple)):
for ob in obj:
check_types(ob)
else:
for k, v in obj.items():
check_types(k)
check_types(v)
elif isinstance(obj, types):
pass
elif "scikit_endpoint" in str(type(obj)):
for attr in vars(obj):
check_types(getattr(obj, attr))
elif obj is None:
pass
else:
raise ValueError("Object contains invalid type: {}".format(type(obj)))
def check_version(estimator, min_version=None):
"""Checks the version of the scikit-learn estimator"""
warning_str = (
"Estimators fitted with sklearn version < {} are not guaranteed to work".format(
MIN_VERSION
)
)
try:
version_ = estimator.__getstate__()["_sklearn_version"]
except: # noqa E722
warn(warning_str)
return
if (min_version is not None) and (
LooseVersion(version_) < LooseVersion(min_version)
):
raise Exception(
"The sklearn version is too low for this estimator; must be >= {}".format(
min_version
)
)
elif LooseVersion(version_) < LooseVersion(MIN_VERSION):
warn(warning_str)
def convert_type(dtype):
"""Converts a datatype to its pure python equivalent"""
val = dtype(0)
if hasattr(val, "item"):
return type(val.item())
else:
return dtype
def check_array(X, handle_sparse="error"):
"""
Checks if array is compatible for prediction with
`scikit_endpoint` classes. Input 'X' should be a non-empty
`list` or `sparse_list`. If 'X' is sparse, flexible
sparse handling is applied, allowing sparse by default,
or optionally erroring on sparse input.
"""
if issparse(X):
if handle_sparse == "allow":
return X
elif handle_sparse == "error":
raise ValueError("Sparse input is not supported " "for this estimator")
else:
raise ValueError(
"Invalid value for 'handle_sparse' "
"input. Acceptable values are 'allow' or 'error'"
)
if not isinstance(X, list):
raise TypeError("Input 'X' must be a list")
if len(X) == 0:
return ValueError("Input 'X' must not be empty")
return X
def shape(X):
"""
Checks the shape of input list. Similar to
numpy `ndarray.shape()`. Handles `list` or
`sparse_list` input.
"""
if ndim(X) == 1:
return (len(X),)
elif ndim(X) == 2:
if issparse(X):
return (len(X), X.size)
else:
return (len(X), len(X[0]))
def ndim(X):
"""Computes the dimension of input list"""
if isinstance(X[0], (list, dict)):
return 2
else:
return 1
def tosparse(A):
"""Converts input dense list to a `sparse_list`"""
return sparse_list(A)
def todense(A):
"""Converts input `sparse_list` to a dense list"""
return A.todense()
def issparse(A):
"""Checks if input list is a `sparse_list`"""
return isinstance(A, sparse_list)
class sparse_list(list):
"""
Pure python implementation of a 2-D sparse data structure.
The data structure is a list of dictionaries. Each dictionary
represents a 'row' of data. The dictionary keys correspond to the
indices of 'columns' and the dictionary values correspond to the
data value associated with that index. Missing keys are assumed
to have values of 0.
Args:
A (list): 2-D list of lists or list of dicts
size (int): Number of 'columns' of the data structure
dtype (type): Data type of data values
Examples:
>>> A = [[0,1,0], [0,1,1]]
>>> print(sparse_list(A))
... [{1:1}, {2:1, 3:1}]
>>>
>>> B = [{3:0.5}, {1:0.9, 10:0.2}]
>>> print(sparse_list(B, size=11, dtype=float))
... [{3:0.5}, {1:0.9, 10:0.2}]
"""
def __init__(self, A, size=None, dtype=None):
if isinstance(A[0], dict):
self.dtype = float if dtype is None else dtype
self.size = size
for row in A:
self.append(row)
else:
A = check_array(A)
self.size = shape(A)[1]
self.dtype = type(A[0][0])
for row in A:
self.append(
dict([(i, row[i]) for i in range(self.size) if row[i] != 0])
)
def todense(self):
"""Converts `sparse_list` instance to a dense list"""
A_dense = []
zero_val = self.dtype(0)
for row in self:
A_dense.append([row.get(i, zero_val) for i in range(self.size)])
return A_dense
def performance_comparison(sklearn_estimator, pure_sklearn_estimator, X):
"""
Profile performance characteristics between sklearn estimator and
corresponding pure-predict estimator.
Args:
sklearn_estimator (object)
pure_sklearn_estimator (object)
X (numpy ndarray): features for prediction
"""
# -- profile pickled object size: sklearn vs pure-predict
pickled = pickle.dumps(sklearn_estimator)
pickled_ = pickle.dumps(pure_sklearn_estimator)
print("Pickle Size sklearn: {}".format(len(pickled)))
print("Pickle Size pure-predict: {}".format(len(pickled_)))
print("Difference: {}".format(len(pickled_) / float(len(pickled))))
# -- profile unpickle time: sklearn vs pure-predict
start = time.time()
_ = pickle.loads(pickled)
pickle_t = time.time() - start
print("Unpickle time sklearn: {}".format(pickle_t))
start = time.time()
_ = pickle.loads(pickled_)
pickle_t_ = time.time() - start
print("Unpickle time pure-predict: {}".format(pickle_t_))
print("Difference: {}".format(pickle_t_ / pickle_t))
# -- profile single record predict latency: sklearn vs pure-predict
X_pred = X[:1]
X_pred_ = X_pred if isinstance(X_pred, list) else X_pred.tolist()
start = time.time()
_ = sklearn_estimator.predict(X_pred)
pred_t = time.time() - start
print("Predict 1 record sklearn: {}".format(pred_t))
start = time.time()
_ = pure_sklearn_estimator.predict(X_pred_)
pred_t_ = time.time() - start
print("Predict 1 record pure-predict: {}".format(pred_t_))
print("Difference: {}".format(pred_t_ / pred_t)) | 0.785267 | 0.296508 |
from abc import abstractmethod
from math import pi
from .base import dot, transpose, safe_log, safe_exp
from .utils import check_array, check_types, check_version
__all__ = ["GaussianNBPure", "MultinomialNBPure", "ComplementNBPure"]
class _BaseNBPure:
"""Base class for naive Bayes classifiers"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
"""
X = check_array(X, handle_sparse="error")
jll = self._joint_log_likelihood(X)
indices = map(lambda a: a.index(max(a)), jll)
return [self.classes_[i] for i in indices]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
"""
X = check_array(X, handle_sparse="error")
jll = self._joint_log_likelihood(X)
log_prob_x = list(map(lambda a: safe_log(sum(map(safe_exp, a))), jll))
return [
list(map(lambda a: a - log_prob_x[index], jll[index]))
for index in range(len(jll))
]
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
"""
return [list(map(safe_exp, a)) for a in self.predict_log_proba(X)]
class GaussianNBPure(_BaseNBPure):
"""
Pure python implementation of `GaussianNB`.
Args:
estimator (sklearn estimator): fitted `GaussianNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_prior_ = estimator.class_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.var_ = estimator.var_.tolist()
self.theta_ = estimator.theta_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
joint_log_likelihood = []
for i in range(len(self.classes_)):
jointi = safe_log(self.class_prior_[i])
n_ij = -0.5 * sum(list(map(lambda x: safe_log(2.0 * pi * x), self.var_[i])))
jll = [
list(
map(
lambda b: ((a[b] - self.theta_[i][b]) ** 2) / self.var_[i][b],
range(len(a)),
)
)
for a in X
]
jll = list(map(lambda a: 0.5 * sum(a), jll))
jll = [(n_ij - a) + jointi for a in jll]
joint_log_likelihood.append(jll)
return transpose(joint_log_likelihood)
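# Illustrative sketch (not part of the library): converting a fitted sklearn
# `GaussianNB` and predicting with the pure-python class above. The fitted
# sklearn model and the 2-D list inputs (X_train, y_train, etc.) are
# assumptions made for the example.
#
#     from sklearn.naive_bayes import GaussianNB
#     clf = GaussianNB().fit(X_train, y_train)   # numpy-based training
#     clf_pure = GaussianNBPure(clf)             # prediction without numpy
#     clf_pure.predict([[0.1, 2.3], [1.5, 0.4]])
#     clf_pure.predict_proba([[0.1, 2.3]])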
class MultinomialNBPure(_BaseNBPure):
"""
Pure python implementation of `MultinomialNB`.
Args:
estimator (sklearn estimator): fitted `MultinomialNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_log_prior_ = estimator.class_log_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.feature_log_prob_ = estimator.feature_log_prob_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return [self._jll(a) for a in X]
def _jll(self, x):
"""Calculate the joint log likelihood for one sample"""
dot_prod = dot(x, self.feature_log_prob_)
return [
(dot_prod[index] + self.class_log_prior_[index])
for index in range(len(self.classes_))
]
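# Illustrative sketch (not part of the library): `MultinomialNBPure` wraps a
# fitted sklearn `MultinomialNB`; each sample's score is its dot product with
# `feature_log_prob_` plus the class log priors. The count-feature inputs are
# an assumption of the example.
#
#     from sklearn.naive_bayes import MultinomialNB
#     clf_pure = MultinomialNBPure(MultinomialNB().fit(X_counts, y))
#     clf_pure.predict([[3, 0, 1, 0], [0, 2, 0, 5]])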
class ComplementNBPure(_BaseNBPure):
"""
Pure python implementation of `ComplementNB`.
Args:
estimator (sklearn estimator): fitted `ComplementNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_log_prior_ = estimator.class_log_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.feature_log_prob_ = estimator.feature_log_prob_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X"""
jll = [dot(x, self.feature_log_prob_) for x in X]
if len(self.classes_) == 1:
jll = [[x[0] + self.class_log_prior_[0]] for x in jll]
return jll | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/naive_bayes.py | naive_bayes.py | from abc import abstractmethod
from math import pi
from .base import dot, transpose, safe_log, safe_exp
from .utils import check_array, check_types, check_version
__all__ = ["GaussianNBPure", "MultinomialNBPure", "ComplementNBPure"]
class _BaseNBPure:
"""Base class for naive Bayes classifiers"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
"""
X = check_array(X, handle_sparse="error")
jll = self._joint_log_likelihood(X)
indices = map(lambda a: a.index(max(a)), jll)
return [self.classes_[i] for i in indices]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
"""
X = check_array(X, handle_sparse="error")
jll = self._joint_log_likelihood(X)
log_prob_x = list(map(lambda a: safe_log(sum(map(safe_exp, a))), jll))
return [
list(map(lambda a: a - log_prob_x[index], jll[index]))
for index in range(len(jll))
]
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
"""
return [list(map(safe_exp, a)) for a in self.predict_log_proba(X)]
class GaussianNBPure(_BaseNBPure):
"""
Pure python implementation of `GaussianNB`.
Args:
estimator (sklearn estimator): fitted `GaussianNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_prior_ = estimator.class_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.var_ = estimator.var_.tolist()
self.theta_ = estimator.theta_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
joint_log_likelihood = []
for i in range(len(self.classes_)):
jointi = safe_log(self.class_prior_[i])
n_ij = -0.5 * sum(list(map(lambda x: safe_log(2.0 * pi * x), self.var_[i])))
jll = [
list(
map(
lambda b: ((a[b] - self.theta_[i][b]) ** 2) / self.var_[i][b],
range(len(a)),
)
)
for a in X
]
jll = list(map(lambda a: 0.5 * sum(a), jll))
jll = [(n_ij - a) + jointi for a in jll]
joint_log_likelihood.append(jll)
return transpose(joint_log_likelihood)
class MultinomialNBPure(_BaseNBPure):
"""
Pure python implementation of `MultinomialNB`.
Args:
estimator (sklearn estimator): fitted `MultinomialNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_log_prior_ = estimator.class_log_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.feature_log_prob_ = estimator.feature_log_prob_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return [self._jll(a) for a in X]
def _jll(self, x):
"""Calculate the joint log likelihood for one sample"""
dot_prod = dot(x, self.feature_log_prob_)
return [
(dot_prod[index] + self.class_log_prior_[index])
for index in range(len(self.classes_))
]
class ComplementNBPure(_BaseNBPure):
"""
Pure python implementation of `ComplementNB`.
Args:
estimator (sklearn estimator): fitted `ComplementNB` object
"""
def __init__(self, estimator):
check_version(estimator)
self.class_log_prior_ = estimator.class_log_prior_.tolist()
self.classes_ = estimator.classes_.tolist()
self.feature_log_prob_ = estimator.feature_log_prob_.tolist()
check_types(self)
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X"""
jll = [dot(x, self.feature_log_prob_) for x in X]
if len(self.classes_) == 1:
jll = [[x[0] + self.class_log_prior_[0]] for x in jll]
return jll | 0.931952 | 0.583856 |
from operator import add
from ..utils import check_array, ndim, shape, check_types
from ..base import dot, expit, ravel
class LinearClassifierMixinPure:
"""Mixin for linear classifiers"""
def __init__(self, estimator):
self.coef_ = estimator.coef_.tolist()
self.classes_ = estimator.classes_.tolist()
if hasattr(estimator, "intercept_"):
if isinstance(estimator.intercept_, float):
self.intercept_ = [estimator.intercept_] * len(self.classes_)
else:
self.intercept_ = estimator.intercept_.tolist()
if hasattr(estimator, "multi_class"):
self.multi_class = estimator.multi_class
if hasattr(estimator, "solver"):
self.solver = estimator.solver
if hasattr(estimator, "loss"):
self.loss = estimator.loss
check_types(self)
def decision_function(self, X):
"""
Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
"""
X = check_array(X, handle_sparse="allow")
n_features = shape(self.coef_)[1]
if shape(X)[1] != n_features:
raise ValueError(
"X has %d features per sample; expecting %d" % (shape(X)[1], n_features)
)
scores = [
list(map(add, dot(X[i], self.coef_), self.intercept_))
for i in range(len(X))
]
return ravel(scores) if shape(scores)[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X"""
scores = self.decision_function(X)
if len(shape(scores)) == 1:
indices = map(lambda x: int(x > 0), scores)
else:
indices = map(lambda a: a.index(max(a)), scores)
return [self.classes_[i] for i in indices]
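    # Illustrative sketch (not part of the library): how the mixin's methods are
    # typically reached through a concrete subclass. `SomeLinearClassifierPure`
    # and `fitted_sklearn_model` are hypothetical names for the example.
    #
    #     clf_pure = SomeLinearClassifierPure(fitted_sklearn_model)
    #     scores = clf_pure.decision_function([[0.2, 1.1, 3.0]])   # signed distances
    #     labels = clf_pure.predict([[0.2, 1.1, 3.0]])             # argmax over scores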
def _predict_proba_lr(self, X):
"""
Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
if ndim(prob) == 1:
return [[1 - a, a] for a in map(expit, prob)]
else:
prob = [list(map(expit, a)) for a in prob]
return [
list(map(lambda b: (b / sum(a)) if sum(a) != 0 else float("NaN"), a))
for a in prob
] | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/linear_model/_base.py | _base.py | from operator import add
from ..utils import check_array, ndim, shape, check_types
from ..base import dot, expit, ravel
class LinearClassifierMixinPure:
"""Mixin for linear classifiers"""
def __init__(self, estimator):
self.coef_ = estimator.coef_.tolist()
self.classes_ = estimator.classes_.tolist()
if hasattr(estimator, "intercept_"):
if isinstance(estimator.intercept_, float):
self.intercept_ = [estimator.intercept_] * len(self.classes_)
else:
self.intercept_ = estimator.intercept_.tolist()
if hasattr(estimator, "multi_class"):
self.multi_class = estimator.multi_class
if hasattr(estimator, "solver"):
self.solver = estimator.solver
if hasattr(estimator, "loss"):
self.loss = estimator.loss
check_types(self)
def decision_function(self, X):
"""
Predict confidence scores for samples.
The confidence score for a sample is the signed distance of that
sample to the hyperplane.
"""
X = check_array(X, handle_sparse="allow")
n_features = shape(self.coef_)[1]
if shape(X)[1] != n_features:
raise ValueError(
"X has %d features per sample; expecting %d" % (shape(X)[1], n_features)
)
scores = [
list(map(add, dot(X[i], self.coef_), self.intercept_))
for i in range(len(X))
]
return ravel(scores) if shape(scores)[1] == 1 else scores
def predict(self, X):
"""Predict class labels for samples in X"""
scores = self.decision_function(X)
if len(shape(scores)) == 1:
indices = map(lambda x: int(x > 0), scores)
else:
indices = map(lambda a: a.index(max(a)), scores)
return [self.classes_[i] for i in indices]
def _predict_proba_lr(self, X):
"""
Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
if ndim(prob) == 1:
return [[1 - a, a] for a in map(expit, prob)]
else:
prob = [list(map(expit, a)) for a in prob]
return [
list(map(lambda b: (b / sum(a)) if sum(a) != 0 else float("NaN"), a))
for a in prob
] | 0.772702 | 0.315413 |
import re
import unicodedata
from functools import partial
from math import isnan
import warnings
from ._hash import _FeatureHasherPure
from ..map import convert_estimator
from ..preprocessing import normalize_pure
from ..utils import (
convert_type,
sparse_list,
shape,
check_array,
check_types,
check_version,
)
from ..base import safe_log
__all__ = [
"CountVectorizerPure",
"TfidfTransformerPure",
"TfidfVectorizerPure",
"HashingVectorizerPure",
]
def _preprocess(doc, accent_function=None, lower=False):
"""
Chain together an optional series of text preprocessing steps to
apply to a document.
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""
Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing"""
nkfd_form = unicodedata.normalize("NFKD", s)
return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
raise ValueError(
"English stopwords not supported. Pass explicitly as a custom stopwords list."
)
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixinPure:
"""Provides common code for text vectorizers (tokenization logic)"""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols"""
if self.input == "filename":
with open(doc, "rb") as fh:
doc = fh.read()
elif self.input == "file":
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if not isinstance(doc, str) and isnan(doc):
raise ValueError(
"np.nan is an invalid document, expected byte or unicode string."
)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i : i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i : i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = " " + w + " "
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset : offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset : offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
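    # Illustrative sketch (not part of the library): the n-gram helpers above for
    # a vectorizer instance `vec` with ngram_range=(1, 2), which is an assumption
    # of the example.
    #
    #     vec._word_ngrams(["pure", "python"])   # ['pure', 'python', 'pure python']
    #     vec._char_ngrams("abc")                # ['a', 'b', 'c', 'ab', 'bc']
    #     vec._char_wb_ngrams("ab")              # [' ', 'a', 'b', ' ', ' a', 'ab', 'b ']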
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == "ascii":
strip_accents = strip_accents_ascii
elif self.strip_accents == "unicode":
strip_accents = strip_accents_unicode
else:
raise ValueError(
'Invalid value for "strip_accents": %s' % self.strip_accents
)
return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return token_pattern.findall
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent"""
if id(self.stop_words) == getattr(self, "_stop_words_id", None):
            # Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn(
"Your stop_words may be inconsistent with "
"your preprocessing. Tokenizing the stop "
"words generated tokens %r not in "
"stop_words." % sorted(inconsistent)
)
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return "error"
def build_analyzer(self):
"""
Return a callable that handles preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
if self.input in ["file", "filename"]:
self._validate_custom_analyzer()
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
preprocess = self.build_preprocessor()
if self.analyzer == "char":
return partial(
_analyze,
ngrams=self._char_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "char_wb":
return partial(
_analyze,
ngrams=self._char_wb_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "word":
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess, tokenize)
return partial(
_analyze,
ngrams=self._word_ngrams,
tokenizer=tokenize,
preprocessor=preprocess,
decoder=self.decode,
stop_words=stop_words,
)
else:
raise ValueError(
"%s is not a valid tokenization scheme/analyzer" % self.analyzer
)
class CountVectorizerPure(_VectorizerMixinPure):
"""
Pure python implementation of `CountVectorizer`.
Args:
estimator (sklearn estimator): fitted `CountVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.binary = estimator.binary
self.vocabulary_ = {k: int(v) for k, v in estimator.vocabulary_.items()}
self.analyzer = estimator.analyzer
self.preprocessor = estimator.preprocessor
self.tokenizer = estimator.tokenizer
self.stop_words = estimator.stop_words
self.token_pattern = estimator.token_pattern
self.ngram_range = estimator.ngram_range
self.strip_accents = estimator.strip_accents
self.decode_error = estimator.decode_error
self.encoding = estimator.encoding
self.lowercase = estimator.lowercase
self.input = estimator.input
check_types(self)
def _count_vocab(self, raw_documents):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
vocabulary = self.vocabulary_
analyze = self.build_analyzer()
data = []
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
continue
data.append(feature_counter)
X = sparse_list(data, size=len(vocabulary), dtype=self.dtype)
return vocabulary, X
def transform(self, raw_documents):
"""Transform documents to document-term matrix"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
_, X = self._count_vocab(raw_documents)
if self.binary:
X = [dict.fromkeys(x, 1) for x in X]
return X
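# Illustrative sketch (not part of the library): converting a fitted sklearn
# `CountVectorizer` and transforming new documents into a `sparse_list`. The
# fitted sklearn vectorizer is an assumption of the example.
#
#     from sklearn.feature_extraction.text import CountVectorizer
#     cv = CountVectorizer().fit(["pure python text", "another text"])
#     cv_pure = CountVectorizerPure(cv)
#     cv_pure.transform(["pure text"])
#     # -> [{1: 1, 3: 1}] given vocabulary {'another': 0, 'pure': 1, 'python': 2, 'text': 3}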
class TfidfVectorizerPure(CountVectorizerPure):
"""
Pure python implementation of `TfidfVectorizer`.
Args:
estimator (sklearn estimator): fitted `TfidfVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self._tfidf = convert_estimator(estimator._tfidf)
super().__init__(estimator)
def transform(self, raw_documents):
"""Transform documents to document-term matrix."""
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
class TfidfTransformerPure:
"""
Pure python implementation of `TfidfTransformer`.
Args:
estimator (sklearn estimator): fitted `TfidfTransformer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.norm = estimator.norm
self.use_idf = estimator.use_idf
self.smooth_idf = estimator.smooth_idf
self.sublinear_tf = estimator.sublinear_tf
self.idf_ = estimator.idf_.tolist()
self.expected_n_features_ = estimator._idf_diag.shape[0]
check_types(self)
def transform(self, X, copy=True):
X = check_array(X, handle_sparse="allow")
n_samples, n_features = shape(X)
        if self.sublinear_tf:
            for index in range(len(X)):
                if isinstance(X[index], dict):
                    X[index] = {k: safe_log(v) + 1 for k, v in X[index].items()}
                else:
                    X[index] = [safe_log(v) + 1 for v in X[index]]
if self.use_idf:
if n_features != self.expected_n_features_:
raise ValueError(
"Input has n_features=%d while the model"
" has been trained with n_features=%d"
% (n_features, self.expected_n_features_)
)
for index in range(len(X)):
for k, v in X[index].items():
X[index][k] = v * self.idf_[k]
if self.norm:
X = normalize_pure(X, norm=self.norm, copy=False)
return X
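# Illustrative sketch (not part of the library): the TF-IDF classes above are
# drop-in transforms for their fitted sklearn counterparts. The fitted
# `TfidfVectorizer` and the `corpus` variable are assumptions of the example.
#
#     from sklearn.feature_extraction.text import TfidfVectorizer
#     tv = TfidfVectorizer().fit(corpus)
#     tv_pure = TfidfVectorizerPure(tv)
#     tv_pure.transform(["pure python text"])   # sparse_list of l2-normalized tf-idf weights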
class HashingVectorizerPure(_VectorizerMixinPure):
"""
Pure python implementation of `HashingVectorizer`.
Args:
estimator (sklearn estimator): fitted `HashingVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.norm = estimator.norm
self.binary = estimator.binary
self.analyzer = estimator.analyzer
self.preprocessor = estimator.preprocessor
self.tokenizer = estimator.tokenizer
self.stop_words = estimator.stop_words
self.token_pattern = estimator.token_pattern
self.ngram_range = estimator.ngram_range
self.strip_accents = estimator.strip_accents
self.decode_error = estimator.decode_error
self.encoding = estimator.encoding
self.lowercase = estimator.lowercase
self.input = estimator.input
self.n_features = estimator.n_features
self.alternate_sign = estimator.alternate_sign
check_types(self)
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X = [dict.fromkeys(x, 1) for x in X]
if self.norm is not None:
X = normalize_pure(X, norm=self.norm, copy=False)
return X
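    # Illustrative sketch (not part of the library): `HashingVectorizerPure` needs
    # no vocabulary, only the hashing parameters copied from the fitted sklearn
    # object, which is assumed here.
    #
    #     from sklearn.feature_extraction.text import HashingVectorizer
    #     hv = HashingVectorizer(n_features=2 ** 8).fit(["any text"])
    #     hv_pure = HashingVectorizerPure(hv)
    #     hv_pure.transform(["pure python text"])   # sparse_list with 2 ** 8 columns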
def _get_hasher(self):
return _FeatureHasherPure(
n_features=self.n_features,
input_type="string",
dtype=self.dtype,
alternate_sign=self.alternate_sign,
) | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/feature_extraction/text.py | text.py | import re
import unicodedata
from functools import partial
from math import isnan
import warnings
from ._hash import _FeatureHasherPure
from ..map import convert_estimator
from ..preprocessing import normalize_pure
from ..utils import (
convert_type,
sparse_list,
shape,
check_array,
check_types,
check_version,
)
from ..base import safe_log
__all__ = [
"CountVectorizerPure",
"TfidfTransformerPure",
"TfidfVectorizerPure",
"HashingVectorizerPure",
]
def _preprocess(doc, accent_function=None, lower=False):
"""
Chain together an optional series of text preprocessing steps to
apply to a document.
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""
Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing"""
nkfd_form = unicodedata.normalize("NFKD", s)
return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
raise ValueError(
"English stopwords not supported. Pass explicitly as a custom stopwords list."
)
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixinPure:
"""Provides common code for text vectorizers (tokenization logic)"""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols"""
if self.input == "filename":
with open(doc, "rb") as fh:
doc = fh.read()
elif self.input == "file":
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if not isinstance(doc, str) and isnan(doc):
raise ValueError(
"np.nan is an invalid document, expected byte or unicode string."
)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i : i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i : i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = " " + w + " "
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset : offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset : offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == "ascii":
strip_accents = strip_accents_ascii
elif self.strip_accents == "unicode":
strip_accents = strip_accents_unicode
else:
raise ValueError(
'Invalid value for "strip_accents": %s' % self.strip_accents
)
return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return token_pattern.findall
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent"""
if id(self.stop_words) == getattr(self, "_stop_words_id", None):
            # Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn(
"Your stop_words may be inconsistent with "
"your preprocessing. Tokenizing the stop "
"words generated tokens %r not in "
"stop_words." % sorted(inconsistent)
)
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return "error"
def build_analyzer(self):
"""
Return a callable that handles preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
if self.input in ["file", "filename"]:
self._validate_custom_analyzer()
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
preprocess = self.build_preprocessor()
if self.analyzer == "char":
return partial(
_analyze,
ngrams=self._char_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "char_wb":
return partial(
_analyze,
ngrams=self._char_wb_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "word":
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess, tokenize)
return partial(
_analyze,
ngrams=self._word_ngrams,
tokenizer=tokenize,
preprocessor=preprocess,
decoder=self.decode,
stop_words=stop_words,
)
else:
raise ValueError(
"%s is not a valid tokenization scheme/analyzer" % self.analyzer
)
class CountVectorizerPure(_VectorizerMixinPure):
"""
Pure python implementation of `CountVectorizer`.
Args:
estimator (sklearn estimator): fitted `CountVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.binary = estimator.binary
self.vocabulary_ = {k: int(v) for k, v in estimator.vocabulary_.items()}
self.analyzer = estimator.analyzer
self.preprocessor = estimator.preprocessor
self.tokenizer = estimator.tokenizer
self.stop_words = estimator.stop_words
self.token_pattern = estimator.token_pattern
self.ngram_range = estimator.ngram_range
self.strip_accents = estimator.strip_accents
self.decode_error = estimator.decode_error
self.encoding = estimator.encoding
self.lowercase = estimator.lowercase
self.input = estimator.input
check_types(self)
def _count_vocab(self, raw_documents):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
vocabulary = self.vocabulary_
analyze = self.build_analyzer()
data = []
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
continue
data.append(feature_counter)
X = sparse_list(data, size=len(vocabulary), dtype=self.dtype)
return vocabulary, X
def transform(self, raw_documents):
"""Transform documents to document-term matrix"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
_, X = self._count_vocab(raw_documents)
if self.binary:
X = [dict.fromkeys(x, 1) for x in X]
return X
class TfidfVectorizerPure(CountVectorizerPure):
"""
Pure python implementation of `TfidfVectorizer`.
Args:
estimator (sklearn estimator): fitted `TfidfVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self._tfidf = convert_estimator(estimator._tfidf)
super().__init__(estimator)
def transform(self, raw_documents):
"""Transform documents to document-term matrix."""
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
class TfidfTransformerPure:
"""
Pure python implementation of `TfidfTransformer`.
Args:
estimator (sklearn estimator): fitted `TfidfTransformer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.norm = estimator.norm
self.use_idf = estimator.use_idf
self.smooth_idf = estimator.smooth_idf
self.sublinear_tf = estimator.sublinear_tf
self.idf_ = estimator.idf_.tolist()
self.expected_n_features_ = estimator._idf_diag.shape[0]
check_types(self)
def transform(self, X, copy=True):
X = check_array(X, handle_sparse="allow")
n_samples, n_features = shape(X)
        if self.sublinear_tf:
            for index in range(len(X)):
                if isinstance(X[index], dict):
                    X[index] = {k: safe_log(v) + 1 for k, v in X[index].items()}
                else:
                    X[index] = [safe_log(v) + 1 for v in X[index]]
if self.use_idf:
if n_features != self.expected_n_features_:
raise ValueError(
"Input has n_features=%d while the model"
" has been trained with n_features=%d"
% (n_features, self.expected_n_features_)
)
for index in range(len(X)):
for k, v in X[index].items():
X[index][k] = v * self.idf_[k]
if self.norm:
X = normalize_pure(X, norm=self.norm, copy=False)
return X
class HashingVectorizerPure(_VectorizerMixinPure):
"""
Pure python implementation of `HashingVectorizer`.
Args:
estimator (sklearn estimator): fitted `HashingVectorizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.norm = estimator.norm
self.binary = estimator.binary
self.analyzer = estimator.analyzer
self.preprocessor = estimator.preprocessor
self.tokenizer = estimator.tokenizer
self.stop_words = estimator.stop_words
self.token_pattern = estimator.token_pattern
self.ngram_range = estimator.ngram_range
self.strip_accents = estimator.strip_accents
self.decode_error = estimator.decode_error
self.encoding = estimator.encoding
self.lowercase = estimator.lowercase
self.input = estimator.input
self.n_features = estimator.n_features
self.alternate_sign = estimator.alternate_sign
check_types(self)
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X = [dict.fromkeys(x, 1) for x in X]
if self.norm is not None:
X = normalize_pure(X, norm=self.norm, copy=False)
return X
def _get_hasher(self):
return _FeatureHasherPure(
n_features=self.n_features,
input_type="string",
dtype=self.dtype,
alternate_sign=self.alternate_sign,
) | 0.586641 | 0.316316 |
import numbers
from ..utils import check_types, sparse_list
MAX_INT = 2147483647
def _xrange(a, b, c):
return range(a, b, c)
def _xencode(x):
if isinstance(x, (bytes, bytearray)):
return x
else:
return x.encode()
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
def _hash(key, seed=0x0):
"""Implements 32bit murmur3 hash"""
key = bytearray(_xencode(key))
def fmix(h):
h ^= h >> 16
h = (h * 0x85EBCA6B) & 0xFFFFFFFF
h ^= h >> 13
h = (h * 0xC2B2AE35) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len(key)
nblocks = int(length / 4)
h1 = seed
c1 = 0xCC9E2D51
c2 = 0x1B873593
# body
for block_start in _xrange(0, nblocks * 4, 4):
        # assemble a 4-byte block in little-endian order
k1 = (
key[block_start + 3] << 24
| key[block_start + 2] << 16
| key[block_start + 1] << 8
| key[block_start + 0]
)
k1 = (c1 * k1) & 0xFFFFFFFF
k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF # inlined ROTL32
k1 = (c2 * k1) & 0xFFFFFFFF
h1 ^= k1
h1 = (h1 << 13 | h1 >> 19) & 0xFFFFFFFF # inlined ROTL32
h1 = (h1 * 5 + 0xE6546B64) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[tail_index + 2] << 16
if tail_size >= 2:
k1 ^= key[tail_index + 1] << 8
if tail_size >= 1:
k1 ^= key[tail_index + 0]
if tail_size > 0:
k1 = (k1 * c1) & 0xFFFFFFFF
k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF # inlined ROTL32
k1 = (k1 * c2) & 0xFFFFFFFF
h1 ^= k1
# finalization
unsigned_val = fmix(h1 ^ length)
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -((unsigned_val ^ 0xFFFFFFFF) + 1)
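# Illustrative sketch (not part of the library): the 32-bit murmur3 hash above
# maps a feature string to a signed integer; the hasher then buckets it modulo
# `n_features` (2 ** 20 below is the default) and uses the sign when
# `alternate_sign` is enabled.
#
#     h = _hash("token=value")     # deterministic signed 32-bit integer
#     index = abs(h) % (2 ** 20)   # column index in the hashed feature space
#     sign = 1 if h >= 0 else -1   # value sign when alternate_sign=True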
def _hashing_transform(raw_X, n_features, dtype, alternate_sign=1, seed=0):
"""Guts of FeatureHasher.transform"""
assert n_features > 0
X = []
for x in raw_X:
row = {}
for f, v in x:
if isinstance(v, str):
f = "%s%s%s" % (f, "=", v)
value = 1
else:
value = v
if value == 0:
continue
h = _hash(f, seed)
index = abs(h) % n_features
if alternate_sign:
value *= (h >= 0) * 2 - 1
row[index] = value
X.append(row)
return sparse_list(X, size=n_features, dtype=dtype)
class _FeatureHasherPure:
"""Pure python implementation of `FeatureHasher`"""
def __init__(
self, n_features=(2**20), input_type="dict", dtype=float, alternate_sign=True
):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.alternate_sign = alternate_sign
check_types(self)
@staticmethod
def _validate_params(n_features, input_type):
if not isinstance(n_features, numbers.Integral):
raise TypeError(
"n_features must be integral, got %r (%s)."
% (n_features, type(n_features))
)
elif n_features < 1 or n_features >= MAX_INT + 1:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError(
"input_type must be 'dict', 'pair' or 'string', got %r." % input_type
)
def transform(self, raw_X):
"""Transform a sequence of instances to a `sparse_list`"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
return _hashing_transform(
raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
) | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/feature_extraction/_hash.py | _hash.py | import numbers
from ..utils import check_types, sparse_list
MAX_INT = 2147483647
def _xrange(a, b, c):
return range(a, b, c)
def _xencode(x):
if isinstance(x, (bytes, bytearray)):
return x
else:
return x.encode()
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
def _hash(key, seed=0x0):
"""Implements 32bit murmur3 hash"""
key = bytearray(_xencode(key))
def fmix(h):
h ^= h >> 16
h = (h * 0x85EBCA6B) & 0xFFFFFFFF
h ^= h >> 13
h = (h * 0xC2B2AE35) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len(key)
nblocks = int(length / 4)
h1 = seed
c1 = 0xCC9E2D51
c2 = 0x1B873593
# body
for block_start in _xrange(0, nblocks * 4, 4):
        # assemble a 4-byte block in little-endian order
k1 = (
key[block_start + 3] << 24
| key[block_start + 2] << 16
| key[block_start + 1] << 8
| key[block_start + 0]
)
k1 = (c1 * k1) & 0xFFFFFFFF
k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF # inlined ROTL32
k1 = (c2 * k1) & 0xFFFFFFFF
h1 ^= k1
h1 = (h1 << 13 | h1 >> 19) & 0xFFFFFFFF # inlined ROTL32
h1 = (h1 * 5 + 0xE6546B64) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[tail_index + 2] << 16
if tail_size >= 2:
k1 ^= key[tail_index + 1] << 8
if tail_size >= 1:
k1 ^= key[tail_index + 0]
if tail_size > 0:
k1 = (k1 * c1) & 0xFFFFFFFF
k1 = (k1 << 15 | k1 >> 17) & 0xFFFFFFFF # inlined ROTL32
k1 = (k1 * c2) & 0xFFFFFFFF
h1 ^= k1
# finalization
unsigned_val = fmix(h1 ^ length)
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -((unsigned_val ^ 0xFFFFFFFF) + 1)
def _hashing_transform(raw_X, n_features, dtype, alternate_sign=1, seed=0):
"""Guts of FeatureHasher.transform"""
assert n_features > 0
X = []
for x in raw_X:
row = {}
for f, v in x:
if isinstance(v, str):
f = "%s%s%s" % (f, "=", v)
value = 1
else:
value = v
if value == 0:
continue
h = _hash(f, seed)
index = abs(h) % n_features
if alternate_sign:
value *= (h >= 0) * 2 - 1
row[index] = value
X.append(row)
return sparse_list(X, size=n_features, dtype=dtype)
class _FeatureHasherPure:
"""Pure python implementation of `FeatureHasher`"""
def __init__(
self, n_features=(2**20), input_type="dict", dtype=float, alternate_sign=True
):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.alternate_sign = alternate_sign
check_types(self)
@staticmethod
def _validate_params(n_features, input_type):
if not isinstance(n_features, numbers.Integral):
raise TypeError(
"n_features must be integral, got %r (%s)."
% (n_features, type(n_features))
)
elif n_features < 1 or n_features >= MAX_INT + 1:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError(
"input_type must be 'dict', 'pair' or 'string', got %r." % input_type
)
def transform(self, raw_X):
"""Transform a sequence of instances to a `sparse_list`"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
return _hashing_transform(
raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
) | 0.605566 | 0.343645 |
from ._label import _encode, _encode_check_unknown
from ..base import accumu, apply_2d
from ..utils import (
check_types,
check_array,
shape,
sparse_list,
convert_type,
check_version,
)
class _BaseEncoderPure:
"""
Base class for encoders that includes the code to categorize and
transform the input features.
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.categories_ = [a.tolist() for a in estimator.categories_]
if hasattr(estimator, "sparse"):
self.sparse = estimator.sparse
if hasattr(estimator, "drop") and (estimator.drop is not None):
raise ValueError("Encoder does not handle 'drop' functionality")
if hasattr(estimator, "handle_unknown"):
self.handle_unknown = estimator.handle_unknown
check_types(self)
def _check_X(self, X):
"""Perform custom check_array"""
X = check_array(X)
n_samples, n_features = shape(X)
X_columns = []
for i in range(n_features):
Xi = self._get_feature(X, feature_idx=i)
X_columns.append(Xi)
return X_columns, n_samples, n_features
def _get_feature(self, X, feature_idx):
return [x[feature_idx] for x in X]
def _transform(self, X, handle_unknown="error"):
X_list, n_samples, n_features = self._check_X(X)
        X_int = [[0] * n_features for _ in range(n_samples)]
        X_mask = [[True] * n_features for _ in range(n_samples)]
if n_features != len(self.categories_):
raise ValueError(
"The number of features in X is different to the number of "
"features of the fitted data. The fitted data had {} features "
"and the X has {} features.".format(
len(
self.categories_,
),
n_features,
)
)
for i in range(n_features):
Xi = X_list[i]
diff, valid_mask = _encode_check_unknown(
Xi, self.categories_[i], return_mask=True
)
if not (sum(valid_mask) == len(valid_mask)):
if handle_unknown == "error":
msg = (
"Found unknown categories {0} in column {1}"
" during transform".format(diff, i)
)
raise ValueError(msg)
else:
X_mask = [
[
valid_mask[j] if idx == i else X_mask[j][idx]
for idx in range(n_features)
]
for j in range(n_samples)
]
Xi = [
Xi[idx] if valid_mask[idx] else self.categories_[i][0]
for idx in range(len(Xi))
]
_, encoded = _encode(
Xi, self.categories_[i], encode=True, check_unknown=False
)
X_int = [
[encoded[j] if idx == i else X_int[j][idx] for idx in range(n_features)]
for j in range(n_samples)
]
return X_int, X_mask
class OrdinalEncoderPure(_BaseEncoderPure):
"""
Pure python implementation of `OrdinalEncoder`.
Args:
estimator (sklearn estimator): fitted `OrdinalEncoder` object
"""
def transform(self, X):
"""Transform X to ordinal codes"""
X_int, _ = self._transform(X)
return apply_2d(X_int, self.dtype)
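# Illustrative sketch (not part of the library): converting a fitted sklearn
# `OrdinalEncoder` and transforming a 2-D list of categories. The fitted
# sklearn encoder is an assumption of the example.
#
#     from sklearn.preprocessing import OrdinalEncoder
#     enc = OrdinalEncoder().fit([["red", "S"], ["blue", "M"], ["red", "L"]])
#     enc_pure = OrdinalEncoderPure(enc)
#     enc_pure.transform([["blue", "L"]])   # [[0.0, 0.0]] (positions within each sorted category list)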
class OneHotEncoderPure(_BaseEncoderPure):
"""
Pure python implementation of `OneHotEncoder`.
Args:
estimator (sklearn estimator): fitted `OneHotEncoder` object
"""
def transform(self, X):
"""Transform X using one-hot encoding"""
X_int, X_mask = self._transform(X, handle_unknown=self.handle_unknown)
n_samples, n_features = shape(X_int)
n_values = [0] + [len(cats) for cats in self.categories_]
feature_indices = list(accumu(n_values))
data = [
dict(
[
(n_values[i] + X_int[j][i], self.dtype(1))
for i in range(n_features)
if X_mask[j][i]
]
)
for j in range(n_samples)
]
out = sparse_list(data, size=feature_indices[-1], dtype=self.dtype)
if not self.sparse:
return out.todense()
else:
return out | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/preprocessing/_encoders.py | _encoders.py | from ._label import _encode, _encode_check_unknown
from ..base import accumu, apply_2d
from ..utils import (
check_types,
check_array,
shape,
sparse_list,
convert_type,
check_version,
)
class _BaseEncoderPure:
"""
Base class for encoders that includes the code to categorize and
transform the input features.
"""
def __init__(self, estimator):
check_version(estimator)
self.dtype = convert_type(estimator.dtype)
self.categories_ = [a.tolist() for a in estimator.categories_]
if hasattr(estimator, "sparse"):
self.sparse = estimator.sparse
if hasattr(estimator, "drop") and (estimator.drop is not None):
raise ValueError("Encoder does not handle 'drop' functionality")
if hasattr(estimator, "handle_unknown"):
self.handle_unknown = estimator.handle_unknown
check_types(self)
def _check_X(self, X):
"""Perform custom check_array"""
X = check_array(X)
n_samples, n_features = shape(X)
X_columns = []
for i in range(n_features):
Xi = self._get_feature(X, feature_idx=i)
X_columns.append(Xi)
return X_columns, n_samples, n_features
def _get_feature(self, X, feature_idx):
return [x[feature_idx] for x in X]
def _transform(self, X, handle_unknown="error"):
X_list, n_samples, n_features = self._check_X(X)
        X_int = [[0] * n_features for _ in range(n_samples)]
        X_mask = [[True] * n_features for _ in range(n_samples)]
if n_features != len(self.categories_):
raise ValueError(
"The number of features in X is different to the number of "
"features of the fitted data. The fitted data had {} features "
"and the X has {} features.".format(
len(
self.categories_,
),
n_features,
)
)
for i in range(n_features):
Xi = X_list[i]
diff, valid_mask = _encode_check_unknown(
Xi, self.categories_[i], return_mask=True
)
if not (sum(valid_mask) == len(valid_mask)):
if handle_unknown == "error":
msg = (
"Found unknown categories {0} in column {1}"
" during transform".format(diff, i)
)
raise ValueError(msg)
else:
X_mask = [
[
valid_mask[j] if idx == i else X_mask[j][idx]
for idx in range(n_features)
]
for j in range(n_samples)
]
Xi = [
Xi[idx] if valid_mask[idx] else self.categories_[i][0]
for idx in range(len(Xi))
]
_, encoded = _encode(
Xi, self.categories_[i], encode=True, check_unknown=False
)
X_int = [
[encoded[j] if idx == i else X_int[j][idx] for idx in range(n_features)]
for j in range(n_samples)
]
return X_int, X_mask
class OrdinalEncoderPure(_BaseEncoderPure):
"""
Pure python implementation of `OrdinalEncoder`.
Args:
estimator (sklearn estimator): fitted `OrdinalEncoder` object
"""
def transform(self, X):
"""Transform X to ordinal codes"""
X_int, _ = self._transform(X)
return apply_2d(X_int, self.dtype)
class OneHotEncoderPure(_BaseEncoderPure):
"""
Pure python implementation of `OneHotEncoder`.
Args:
estimator (sklearn estimator): fitted `OneHotEncoder` object
"""
def transform(self, X):
"""Transform X using one-hot encoding"""
X_int, X_mask = self._transform(X, handle_unknown=self.handle_unknown)
n_samples, n_features = shape(X_int)
n_values = [0] + [len(cats) for cats in self.categories_]
feature_indices = list(accumu(n_values))
data = [
dict(
[
(n_values[i] + X_int[j][i], self.dtype(1))
for i in range(n_features)
if X_mask[j][i]
]
)
for j in range(n_samples)
]
out = sparse_list(data, size=feature_indices[-1], dtype=self.dtype)
if not self.sparse:
return out.todense()
else:
return out | 0.827793 | 0.416915 |
from math import sqrt
from copy import copy as cp
from ..utils import sparse_list, issparse, check_array, check_types, check_version
from ..base import transpose, apply_2d, apply_axis_2d, matmult_same_dim
def _handle_zeros_in_scale(scale, copy=True):
"""Makes sure that whenever scale is zero, we handle it correctly"""
if isinstance(scale, (int, float)):
if scale == 0.0:
scale = 1.0
return scale
elif isinstance(scale, list):
if copy:
scale = cp(scale)
return [(1.0 if scale[i] == 0.0 else scale[i]) for i in range(len(scale))]
def _row_norms(X):
"""Row-wise (squared) Euclidean norm of X"""
X_X = matmult_same_dim(X, X)
if issparse(X):
norms = [sum(x.values()) for x in X_X]
else:
norms = apply_axis_2d(X_X, sum, axis=1)
return list(map(sqrt, norms))
def normalize_pure(X, norm="l2", axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm"""
# check input compatibility
if (axis == 0) and issparse(X):
raise ValueError("Axis 0 is not supported for sparse data")
if norm not in ("l1", "l2", "max"):
raise ValueError("'%s' is not a supported norm" % norm)
if axis not in [0, 1]:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, handle_sparse="allow")
if axis == 0:
X = transpose(X)
if issparse(X):
if return_norm and norm in ("l1", "l2"):
raise NotImplementedError(
"return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'"
)
if norm == "l1":
norms = [sum(map(abs, x.values())) for x in X]
elif norm == "l2":
norms = _row_norms(X)
elif norm == "max":
norms = [max(list(x.values()) + [0]) for x in X]
norms = _handle_zeros_in_scale(norms, copy=False)
X_sparse = [
{k: (v / float(norms[index])) for k, v in X[index].items()}
for index in range(len(X))
]
X = sparse_list(X_sparse, X.size, X.dtype)
else:
if norm == "l1":
norms = apply_axis_2d(apply_2d(X, abs), sum, axis=1)
elif norm == "l2":
norms = _row_norms(X)
elif norm == "max":
norms = apply_axis_2d(X, max, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X = [
list(map(lambda a: a / float(norms[index]), X[index]))
for index in range(len(X))
]
if axis == 0:
X = transpose(X)
if return_norm:
return X, norms
else:
return X
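# Illustrative sketch (not part of the library): `normalize_pure` on a plain
# 2-D list, one row per sample.
#
#     normalize_pure([[3.0, 4.0]], norm="l2")    # [[0.6, 0.8]]
#     normalize_pure([[1.0, 3.0]], norm="l1")    # [[0.25, 0.75]]
#     normalize_pure([[2.0, 4.0]], norm="max")   # [[0.5, 1.0]]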
class NormalizerPure:
"""
Pure python implementation of `Normalizer`.
Args:
estimator (sklearn estimator): fitted `Normalizer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.norm = estimator.norm
self.copy = estimator.copy
check_types(self)
def transform(self, X, copy=None):
"""Scale each non zero row of X to unit norm."""
copy = copy if copy is not None else self.copy
X = check_array(X, handle_sparse="allow")
return normalize_pure(X, norm=self.norm, axis=1, copy=copy)
class StandardScalerPure:
"""
Pure python implementation of `StandardScaler`.
Args:
estimator (sklearn estimator): fitted `StandardScaler` object
"""
def __init__(self, estimator):
check_version(estimator)
self.with_mean = estimator.with_mean
self.with_std = estimator.with_std
if estimator.scale_ is None:
self.scale_ = None
else:
self.scale_ = estimator.scale_.tolist()
if estimator.mean_ is None:
self.mean_ = None
else:
self.mean_ = estimator.mean_.tolist()
check_types(self)
def transform(self, X, copy=None):
"""Perform standardization by centering and scaling"""
X = check_array(X, handle_sparse="allow")
if issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives."
)
if self.scale_ is not None:
X_ = [{k: (v / self.scale_[k]) for k, v in x.items()} for x in X]
X = sparse_list(X_, size=X.size, dtype=X.dtype)
else:
if self.with_mean:
X = [[x[i] - self.mean_[i] for i in range(len(self.mean_))] for x in X]
if self.with_std:
X = [
[x[i] / self.scale_[i] for i in range(len(self.scale_))] for x in X
]
return X
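# Illustrative sketch (not part of the library): converting a fitted sklearn
# `StandardScaler` and scaling a 2-D list. The fitted sklearn scaler and
# `X_train` are assumptions of the example.
#
#     from sklearn.preprocessing import StandardScaler
#     scaler = StandardScaler().fit(X_train)
#     scaler_pure = StandardScalerPure(scaler)
#     scaler_pure.transform([[0.5, -1.2, 3.0]])   # (x - mean_) / scale_ per feature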
class MinMaxScalerPure:
"""
Pure python implementation of `MinMaxScaler`.
Args:
estimator (sklearn estimator): fitted `MinMaxScaler` object
"""
def __init__(self, estimator):
check_version(estimator)
self.feature_range = estimator.feature_range
if estimator.scale_ is None:
self.scale_ = None
else:
self.scale_ = estimator.scale_.tolist()
if estimator.min_ is None:
self.min_ = None
else:
self.min_ = estimator.min_.tolist()
check_types(self)
def transform(self, X):
"""Scale features of X according to feature_range"""
if issparse(X):
raise TypeError(
"MinMaxScalerPure does not support sparse input. "
"Consider using MaxAbsScalerPure instead."
)
X = check_array(X)
return [
[(x[i] * self.scale_[i]) + self.min_[i] for i in range(len(self.scale_))]
for x in X
]
class MaxAbsScalerPure:
"""
Pure python implementation of `MaxAbsScaler`.
Args:
estimator (sklearn estimator): fitted `MaxAbsScaler` object
"""
def __init__(self, estimator):
check_version(estimator)
self.copy = estimator.copy
if estimator.scale_ is None:
self.scale_ = None
else:
self.scale_ = estimator.scale_.tolist()
if estimator.max_abs_ is None:
self.max_abs_ = None
else:
self.max_abs_ = estimator.max_abs_.tolist()
check_types(self)
def transform(self, X):
"""Scale the data"""
X = check_array(X, handle_sparse="allow")
if issparse(X):
X_ = [{k: (v / self.scale_[k]) for k, v in x.items()} for x in X]
X = sparse_list(X_, size=X.size, dtype=X.dtype)
else:
X = [[x[i] / self.scale_[i] for i in range(len(self.scale_))] for x in X]
        return X | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/preprocessing/_data.py | _data.py | 0.847747 | 0.490907 |
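# --- Illustrative usage sketch (not part of the package source) ---
# A minimal example of how the pure-python scalers above are meant to be used:
# fit the scikit-learn estimator as usual, hand it to the pure class, and then
# transform plain lists of lists with no numpy dependency at predict time.
# The import path mirrors the file path shown above and assumes the package and
# a scikit-learn version accepted by check_version() are installed.
from sklearn.preprocessing import MinMaxScaler
from scikit_endpoint.preprocessing._data import MinMaxScalerPure

X_train = [[0.0, 2.0], [1.0, 4.0], [2.0, 6.0]]
scaler = MinMaxScaler().fit(X_train)          # fitted sklearn estimator
scaler_pure = MinMaxScalerPure(scaler)        # copies scale_ and min_ into lists
print(scaler_pure.transform([[1.0, 3.0]]))    # expected: [[0.5, 0.25]]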
from ..base import sfmax, expit
from ..tree import DecisionTreeRegressorPure
from ..utils import check_types, check_array
MIN_VERSION = "0.82"
SUPPORTED_OBJ = ["binary:logistic", "multi:softprob"]
SUPPORTED_BOOSTER = ["gbtree"]
class XGBClassifierPure:
"""
Pure python implementation of `XGBClassifier`. Only supports 'gbtree'
booster and 'binary:logistic' or 'multi:softprob' objectives.
Args:
estimator (xgboost estimator): fitted `XGBClassifier` object
"""
def __init__(self, estimator):
if (not isinstance(estimator.objective, str)) or (
estimator.objective not in SUPPORTED_OBJ
):
raise ValueError(
"Objective function not supported; only {} are supported".format(
SUPPORTED_OBJ
)
)
else:
self.objective = estimator.objective
if estimator.booster not in SUPPORTED_BOOSTER:
raise ValueError("Booster: '{}' not supported".format(estimator.booster))
else:
self.booster = estimator.booster
self.classes_ = estimator.classes_.tolist()
self.n_classes_ = estimator.n_classes_
self.n_estimators = estimator.n_estimators
self.estimators_ = self._build_estimators(estimator)
check_types(self)
def _build_estimators(self, estimator):
"""Convert booster to list of pure decision tree regressors"""
if not hasattr(estimator.get_booster(), "trees_to_dataframe"):
raise Exception(
"This xgboost estimator was likely fitted with version < {} "
"which is not supported".format(MIN_VERSION)
)
tree_df = estimator.get_booster().trees_to_dataframe()
estimators_ = []
idx = 0
for est_id in range(self.n_estimators):
if self.n_classes_ == 2:
tree = tree_df[tree_df["Tree"] == idx].to_dict(orient="list")
est_row_ = DecisionTreeRegressorPure(tree)
idx += 1
else:
est_row_ = []
for cls_id in range(self.n_classes_):
tree = tree_df[tree_df["Tree"] == idx].to_dict(orient="list")
est_row_.append(DecisionTreeRegressorPure(tree))
idx += 1
estimators_.append(est_row_)
return estimators_
def _predict(self, X):
"""Raw sums of estimator predictions for each class for multi-class"""
preds = []
for cls_index in range(self.n_classes_):
cls_sum = [0] * len(X)
for est_index in range(self.n_estimators):
est_preds = self.estimators_[est_index][cls_index].predict(X)
cls_sum = list(map(lambda x, y: x + y, cls_sum, est_preds))
preds.append(cls_sum)
return preds
def _predict_binary(self, X):
"""Raw sums of estimator predictions for each class for binary"""
preds = [0] * len(X)
for estimator in self.estimators_:
preds = list(map(lambda x, y: x + y, preds, estimator.predict(X)))
return preds
def predict(self, X):
proba = self.predict_proba(X)
return [self.classes_[a.index(max(a))] for a in proba]
def predict_proba(self, X):
X = check_array(X)
if self.objective == "multi:softprob":
preds = self._predict(X)
out = []
for i in range(len(X)):
out.append(sfmax([preds[j][i] for j in range(self.n_classes_)]))
elif self.objective == "binary:logistic":
preds = self._predict_binary(X)
out = list(map(expit, preds))
out = list(map(lambda x: [1 - x, x], out))
else:
raise ValueError(
"Objective function not supported; only {} are supported".format(
SUPPORTED_OBJ
)
)
        return out | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/xgboost/_classes.py | _classes.py | 0.685002 | 0.384392 |
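# --- Illustrative usage sketch (not part of the package source) ---
# A hedged example of converting a fitted XGBClassifier into XGBClassifierPure.
# It assumes xgboost (>= 0.82, with trees_to_dataframe available) is installed
# and uses the 'gbtree' booster with the 'binary:logistic' objective, the only
# combinations supported above. The import path mirrors the file path shown above.
from xgboost import XGBClassifier
from scikit_endpoint.xgboost._classes import XGBClassifierPure

X_train = [[0, 1], [1, 0], [0, 0], [1, 1]]
y_train = [0, 1, 0, 1]
clf = XGBClassifier(n_estimators=5, booster="gbtree",
                    objective="binary:logistic").fit(X_train, y_train)
clf_pure = XGBClassifierPure(clf)        # trees rebuilt from trees_to_dataframe()
print(clf_pure.predict([[0, 1]]))        # plain python lists in, class labels out
print(clf_pure.predict_proba([[0, 1]]))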
import warnings
from math import isnan
from ..base import safe_log
from ..utils import check_array, check_types, check_version
class _DecisionTreeBase:
"""Decision tree base class"""
def __init__(self, estimator):
if isinstance(estimator, dict):
# sourced from xgboost booster object tree dictionary
self.threshold_ = list(
map(lambda x: -2 if isnan(x) else x, estimator["Split"])
)
self.value_ = [[a] for a in estimator["Gain"]]
self.children_left_ = list(
map(
lambda x: -1 if not isinstance(x, str) else int(x.split("-")[-1]),
estimator["Yes"],
)
)
self.children_right_ = list(
map(
lambda x: -1 if not isinstance(x, str) else int(x.split("-")[-1]),
estimator["No"],
)
)
self.feature_ = list(
map(
                    # parse xgboost feature names like "f12" into integer indices
                    lambda x: -2 if x == "Leaf" else int(x.replace("f", "")),
estimator["Feature"],
)
)
else:
# sourced from sklearn decision tree
check_version(estimator)
self.children_left_ = estimator.tree_.children_left.tolist()
self.children_right_ = estimator.tree_.children_right.tolist()
self.feature_ = estimator.tree_.feature.tolist()
self.threshold_ = estimator.tree_.threshold.tolist()
self.value_ = [a[0] for a in estimator.tree_.value.tolist()]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if hasattr(estimator, "classes_") and (estimator.classes_ is not None):
self.classes_ = estimator.classes_.tolist()
else:
self.classes_ = [0, 1]
check_types(self)
def _get_leaf_node(self, x):
if isinstance(x, dict):
left_equal = lambda nd: x.get(self.feature_[nd], 0.0)
else:
left_equal = lambda nd: x[self.feature_[nd]]
found_node = False
node_id = 0
while not found_node:
if self.children_left_[node_id] == self.children_right_[node_id]:
found_node = True
else:
if left_equal(node_id) <= self.threshold_[node_id]:
node_id = self.children_left_[node_id]
else:
node_id = self.children_right_[node_id]
return node_id
class DecisionTreeClassifierPure(_DecisionTreeBase):
"""
Pure python implementation of `DecisionTreeClassifier`.
Args:
estimator (sklearn estimator): fitted `DecisionTreeClassifier` object
"""
def _get_pred_from_leaf_node(self, node_id):
return self.value_[node_id].index(max(self.value_[node_id]))
def _get_proba_from_leaf_node(self, node_id):
return [a / sum(self.value_[node_id]) for a in self.value_[node_id]]
def predict(self, X):
X = check_array(X, handle_sparse="allow")
leaves = [self._get_leaf_node(x) for x in X]
preds = [self._get_pred_from_leaf_node(x) for x in leaves]
return [self.classes_[x] for x in preds]
def predict_proba(self, X):
X = check_array(X, handle_sparse="allow")
leaves = [self._get_leaf_node(x) for x in X]
return [self._get_proba_from_leaf_node(x) for x in leaves]
def predict_log_proba(self, X):
return [list(map(safe_log, x)) for x in self.predict_proba(X)]
class DecisionTreeRegressorPure(_DecisionTreeBase):
"""
Pure python implementation of `DecisionTreeRegressor`.
Args:
estimator (sklearn estimator): fitted `DecisionTreeRegressor` object
"""
def _get_pred_from_leaf_node(self, node_id):
return self.value_[node_id][0]
def predict(self, X):
X = check_array(X, handle_sparse="allow")
leaves = [self._get_leaf_node(x) for x in X]
return [self._get_pred_from_leaf_node(x) for x in leaves]
class ExtraTreeClassifierPure(DecisionTreeClassifierPure):
"""
Pure python implementation of `ExtraTreeClassifier`.
Args:
estimator (sklearn estimator): fitted `ExtraTreeClassifier` object
"""
pass
class ExtraTreeRegressorPure(DecisionTreeRegressorPure):
"""
Pure python implementation of `ExtraTreeRegressor`.
Args:
estimator (sklearn estimator): fitted `ExtraTreeRegressor` object
"""
    pass | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/tree/_classes.py | _classes.py | 0.795142 | 0.333313 |
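# --- Illustrative usage sketch (not part of the package source) ---
# A hedged sketch of wrapping a fitted scikit-learn decision tree with the pure
# classes above: the tree_ arrays are copied into lists and predictions follow
# children_left_/children_right_/threshold_ by hand. The import path mirrors the
# file path shown above and assumes check_version() accepts the installed sklearn.
from sklearn.tree import DecisionTreeClassifier
from scikit_endpoint.tree._classes import DecisionTreeClassifierPure

X_train = [[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]]
y_train = [0, 1, 0, 1]
tree = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X_train, y_train)
tree_pure = DecisionTreeClassifierPure(tree)
print(tree_pure.predict([[1.0, 0.5]]))
print(tree_pure.predict_proba([[1.0, 0.5]]))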
from ..base import transpose, apply_axis_2d, apply_2d, safe_exp, safe_log, ravel, expit
from ..utils import check_types, shape
EPS = 1.1920929e-07
def _clip(a, a_min, a_max):
if a < a_min:
return a_min
elif a > a_max:
return a_max
else:
return a
class _MultinomialDeviancePure:
"""Multinomial deviance loss function for multi-class classification"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError(
"{0:s} requires more than 2 classes.".format(self.__class__.__name__)
)
self.n_classes_ = n_classes
check_types(self)
def _raw_prediction_to_proba(self, raw_predictions):
logsumexp = list(
map(safe_log, apply_axis_2d(apply_2d(raw_predictions, safe_exp), sum))
)
return [
[
safe_exp(raw_predictions[index][i] - logsumexp[index])
for i in range(self.n_classes_)
]
for index in range(len(raw_predictions))
]
def _raw_prediction_to_decision(self, raw_predictions):
proba = self._raw_prediction_to_proba(raw_predictions)
return [a.index(max(a)) for a in proba]
def get_init_raw_predictions(self, X, estimator):
probas = estimator.predict_proba(X)
func = lambda x: safe_log(_clip(x, EPS, 1 - EPS))
return apply_2d(probas, func)
class _BinomialDeviancePure:
"""Binomial deviance loss function for binary classification"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class(es)".format(
self.__class__.__name__, n_classes
)
)
check_types(self)
def _raw_prediction_to_proba(self, raw_predictions):
proba = (
ravel(raw_predictions)
if shape(raw_predictions)[1] == 1
else raw_predictions
)
proba_1 = list(map(expit, proba))
proba = [[(1 - x) for x in proba_1], proba_1]
return transpose(proba)
def _raw_prediction_to_decision(self, raw_predictions):
proba = self._raw_prediction_to_proba(raw_predictions)
return [a.index(max(a)) for a in proba]
def get_init_raw_predictions(self, X, estimator):
probas = estimator.predict_proba(X)
func = lambda x: _clip(x, EPS, 1 - EPS)
proba_pos_class = [func(a[1]) for a in probas]
log_func = lambda x: safe_log(x / (1 - x))
return [[log_func(a)] for a in proba_pos_class]
class _ExponentialLossPure:
"""Exponential loss function for binary classification"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class(es)".format(
self.__class__.__name__, n_classes
)
)
check_types(self)
def _raw_prediction_to_proba(self, raw_predictions):
proba = (
ravel(raw_predictions)
if shape(raw_predictions)[1] == 1
else raw_predictions
)
func = lambda x: expit(x) * 2.0
proba_1 = list(map(func, proba))
proba = [[(1 - x) for x in proba_1], proba_1]
return transpose(proba)
def _raw_prediction_to_decision(self, raw_predictions):
raw_predictions = (
ravel(raw_predictions)
if shape(raw_predictions)[1] == 1
else raw_predictions
)
return [int(a >= 0) for a in raw_predictions]
def get_init_raw_predictions(self, X, estimator):
probas = estimator.predict_proba(X)
func = lambda x: _clip(x, EPS, 1 - EPS)
proba_pos_class = [func(a[1]) for a in probas]
log_func = lambda x: 0.5 * safe_log(x / (1 - x))
        return [[log_func(a)] for a in proba_pos_class] | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/ensemble/_gb_losses.py | _gb_losses.py | 0.618665 | 0.504089 |
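# --- Illustrative usage sketch (not part of the package source) ---
# A small hedged example of what the binary loss helper above does with raw
# scores: each raw prediction is passed through expit() and returned as
# [P(class 0), P(class 1)]. The import path mirrors the file path shown above.
from scikit_endpoint.ensemble._gb_losses import _BinomialDeviancePure

loss = _BinomialDeviancePure(2)
raw = [[0.0], [2.0], [-1.5]]                  # one raw score per sample
print(loss._raw_prediction_to_proba(raw))     # rows of [1 - expit(s), expit(s)]
print(loss._raw_prediction_to_decision(raw))  # index of the larger probability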
from operator import add
from ._gb_losses import (
_MultinomialDeviancePure,
_BinomialDeviancePure,
_ExponentialLossPure,
)
from ..base import transpose, apply_2d, safe_log, operate_2d
from ..utils import check_version, check_types, check_array, shape
from ..map import convert_estimator
class GradientBoostingClassifierPure:
"""
Pure python implementation of `GradientBoostingClassifier`.
Args:
estimator (sklearn estimator): fitted `GradientBoostingClassifier` object
"""
def __init__(self, estimator):
check_version(estimator, "0.21.0")
self.classes_ = estimator.classes_.tolist()
self.estimators_ = []
for est_arr in estimator.estimators_:
est_arr_ = []
for est in est_arr:
est_ = convert_estimator(est)
est_arr_.append(est_)
self.estimators_.append(est_arr_)
if hasattr(estimator, "init_"):
self.init_ = convert_estimator(estimator.init_)
self.loss = estimator.loss
self.learning_rate = estimator.learning_rate
self.n_features_ = estimator.n_features_in_
if self.loss == "deviance":
self.loss_ = (
_MultinomialDeviancePure(len(self.classes_))
if len(self.classes_) > 2
else _BinomialDeviancePure(len(self.classes_))
)
elif self.loss == "exponential":
self.loss_ = _ExponentialLossPure(len(self.classes_))
else:
raise ValueError("Loss: '{}' not supported.".format(self.loss))
check_types(self)
def _raw_predict_init(self, X):
"""Check input and compute raw predictions of the init estimator"""
X = check_array(X)
if shape(X)[1] != self.n_features_:
raise ValueError(
"X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features_, shape(X)[1]
)
)
if self.init_ == "zero":
raw_predictions = [[0.0] * shape(X)[1]] * shape(X)[0]
else:
raw_predictions = self.loss_.get_init_raw_predictions(X, self.init_)
return raw_predictions
def _raw_predict(self, X):
init_preds = self._raw_predict_init(X)
out = []
for k in range(len(self.estimators_[0])):
column = [0] * (shape(X)[0])
for index in range(len(self.estimators_)):
preds = self.estimators_[index][k].predict(X)
column = [
column[i] + (preds[i] * self.learning_rate)
for i in range(len(preds))
]
out.append(column)
out = transpose(out)
return operate_2d(init_preds, out, add)
def predict_proba(self, X):
raw_predictions = self._raw_predict(X)
return self.loss_._raw_prediction_to_proba(raw_predictions)
def predict_log_proba(self, X):
return apply_2d(self.predict_proba(X), safe_log)
def predict(self, X):
proba = self.predict_proba(X)
        return [self.classes_[a.index(max(a))] for a in proba] | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/ensemble/_gb.py | _gb.py | 0.865991 | 0.385519 |
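# --- Illustrative usage sketch (not part of the package source) ---
# A hedged sketch of wrapping a fitted GradientBoostingClassifier. The class
# above only recognizes loss='deviance' or 'exponential', so this assumes a
# scikit-learn version that still reports those loss names and that passes
# check_version(). The import path mirrors the file path shown above.
from sklearn.ensemble import GradientBoostingClassifier
from scikit_endpoint.ensemble._gb import GradientBoostingClassifierPure

X_train = [[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]]
y_train = [0, 1, 0, 1]
gbm = GradientBoostingClassifier(n_estimators=10, random_state=0).fit(X_train, y_train)
gbm_pure = GradientBoostingClassifierPure(gbm)  # each stage converted via convert_estimator()
print(gbm_pure.predict([[0.5, 0.9]]))
print(gbm_pure.predict_proba([[0.5, 0.9]]))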
from math import isnan
from ..utils import shape, check_array, check_types, check_version
from ..base import apply_2d, apply_axis_2d
def _to_impute(val, missing_values):
if isnan(missing_values):
return isnan(val)
else:
return val == missing_values
class MissingIndicatorPure:
"""
Pure python implementation of `MissingIndicator`.
Args:
estimator (sklearn estimator): fitted `MissingIndicator` object
"""
def __init__(self, estimator):
check_version(estimator)
self.features = estimator.features
self.features_ = estimator.features_.tolist()
self._n_features = estimator._n_features
self.missing_values = (
float(estimator.missing_values)
if isinstance(estimator.missing_values, float)
else estimator.missing_values
)
self.error_on_new = estimator.error_on_new
check_types(self)
def transform(self, X):
X = check_array(X)
if shape(X)[1] != self._n_features:
raise ValueError(
"X has a different number of features than during fitting."
)
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = set(features) - set(self.features_)
if self.error_on_new and len(features_diff_fit_trans) > 0:
raise ValueError(
"The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans)
)
if len(self.features_) < self._n_features:
imputer_mask = [
[float(a[i]) for i in range(len(a)) if i in self.features_]
for a in imputer_mask
]
return imputer_mask
def _get_missing_features_info(self, X):
func = lambda x: _to_impute(x, self.missing_values)
imputer_mask = apply_2d(X, func)
if self.features == "missing-only":
n_missing = apply_axis_2d(imputer_mask, sum, axis=0)
if self.features == "all":
features_indices = range(shape(X)[1])
else:
features_indices = [a for a in n_missing if a != 0]
return imputer_mask, features_indices
class SimpleImputerPure:
"""
Pure python implementation of `SimpleImputer`.
Args:
estimator (sklearn estimator): fitted `SimpleImputer` object
"""
def __init__(self, estimator):
check_version(estimator)
self.statistics_ = estimator.statistics_.tolist()
self.strategy = estimator.strategy
if hasattr(estimator, "add_indicator"):
self.add_indicator = estimator.add_indicator
else:
self.add_indicator = False
self.missing_values = (
float(estimator.missing_values)
if isinstance(estimator.missing_values, float)
else estimator.missing_values
)
if hasattr(estimator, "indicator_") and (estimator.indicator_ is not None):
self.indicator_ = MissingIndicatorPure(estimator.indicator_)
self.indicator_.error_on_new = False
check_types(self)
def _concatenate_indicator(self, X_imputed, X_indicator):
"""Concatenate indicator mask with the imputed data"""
if not self.add_indicator:
return X_imputed
if X_indicator is None:
raise ValueError(
"Data from the missing indicator are not provided. Call "
"_fit_indicator and _transform_indicator in the imputer "
"implementation."
)
return [
X_imputed[index] + X_indicator[index] for index in range(len(X_imputed))
]
def _transform_indicator(self, X):
"""
Compute the indicator mask.
Note that X must be the original data as passed to the imputer before
any imputation, since imputation may be done inplace in some cases.
"""
if self.add_indicator:
if not hasattr(self, "indicator_"):
raise ValueError(
"Make sure to call _fit_indicator before _transform_indicator"
)
return self.indicator_.transform(X)
def transform(self, X):
"""Transform inpute X by imputing values"""
X = check_array(X)
X_indicator = self._transform_indicator(X)
if shape(X)[1] != shape(self.statistics_)[0]:
raise ValueError(
"X has %d features per sample, expected %d"
% (shape(X)[1], shape(self.statistics_)[0])
)
# delete the invalid columns if strategy is not constant
if self.strategy == "constant":
valid_statistics = self.statistics_
else:
to_remove = [
index
for index in range(len(self.statistics_))
if isnan(self.statistics_[index])
]
if len(to_remove) > 0:
X = [[a[i] for i in range(len(a)) if i not in to_remove] for a in X]
valid_statistics = [
self.statistics_[i]
for i in range(len(self.statistics_))
if i not in to_remove
]
else:
valid_statistics = self.statistics_
func = (
lambda a, i: a[i]
if not _to_impute(a[i], self.missing_values)
else valid_statistics[i]
)
X_imputed = [[func(a, i) for i in range(len(a))] for a in X]
        return self._concatenate_indicator(X_imputed, X_indicator) | scikit-endpoint | /scikit-endpoint-0.0.3.tar.gz/scikit-endpoint-0.0.3/scikit_endpoint/impute/_base.py | _base.py | 0.829492 | 0.471223 |
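# --- Illustrative usage sketch (not part of the package source) ---
# A hedged sketch of wrapping a fitted SimpleImputer: missing entries in a plain
# list of lists are replaced with the stored statistics_. The import path mirrors
# the file path shown above and assumes check_version() accepts the installed
# scikit-learn.
import numpy as np
from sklearn.impute import SimpleImputer
from scikit_endpoint.impute._base import SimpleImputerPure

X_train = [[1.0, 2.0], [np.nan, 4.0], [3.0, np.nan]]
imputer = SimpleImputer(strategy="mean").fit(X_train)
imputer_pure = SimpleImputerPure(imputer)       # statistics_ kept as a python list
print(imputer_pure.transform([[np.nan, 5.0]]))  # nan -> column mean, i.e. [[2.0, 5.0]]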
<p>
<img src="https://github.com/monte-flora/scikit-explain/blob/master/images/mintpy_logo.png?raw=true" align="right" width="400" height="400" />
</p>
![Unit Tests](https://github.com/monte-flora/scikit-explain/actions/workflows/continuous_intergration.yml/badge.svg)
[![codecov](https://codecov.io/gh/monte-flora/s/branch/master/graph/badge.svg?token=GG9NRQOZ0N)](https://codecov.io/gh/monte-flora/scikit-explain)
[![Updates](https://pyup.io/repos/github/monte-flora/scikit-explain/shield.svg)](https://pyup.io/repos/github/monte-flora/scikit-explain/)
[![Python 3](https://pyup.io/repos/github/monte-flora/scikit-explain/python-3-shield.svg)](https://pyup.io/repos/github/monte-flora/scikit-explain/)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
![PyPI](https://img.shields.io/pypi/v/scikit-explain)
[![Documentation Status](https://readthedocs.org/projects/scikit-explain/badge/?version=latest)](https://scikit-explain.readthedocs.io/en/latest/?badge=latest)
scikit-explain is a user-friendly Python module for tabular-style machine learning explainability. Current explainability products include
* Feature importance:
  * [Single- and Multi-pass Permutation Importance](https://permutationimportance.readthedocs.io/en/latest/methods.html#permutation-importance) ([Breiman 2001](https://link.springer.com/article/10.1023/A:1010933404324), [Lakshmanan et al. 2015](https://journals.ametsoc.org/view/journals/atot/32/6/jtech-d-13-00205_1.xml?rskey=hlSyXu&result=2))
* [SHAP](https://christophm.github.io/interpretable-ml-book/shap.html)
* First-order PD/ALE Variance ([Greenwell et al. 2018](https://arxiv.org/abs/1805.04755))
* Grouped permutation importance ([Au et al. 2021](https://arxiv.org/abs/2104.11688))
* Feature Effects/Attributions:
* [Partial Dependence](https://christophm.github.io/interpretable-ml-book/pdp.html) (PD),
* [Accumulated local effects](https://christophm.github.io/interpretable-ml-book/ale.html) (ALE),
* Random forest-based feature contributions ([treeinterpreter](http://blog.datadive.net/interpreting-random-forests/))
* [SHAP](https://christophm.github.io/interpretable-ml-book/shap.html)
* [LIME](https://christophm.github.io/interpretable-ml-book/lime.html#lime)
* Main Effect Complexity (MEC; [Molnar et al. 2019](https://arxiv.org/abs/1904.03867))
* Feature Interactions:
* Second-order PD/ALE
* Interaction Strength and Main Effect Complexity (IAS; [Molnar et al. 2019](https://arxiv.org/abs/1904.03867))
* Second-order PD/ALE Variance ([Greenwell et al. 2018](https://arxiv.org/abs/1805.04755))
* Second-order Permutation Importance ([Oh et al. 2019](https://www.mdpi.com/2076-3417/9/23/5191))
* Friedman H-statistic ([Friedman and Popescu 2008](https://projecteuclid.org/journals/annals-of-applied-statistics/volume-2/issue-3/Predictive-learning-via-rule-ensembles/10.1214/07-AOAS148.full))
These explainability methods are discussed at length in Christoph Molnar's [Interpretable Machine Learning](https://christophm.github.io/interpretable-ml-book/). The primary feature of this package is the accompanying built-in plotting methods, which are designed to be easy to use while producing publication-level quality figures. The computations do leverage parallelization when possible. Documentation for scikit-explain can be found at [Read the Docs](https://scikit-explain.readthedocs.io/en/latest/index.html#).
The package is under active development and will likely contain bugs or errors. Feel free to raise issues!
This package is largely original code, but also includes snippets or chunks of code from preexisting packages. Our goal is not to take credit from other code authors, but to provide a single source for computing several machine learning explanation methods. Here is a list of packages used in scikit-explain:
[**PyALE**](https://github.com/DanaJomar/PyALE),
[**PermutationImportance**](https://github.com/gelijergensen/PermutationImportance),
[**ALEPython**](https://github.com/blent-ai/ALEPython),
[**SHAP**](https://github.com/slundberg/shap/),
[**scikit-learn**](https://github.com/scikit-learn/scikit-learn),
[**LIME**](https://github.com/marcotcr/lime),
[**Faster-LIME**](https://github.com/seansaito/Faster-LIME),
[**treeinterpreter**](https://github.com/andosa/treeinterpreter)
If you employ scikit-explain in your research, please cite this GitHub repository and the relevant packages listed above.
If you are experiencing issues with loading the tutorial jupyter notebooks, you can enter the URL/location of the notebooks into the following address: https://nbviewer.jupyter.org/.
## Install
scikit-explain can be installed through conda-forge or pip.
```
conda install -c conda-forge scikit-explain
pip install scikit-explain
```
## Dependencies
scikit-explain is compatible with Python 3.8 or newer. scikit-explain requires the following packages:
```
numpy
scipy
pandas
scikit-learn
matplotlib
shap>=0.30.0
xarray>=0.16.0
tqdm
statsmodels
seaborn>=0.11.0
```
Scikit-explain has built-in saving and loading functions for pandas dataframes and xarray datasets. Datasets are saved in netCDF4 format. To use this feature, install netCDF4 with one of the following: `pip install netCDF4` or `conda install -c conda-forge netCDF4`
### Initializing scikit-explain
The interface of scikit-explain is ```ExplainToolkit```, which houses all of the explainability methods and their corresponding plotting methods. See the tutorial notebooks for examples.
```python
import skexplain
# Loads three ML models (random forest, gradient-boosted tree, and logistic regression)
# trained on a subset of the road surface temperature data from Handler et al. (2020).
estimators = skexplain.load_models()
X,y = skexplain.load_data()
explainer = skexplain.ExplainToolkit(estimators=estimators,X=X,y=y,)
```
## Permutation Importance
scikit-explain includes both single-pass and multiple-pass permutation importance methods ([Breiman 2001](https://link.springer.com/article/10.1023/A:1010933404324), [Lakshmanan et al. 2015](https://journals.ametsoc.org/view/journals/atot/32/6/jtech-d-13-00205_1.xml?rskey=hlSyXu&result=2), [McGovern et al. 2019](https://journals.ametsoc.org/view/journals/bams/100/11/bams-d-18-0195.1.xml?rskey=TvAHl8&result=20)). The permutation direction can also be given (i.e., backward or forward). Users can also specify feature groups and compute the grouped permutation feature importance ([Au et al. 2021](https://arxiv.org/abs/2104.11688)). Scikit-explain also provides a function (skexplain.common.importance_utils.to_skexplain_importance) that converts any feature ranking into the format used by the plotting package. As the [tutorial](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/permutation_importance_tutorial.ipynb) shows, users have considerable flexibility for making publication-quality figures.
```python
perm_results = explainer.permutation_importance(n_vars=10, evaluation_fn='auc')
explainer.plot_importance(data=perm_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/multi_pass_perm_imp.png?raw=true" />
</p>
Sample notebook can be found here: [**Permutation Importance**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/permutation_importance_tutorial.ipynb)
## Partial dependence and Accumulated Local Effects
To compute the expected functional relationship between a feature and an ML model's prediction, scikit-explain provides partial dependence, accumulated local effects, and SHAP dependence. There is also an option for second-order interaction effects. Features can be selected manually, or you can run the permutation importance first and a built-in method will retrieve the top features. It is also possible to configure the plot to display readable feature names.
```python
# Assumes the .permutation_importance has already been run.
important_vars = explainer.get_important_vars(perm_results, multipass=True, nvars=7)
ale = explainer.ale(features=important_vars, n_bins=20)
explainer.plot_ale(ale)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/ale_1d.png?raw=true" />
</p>
Additionally, you can use the same code snippet to compute the second-order ALE (see the notebook for more details).
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/ale_2d.png?raw=true" />
</p>
Sample notebook can be found here:
- [**Accumulated Local effects**](https://github.com/monte-flora/skexplain/blob/master/tutorial_notebooks/accumulated_local_effect_tutorial.ipynb)
- [**Partial Dependence**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/partial_dependence_tutorial.ipynb)
## Feature Attributions (Local Explainability)
To explain individual examples (or sets of examples), scikit-explain has model-agnostic methods like SHAP and LIME and model-specific methods like tree interpreter (for decision tree-based models from scikit-learn). For SHAP, scikit-explain uses the shap.Explainer method, which automatically determines the most appropriate Shapley value algorithm ([see their docs](https://shap.readthedocs.io/en/latest/generated/shap.Explainer.html)). For LIME, scikit-explain uses the code from the Faster-LIME method. scikit-explain can create the summary and dependence plots from the shap python package, but they are adapted for multiple features and an easier user interface. It is also possible to plot attributions for a single example or summarized by model performance.
```python
import shap
single_example = X.iloc[[0]]
explainer = skexplain.ExplainToolkit(estimators=estimators[0], X=single_example,)
# For LIME, we must provide the training dataset. We also denote any categorical features.
lime_kws = {'training_data' : X.values, 'categorical_names' : ['rural', 'urban']}
# The masker handles the missing features. In this case, we are using correlations
# in the dataset to determine the feature groupings. These groups of features are removed or added into
# sets together.
shap_kws={'masker' : shap.maskers.Partition(X, max_samples=100, clustering="correlation"),
'algorithm' : 'permutation'}
# method can be a single str or list of strs.
attr_results = explainer.local_attributions(method=['shap', 'lime', 'tree_interpreter'], shap_kws=shap_kws, lime_kws=lime_kws)
fig = explainer.plot_contributions(attr_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/feature_contribution_single.png?raw=true" />
</p>
```python
explainer = skexplain.ExplainToolkit(estimators=estimators[0],X=X, y=y)
# average_attributions is used to average feature attributions and their feature values either using a simple mean or the mean based on model performance.
avg_attr_results = explainer.average_attributions(method='shap', shap_kwargs=shap_kws, performance_based=True,)
fig = explainer.plot_contributions(avg_attr_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/feature_contributions_perform.png?raw=true" />
</p>
```python
explainer = skexplain.ExplainToolkit(estimators=estimators[0],X=X, y=y)
attr_results = explainer.local_attributions(method='lime', lime_kws=lime_kws)
explainer.scatter_plot(plot_type = 'summary', dataset=attr_results)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/shap_dependence.png?raw=true" />
</p>
```python
from skexplain.common import plotting_config
features = ['tmp2m_hrs_bl_frez', 'sat_irbt', 'sfcT_hrs_ab_frez', 'tmp2m_hrs_ab_frez', 'd_rad_d']
explainer.scatter_plot(features=features,
                       plot_type='dependence',
                       dataset=attr_results,
                       display_feature_names=plotting_config.display_feature_names,
                       display_units=plotting_config.display_units,
                       to_probability=True)
```
<p align="center">
<img width="811" src="https://github.com/monte-flora/scikit-explain/blob/master/images/shap_summary.png?raw=true" />
</p>
Sample notebook can be found here:
- [**Feature Contributions**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/feature_contributions.ipynb)
- [**Additional Feature Attributions Plots**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/additional_feature_attribution_plots.ipynb)
## Tutorial notebooks
The notebooks provide the package documentation and demonstrate the scikit-explain API, which was used to create the above figures. If you are experiencing issues with loading the jupyter notebooks, you can enter the URL/location of the notebooks into the following address: https://nbviewer.jupyter.org/.
- [**Permutation Importance**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/permutation_importance_tutorial.ipynb)
- [**Accumulated Local effects**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/accumulated_local_effect_tutorial.ipynb)
- [**Partial Dependence**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/partial_dependence_tutorial.ipynb)
- [**Feature Contributions**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/feature_contributions.ipynb)
- [**Additional Feature Attributions Plots**](https://github.com/monte-flora/scikit-explain/blob/master/tutorial_notebooks/additional_feature_attribution_plots.ipynb)
 | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/README.md | README.md | 0.838845 | 0.944331 |
import numpy as np
import sklearn
from multiprocessing.pool import Pool
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, _tree
from distutils.version import LooseVersion
from tqdm import tqdm
if LooseVersion(sklearn.__version__) < LooseVersion("0.17"):
raise Exception("treeinterpreter requires scikit-learn 0.17 or later")
class TreeInterpreter:
def __init__(self, model, examples, joint_contribution=False, n_jobs=1):
"""
Parameters
----------
model : DecisionTreeRegressor, DecisionTreeClassifier,
ExtraTreeRegressor, ExtraTreeClassifier,
RandomForestRegressor, RandomForestClassifier,
ExtraTreesRegressor, ExtraTreesClassifier
Scikit-learn model on which the prediction should be decomposed.
        examples : array-like, shape = (n_samples, n_features)
            Test samples.
joint_contribution : boolean
Specifies if contributions are given individually from each feature,
or jointly over them
"""
self._model = model
self._examples = examples
self._joint_contribution = joint_contribution
self._n_jobs = n_jobs
def _get_tree_paths(self, tree, node_id, depth=0):
"""
Returns all paths through the tree as list of node_ids
"""
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
if left_child != _tree.TREE_LEAF:
left_paths = self._get_tree_paths(tree, left_child, depth=depth + 1)
right_paths = self._get_tree_paths(tree, right_child, depth=depth + 1)
for path in left_paths:
path.append(node_id)
for path in right_paths:
path.append(node_id)
paths = left_paths + right_paths
else:
paths = [[node_id]]
return paths
def predict_tree(self, tree):
"""
For a given DecisionTreeRegressor, DecisionTreeClassifier,
ExtraTreeRegressor, or ExtraTreeClassifier,
returns a triple of [prediction, bias and feature_contributions], such
that prediction ≈ bias + feature_contributions.
"""
leaves = tree.apply(self._examples)
paths = self._get_tree_paths(tree.tree_, 0)
for path in paths:
path.reverse()
leaf_to_path = {}
# map leaves to paths
for path in paths:
leaf_to_path[path[-1]] = path
# remove the single-dimensional inner arrays
values = tree.tree_.value.squeeze(axis=1)
# reshape if squeezed into a single float
if len(values.shape) == 0:
values = np.array([values])
if isinstance(tree, DecisionTreeRegressor):
biases = np.full(self._examples.shape[0], values[paths[0][0]])
line_shape = self._examples.shape[1]
elif isinstance(tree, DecisionTreeClassifier):
# scikit stores category counts, we turn them into probabilities
normalizer = values.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
values /= normalizer
biases = np.tile(values[paths[0][0]], (self._examples.shape[0], 1))
line_shape = (self._examples.shape[1], tree.n_classes_)
direct_prediction = values[leaves]
# make into python list, accessing values will be faster
values_list = list(values)
feature_index = list(tree.tree_.feature)
contributions = []
if self._joint_contribution:
for row, leaf in enumerate(leaves):
path = leaf_to_path[leaf]
path_features = set()
contributions.append({})
for i in range(len(path) - 1):
path_features.add(feature_index[path[i]])
contrib = values_list[path[i + 1]] - values_list[path[i]]
# path_features.sort()
contributions[row][tuple(sorted(path_features))] = (
contributions[row].get(tuple(sorted(path_features)), 0)
+ contrib
)
return direct_prediction, biases, contributions
else:
unique_leaves = np.unique(leaves)
unique_contributions = {}
for row, leaf in enumerate(unique_leaves):
for path in paths:
if leaf == path[-1]:
break
contribs = np.zeros(line_shape)
for i in range(len(path) - 1):
contrib = values_list[path[i + 1]] - values_list[path[i]]
contribs[feature_index[path[i]]] += contrib
unique_contributions[leaf] = contribs
for row, leaf in enumerate(leaves):
contributions.append(unique_contributions[leaf])
return direct_prediction, biases, np.array(contributions)
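    # Sanity check for the decomposition above (a sketch; `dt`, `X`, and `y` are
    # hypothetical, not defined in this module):
    #   dt = DecisionTreeRegressor().fit(X, y)
    #   pred, bias, contribs = TreeInterpreter(dt, X).predict_tree(dt)
    #   np.allclose(pred, bias + contribs.sum(axis=1))   # expected to be True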
def predict_forest(self):
"""
For a given RandomForestRegressor, RandomForestClassifier,
ExtraTreesRegressor, or ExtraTreesClassifier returns a triple of
[prediction, bias and feature_contributions], such that prediction ≈ bias +
feature_contributions.
"""
biases = []
contributions = []
predictions = []
if self._joint_contribution:
for tree in self._model.estimators_:
                pred, bias, contribution = self.predict_tree(tree)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
total_contributions = []
for i in range(len(self._examples)):
contr = {}
for j, dct in enumerate(contributions):
for k in set(dct[i]).union(set(contr.keys())):
contr[k] = (contr.get(k, 0) * j + dct[i].get(k, 0)) / (j + 1)
total_contributions.append(contr)
return (
np.mean(predictions, axis=0),
np.mean(biases, axis=0),
total_contributions,
)
else:
if self._n_jobs > 1:
pool = Pool(processes=self._n_jobs)
# iterates return values from the issued tasks
iterator = self._model.estimators_
for pred, bias, contribution in tqdm(pool.map(self.predict_tree, iterator), desc='Tree'):
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
pool.close()
pool.join()
else:
for tree in tqdm(self._model.estimators_, desc='Tree'):
pred, bias, contribution = self.predict_tree(tree)
biases.append(bias)
contributions.append(contribution)
predictions.append(pred)
return (
np.mean(predictions, axis=0),
np.mean(biases, axis=0),
np.mean(contributions, axis=0),
)
def predict(self):
"""Returns a triple (prediction, bias, feature_contributions), such
that prediction ≈ bias + feature_contributions.
Returns
-------
decomposed prediction : triple of
* prediction, shape = (n_samples) for regression and (n_samples, n_classes)
for classification
* bias, shape = (n_samples) for regression and (n_samples, n_classes) for
classification
            * contributions, if joint_contribution is False, an array of
            shape = (n_samples, n_features) for regression or
            shape = (n_samples, n_features, n_classes) for classification, denoting
            the contribution from each feature.
            If joint_contribution is True, an array of size n_samples,
            where each element is a dict mapping a tuple of feature indices
            to the contribution from that feature tuple.
"""
        # Only a single output response variable is supported
if self._model.n_outputs_ > 1:
raise ValueError("Multilabel classification trees not supported")
if isinstance(self._model, DecisionTreeClassifier) or isinstance(
self._model, DecisionTreeRegressor
):
            return self.predict_tree(self._model)
elif isinstance(self._model, RandomForestClassifier) or isinstance(
self._model, RandomForestRegressor
):
return self.predict_forest()
else:
raise ValueError(
"Wrong model type. Base learner needs to be a "
"DecisionTreeClassifier or DecisionTreeRegressor."
            ) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/tree_interpreter.py | tree_interpreter.py | 0.817793 | 0.402216 |
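# Illustrative usage of the TreeInterpreter class above (a sketch; the data and
# model are hypothetical, not part of the package):
#
#     import numpy as np
#     from sklearn.datasets import make_regression
#     from sklearn.ensemble import RandomForestRegressor
#
#     X, y = make_regression(n_samples=200, n_features=4, random_state=0)
#     rf = RandomForestRegressor(n_estimators=25, random_state=0).fit(X, y)
#
#     pred, bias, contribs = TreeInterpreter(rf, X).predict()
#     np.allclose(pred, bias + contribs.sum(axis=1))   # the decomposition holds per example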
import numpy as np
from .error_handling import InvalidStrategyException
__all__ = [
"verify_scoring_strategy",
"VALID_SCORING_STRATEGIES",
"argmin_of_mean",
"argmax_of_mean",
"indexer_of_converter",
]
def verify_scoring_strategy(scoring_strategy):
"""Asserts that the scoring strategy is valid and interprets various strings
:param scoring_strategy: a function to be used for determining optimal
variables or a string. If a function, should be of the form
``([some value]) -> index``. If a string, must be one of the options in
``VALID_SCORING_STRATEGIES``
:returns: a function to be used for determining optimal variables
"""
if callable(scoring_strategy):
return scoring_strategy
elif scoring_strategy in VALID_SCORING_STRATEGIES:
return VALID_SCORING_STRATEGIES[scoring_strategy]
else:
raise InvalidStrategyException(
scoring_strategy, options=list(VALID_SCORING_STRATEGIES.keys())
)
class indexer_of_converter(object):
"""This object is designed to help construct a scoring strategy by breaking
the process of determining an optimal score into two pieces:
First, each of the scores are converted to a simpler representation. For
instance, an array of scores resulting from a bootstrapped evaluation method
may be converted to just their mean.
Second, each of the simpler representations are compared to determine the
index of the one which is most optimal. This is typically just an ``argmin``
or ``argmax`` call.
"""
def __init__(self, indexer, converter):
"""Constructs a function which first converts all objects in a list to
something simpler and then uses the indexer to determine the index of
the most "optimal" one
        :param indexer: a function which converts a list of simple
            values (like numbers) to a single index
:param converter: a function which converts a single more complex object
to a simpler one (like a single number)
"""
self.indexer = indexer
self.converter = converter
def __call__(self, scores):
"""Finds the index of the most "optimal" score in a list"""
return self.indexer([self.converter(score) for score in scores])
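# Illustrative sketch: indexer_of_converter can also build custom strategies beyond
# the ones registered below, e.g. selecting the variable whose bootstrapped scores
# have the smallest median (the score values here are made up):
#   argmin_of_median = indexer_of_converter(np.argmin, np.median)
#   argmin_of_median([[0.2, 0.3], [0.05, 0.15], [0.5, 0.6]])   # -> 1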
argmin_of_mean = indexer_of_converter(np.argmin, np.mean)
argmax_of_mean = indexer_of_converter(np.argmax, np.mean)
VALID_SCORING_STRATEGIES = {
"max": argmax_of_mean,
"maximize": argmax_of_mean,
"argmax": np.argmax,
"min": argmin_of_mean,
"minimize": argmin_of_mean,
"argmin": np.argmin,
"argmin_of_mean": argmin_of_mean,
"argmax_of_mean": argmax_of_mean,
} | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/scoring_strategies.py | scoring_strategies.py | 0.827793 | 0.480418 |
from .abstract_runner import abstract_variable_importance
from .selection_strategies import (
SequentialForwardSelectionStrategy,
SequentialBackwardSelectionStrategy,
)
from .sklearn_api import (
score_untrained_sklearn_model,
score_untrained_sklearn_model_with_probabilities,
)
__all__ = [
"sequential_forward_selection",
"sklearn_sequential_forward_selection",
"sequential_backward_selection",
"sklearn_sequential_backward_selection",
]
def sequential_forward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
):
"""Performs sequential forward selection over data given a particular
set of functions for scoring and determining optimal variables
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> some_value``
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
return abstract_variable_importance(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
SequentialForwardSelectionStrategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
)
def sklearn_sequential_forward_selection(
model,
training_data,
scoring_data,
evaluation_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
nbootstrap=None,
subsample=1,
**kwargs
):
"""Performs sequential forward selection for a particular model,
``scoring_data``, ``evaluation_fn``, and strategy for determining optimal
variables
:param model: a sklearn model
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
        bootstrap round. If between 0 and 1, treated as a fraction of the
        total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs will be passed on to the ``evaluation_fn``
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
# Check if the data is probabilistic
if len(scoring_data[1].shape) > 1 and scoring_data[1].shape[1] > 1:
scoring_fn = score_untrained_sklearn_model_with_probabilities(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
else:
scoring_fn = score_untrained_sklearn_model(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
return sequential_forward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
)
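# Illustrative usage sketch of sklearn_sequential_forward_selection (the model,
# metric, and data names below are hypothetical examples):
#
#     from sklearn.linear_model import LogisticRegression
#     from sklearn.metrics import roc_auc_score
#
#     result = sklearn_sequential_forward_selection(
#         LogisticRegression(),
#         (X_train, y_train),                  # training_data
#         (X_score, y_score),                  # scoring_data
#         evaluation_fn=roc_auc_score,
#         scoring_strategy="argmax_of_mean",   # higher AUC is better
#         nimportant_vars=5,
#     )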
def sequential_backward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
):
"""Performs sequential backward selection over data given a particular
set of functions for scoring and determining optimal variables
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> some_value``
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
return abstract_variable_importance(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
SequentialBackwardSelectionStrategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
)
def sklearn_sequential_backward_selection(
model,
training_data,
scoring_data,
evaluation_fn,
scoring_strategy,
variable_names=None,
nimportant_vars=None,
njobs=1,
nbootstrap=None,
subsample=1,
**kwargs
):
"""Performs sequential backward selection for a particular model,
``scoring_data``, ``evaluation_fn``, and strategy for determining optimal
variables
:param model: a sklearn model
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ``([some_value]) -> index``
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
        bootstrap round. If between 0 and 1, treated as a fraction of the
        total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs will be passed on to the ``evaluation_fn``
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
# Check if the data is probabilistic
if len(scoring_data[1].shape) > 1 and scoring_data[1].shape[1] > 1:
scoring_fn = score_untrained_sklearn_model_with_probabilities(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
else:
scoring_fn = score_untrained_sklearn_model(
model, evaluation_fn, nbootstrap=nbootstrap, subsample=subsample, **kwargs
)
return sequential_backward_selection(
training_data,
scoring_data,
scoring_fn,
scoring_strategy,
variable_names=variable_names,
nimportant_vars=nimportant_vars,
njobs=njobs,
    ) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/sequential_selection.py | sequential_selection.py | 0.913141 | 0.528229 |
from multiprocessing import Process, Queue, cpu_count
try:
from Queue import Full as QueueFull
from Queue import Empty as QueueEmpty
except ImportError: # python3
from queue import Full as QueueFull
from queue import Empty as QueueEmpty
__all__ = ["pool_imap_unordered"]
def worker(func, recvq, sendq):
for args in iter(recvq.get, None):
# The args are training_data, scoring_data, var_idx
# Thus, we want to return the var_idx and then
# send those args to the abstract runner.
result = (args[-1], func(*args))
sendq.put(result)
def pool_imap_unordered(func, iterable, procs=cpu_count()):
"""Lazily imaps in an unordered manner over an iterable in parallel as a
generator
:Author: Grant Jenks <https://stackoverflow.com/users/232571/grantj>
:param func: function to perform on each iterable
:param iterable: iterable which has items to map over
:param procs: number of workers in the pool. Defaults to the cpu count
:yields: the results of the mapping
"""
# Create queues for sending/receiving items from iterable.
sendq = Queue(procs)
recvq = Queue()
# Start worker processes.
for rpt in range(procs):
Process(target=worker, args=(func, sendq, recvq)).start()
# Iterate iterable and communicate with worker processes.
send_len = 0
recv_len = 0
itr = iter(iterable)
try:
value = next(itr)
while True:
try:
sendq.put(value, True, 0.1)
send_len += 1
value = next(itr)
except QueueFull:
while True:
try:
result = recvq.get(False)
recv_len += 1
yield result
except QueueEmpty:
break
except StopIteration:
pass
# Collect all remaining results.
while recv_len < send_len:
result = recvq.get()
recv_len += 1
yield result
# Terminate worker processes.
for rpt in range(procs):
        sendq.put(None) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/multiprocessing_utils.py | multiprocessing_utils.py | 0.494629 | 0.225843 |
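# Illustrative sketch of pool_imap_unordered above (hypothetical function and data).
# Note that each yielded item is a pair (last argument, result), mirroring how the
# abstract runner passes the variable index as the final argument:
#
#     def _square(x, idx):          # must be a picklable, module-level function
#         return x * x
#
#     tasks = [(x, i) for i, x in enumerate([1, 2, 3, 4])]
#     for idx, value in pool_imap_unordered(_square, tasks, procs=2):
#         print(idx, value)         # results arrive in arbitrary order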
import numpy as np
import pandas as pd
from .utils import get_data_subset, make_data_from_columns, conditional_permutations
__all__ = [
"SequentialForwardSelectionStrategy",
"SequentialBackwardSelectionStrategy",
"PermutationImportanceSelectionStrategy",
"SelectionStrategy",
]
class SelectionStrategy(object):
"""The base ``SelectionStrategy`` only provides the tools for storing the
data and other important information as well as the convenience method for
iterating over the selection strategies triples lazily."""
name = "Abstract Selection Strategy"
def __init__(self, training_data, scoring_data, num_vars, important_vars):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are already
considered important
"""
self.training_data = training_data
self.scoring_data = scoring_data
self.num_vars = num_vars
self.important_vars = important_vars
def generate_datasets(self, important_variables):
"""Generator which returns triples (variable, training_data, scoring_data)"""
raise NotImplementedError(
"Please implement a strategy for generating datasets on class %s"
% self.name
)
def generate_all_datasets(self):
"""By default, loops over all variables not yet considered important"""
for var in range(self.num_vars):
if var not in self.important_vars:
training_data, scoring_data = self.generate_datasets(
self.important_vars
+ [var,]
)
yield (training_data, scoring_data, var)
def __iter__(self):
return self.generate_all_datasets()
class SequentialForwardSelectionStrategy(SelectionStrategy):
"""Sequential Forward Selection tests all variables which are not yet
considered important by adding that columns to the other columns which are
returned. This means that the shape of the training data will be
``(num_rows, num_important_vars + 1)``."""
name = "Sequential Forward Selection"
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset is the columns
which are important
:returns: (training_data, scoring_data)
"""
training_inputs, training_outputs = self.training_data
scoring_inputs, scoring_outputs = self.scoring_data
columns = important_variables
# Make a slice of the training inputs
training_inputs_subset = get_data_subset(training_inputs, None, columns)
# Make a slice of the scoring inputs
scoring_inputs_subset = get_data_subset(scoring_inputs, None, columns)
return (training_inputs_subset, training_outputs), (
scoring_inputs_subset,
scoring_outputs,
)
class SequentialBackwardSelectionStrategy(SelectionStrategy):
"""Sequential Backward Selection tests all variables which are not yet
considered important by removing that column from the data. This means that
the shape of the training data will be
``(num_rows, num_vars - num_important_vars - 1)``."""
name = "Sequential Backward Selection"
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset is the columns
which are not important
        :returns: (training_data, scoring_data)
"""
training_inputs, training_outputs = self.training_data
scoring_inputs, scoring_outputs = self.scoring_data
columns = [x for x in range(self.num_vars) if x not in important_variables]
# Make a slice of the training inputs
training_inputs_subset = get_data_subset(training_inputs, None, columns)
# Make a slice of the scoring inputs
scoring_inputs_subset = get_data_subset(scoring_inputs, None, columns)
return (training_inputs_subset, training_outputs), (
scoring_inputs_subset,
scoring_outputs,
)
class PermutationImportanceSelectionStrategy(SelectionStrategy):
"""Permutation Importance tests all variables which are not yet considered
important by shuffling that column in addition to the columns of the
variables which are considered important. The shape of the data will remain
constant, but at each step, one additional column will be permuted."""
name = "Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(PermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
indices = random_state.permutation(len(scoring_inputs))
# With each iteration of the algorithm, the indices
# are shuffled once and identically for each feature.
# Thus, when multiple features are permuted they are
# jointly permuted (i.e., without destroying the
# dependencies of the features within the group).
# However, how the features are jointly permuted
# changes from iteration to iteration to limit
# bias due to a poor permutation.
self.shuffled_scoring_inputs = get_data_subset(
scoring_inputs, indices
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are important shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
self.shuffled_scoring_inputs
if i in important_variables
else scoring_inputs,
None,
[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs)
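# Illustrative sketch of the joint permutation performed by
# PermutationImportanceSelectionStrategy above (hypothetical values): if features
# 0 and 2 are currently "important", both columns are drawn from the same
# pre-shuffled copy, so their rows stay paired after permutation:
#   scoring_inputs = np.array([[1, 10, 100], [2, 20, 200], [3, 30, 300]])
#   indices = [2, 0, 1]                     # one permutation per algorithm iteration
#   shuffled = scoring_inputs[indices]      # rows reordered identically for all columns
#   # permuted columns 0 and 2 now read (3, 300), (1, 100), (2, 200) -- still aligned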
class ConditionalPermutationImportanceSelectionStrategy(SelectionStrategy):
"""Conditional Permutation Importance tests all variables which are not yet considered
important by performing conditional permutation on that column in addition to the columns of the
variables which are considered important. The shape of the data will remain
constant, but at each step, one additional column will be permuted."""
name = "Conditional Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(ConditionalPermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
n_bins = kwargs.get("n_bins", 50)
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
self.shuffled_scoring_inputs = conditional_permutations(
scoring_inputs, n_bins, random_state
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are important shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
# If a feature has been deemed important it remains shuffled
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
self.shuffled_scoring_inputs
if i in important_variables
else scoring_inputs,
None,
[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs)
class ForwardPermutationImportanceSelectionStrategy(SelectionStrategy):
"""Forward Permutation Importance permutes all variables and then tests
all variables which are not yet considered."""
name = "Forward Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(ForwardPermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
# With each iteration of the algorithm, the indices
# are shuffled once and identically for each feature.
# Thus, when multiple features are permuted they are
# jointly permuted (i.e., without destroying the
# dependencies of the features within the group).
# However, how the features are jointly permuted
# changes from iteration to iteration to limit
# bias due to a poor permutation.
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
indices = random_state.permutation(len(scoring_inputs))
self.shuffled_scoring_inputs = get_data_subset(
scoring_inputs, indices
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are non-important variables are shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
# If a feature has been deemed important it remains shuffled
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
scoring_inputs
if i in important_variables
else self.shuffled_scoring_inputs,
                    columns=[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/selection_strategies.py | selection_strategies.py | import numpy as np
import pandas as pd
from .utils import get_data_subset, make_data_from_columns, conditional_permutations
__all__ = [
"SequentialForwardSelectionStrategy",
"SequentialBackwardSelectionStrategy",
"PermutationImportanceSelectionStrategy",
"SelectionStrategy",
]
class SelectionStrategy(object):
"""The base ``SelectionStrategy`` only provides the tools for storing the
data and other important information as well as the convenience method for
iterating over the selection strategies triples lazily."""
name = "Abstract Selection Strategy"
def __init__(self, training_data, scoring_data, num_vars, important_vars):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are already
considered important
"""
self.training_data = training_data
self.scoring_data = scoring_data
self.num_vars = num_vars
self.important_vars = important_vars
def generate_datasets(self, important_variables):
"""Generator which returns triples (variable, training_data, scoring_data)"""
raise NotImplementedError(
"Please implement a strategy for generating datasets on class %s"
% self.name
)
def generate_all_datasets(self):
"""By default, loops over all variables not yet considered important"""
for var in range(self.num_vars):
if var not in self.important_vars:
training_data, scoring_data = self.generate_datasets(
self.important_vars
+ [var,]
)
yield (training_data, scoring_data, var)
def __iter__(self):
return self.generate_all_datasets()
class SequentialForwardSelectionStrategy(SelectionStrategy):
"""Sequential Forward Selection tests all variables which are not yet
considered important by adding that columns to the other columns which are
returned. This means that the shape of the training data will be
``(num_rows, num_important_vars + 1)``."""
name = "Sequential Forward Selection"
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset is the columns
which are important
:returns: (training_data, scoring_data)
"""
training_inputs, training_outputs = self.training_data
scoring_inputs, scoring_outputs = self.scoring_data
columns = important_variables
# Make a slice of the training inputs
training_inputs_subset = get_data_subset(training_inputs, None, columns)
# Make a slice of the scoring inputs
scoring_inputs_subset = get_data_subset(scoring_inputs, None, columns)
return (training_inputs_subset, training_outputs), (
scoring_inputs_subset,
scoring_outputs,
)
class SequentialBackwardSelectionStrategy(SelectionStrategy):
"""Sequential Backward Selection tests all variables which are not yet
considered important by removing that column from the data. This means that
the shape of the training data will be
``(num_rows, num_vars - num_important_vars - 1)``."""
name = "Sequential Backward Selection"
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset is the columns
which are not important
:yields: a sequence of (variable being evaluated, columns to include)
"""
training_inputs, training_outputs = self.training_data
scoring_inputs, scoring_outputs = self.scoring_data
columns = [x for x in range(self.num_vars) if x not in important_variables]
# Make a slice of the training inputs
training_inputs_subset = get_data_subset(training_inputs, None, columns)
# Make a slice of the scoring inputs
scoring_inputs_subset = get_data_subset(scoring_inputs, None, columns)
return (training_inputs_subset, training_outputs), (
scoring_inputs_subset,
scoring_outputs,
)
class PermutationImportanceSelectionStrategy(SelectionStrategy):
"""Permutation Importance tests all variables which are not yet considered
important by shuffling that column in addition to the columns of the
variables which are considered important. The shape of the data will remain
constant, but at each step, one additional column will be permuted."""
name = "Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(PermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
indices = random_state.permutation(len(scoring_inputs))
# With each iteration of the algorithm, the indices
# are shuffled once and identically for each feature.
# Thus, when multiple features are permuted they are
# jointly permuted (i.e., without destroying the
# dependencies of the features within the group).
# However, how the features are jointly permuted
# changes from iteration to iteration to limit
# bias due to a poor permutation.
self.shuffled_scoring_inputs = get_data_subset(
scoring_inputs, indices
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are important shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
self.shuffled_scoring_inputs
if i in important_variables
else scoring_inputs,
None,
[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs)
class ConditionalPermutationImportanceSelectionStrategy(SelectionStrategy):
"""Conditional Permutation Importance tests all variables which are not yet considered
important by performing conditional permutation on that column in addition to the columns of the
variables which are considered important. The shape of the data will remain
constant, but at each step, one additional column will be permuted."""
name = "Conditional Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(ConditionalPermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
n_bins = kwargs.get("n_bins", 50)
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
self.shuffled_scoring_inputs = conditional_permutations(
scoring_inputs, n_bins, random_state
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are important shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
# If a feature has been deemed important it remains shuffled
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
self.shuffled_scoring_inputs
if i in important_variables
else scoring_inputs,
None,
[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs)
class ForwardPermutationImportanceSelectionStrategy(SelectionStrategy):
"""Forward Permutation Importance permutes all variables and then tests
all variables which are not yet considered."""
name = "Forward Permutation Importance"
def __init__(
self,
training_data,
scoring_data,
num_vars,
important_vars,
random_state,
**kwargs
):
"""Initializes the object by storing the data and keeping track of other
important information
:param training_data: (training_inputs, training_outputs)
:param scoring_data: (scoring_inputs, scoring_outputs)
:param num_vars: integer for the total number of variables
:param important_vars: a list of the indices of variables which are
already considered important
"""
super(ForwardPermutationImportanceSelectionStrategy, self).__init__(
training_data, scoring_data, num_vars, important_vars
)
# With each iteration of the algorithm, the indices
# are shuffled once and identically for each feature.
# Thus, when multiple features are permuted they are
# jointly permuted (i.e., without destroying the
# dependencies of the features within the group).
# However, how the features are jointly permuted
# changes from iteration to iteration to limit
# bias due to a poor permutation.
# Also initialize the "shuffled data"
scoring_inputs, __ = self.scoring_data
indices = random_state.permutation(len(scoring_inputs))
self.shuffled_scoring_inputs = get_data_subset(
scoring_inputs, indices
) # This copies
# keep track of the initial index (assuming this is pandas data)
self.original_index = (
scoring_inputs.index if isinstance(scoring_inputs, pd.DataFrame) else None
)
def generate_datasets(self, important_variables):
"""Check each of the non-important variables. Dataset has columns which
are non-important variables are shuffled
:returns: (training_data, scoring_data)
"""
scoring_inputs, scoring_outputs = self.scoring_data
        # If a feature has been deemed important it remains unpermuted;
        # all other features use the jointly shuffled inputs
complete_scoring_inputs = make_data_from_columns(
[
get_data_subset(
scoring_inputs
if i in important_variables
else self.shuffled_scoring_inputs,
                    columns=[i],
)
for i in range(self.num_vars)
],
index=self.original_index,
)
return self.training_data, (complete_scoring_inputs, scoring_outputs) | 0.842053 | 0.553928 |
import numpy as np
import pandas as pd
import numbers
from .error_handling import InvalidDataException
__all__ = ["add_ranks_to_dict", "get_data_subset", "make_data_from_columns"]
def add_ranks_to_dict(result, variable_names, scoring_strategy):
"""Takes a list of (var, score) and converts to a dictionary of
{var: (rank, score)}
:param result: a dict of {var_index: score}
:param variable_names: a list of variable names
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ([floats]) -> index
"""
if len(result) == 0:
return dict()
result_dict = dict()
rank = 0
while len(result) > 1:
var_idxs = list(result.keys())
idxs = np.argsort(var_idxs)
# Sort by indices to guarantee order
variables = list(np.array(var_idxs)[idxs])
scores = list(np.array(list(result.values()))[idxs])
best_var = variables[scoring_strategy(scores)]
score = result.pop(best_var)
result_dict[variable_names[best_var]] = (rank, score)
rank += 1
var, score = list(result.items())[0]
result_dict[variable_names[var]] = (rank, score)
return result_dict
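# --- Editor's hedged usage sketch (not part of the original module) ---------
# A minimal illustration of add_ranks_to_dict, assuming an error-style metric
# where lower scores are better, so np.argmin selects the "best" variable each
# round. The variable names below are illustrative only.
def _demo_add_ranks_to_dict():
    import numpy as np

    scores = {0: 1.0, 1: 3.0, 2: 2.0}          # {var_index: score}
    names = ["temp", "dewpoint", "wind"]
    ranked = add_ranks_to_dict(scores, names, np.argmin)
    # -> {"temp": (0, 1.0), "wind": (1, 2.0), "dewpoint": (2, 3.0)}
    return ranked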
def get_data_subset(data, rows=None, columns=None):
"""Returns a subset of the data corresponding to the desired rows and
columns
:param data: either a pandas dataframe or a numpy array
:param rows: a list of row indices
:param columns: a list of column indices
:returns: data_subset (same type as data)
"""
if rows is None:
rows = np.arange(data.shape[0])
if isinstance(data, pd.DataFrame):
if columns is None:
return data.iloc[rows]
else:
return data.iloc[rows, columns]
elif isinstance(data, np.ndarray):
if columns is None:
return data[rows]
else:
return data[np.ix_(rows, columns)]
else:
raise InvalidDataException(
data, "Data must be a pandas dataframe or numpy array"
)
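# --- Editor's hedged usage sketch (not part of the original module) ---------
# get_data_subset behaves the same for DataFrames and NumPy arrays; for arrays
# simultaneous row/column selection goes through np.ix_. Illustrative only.
def _demo_get_data_subset():
    import numpy as np

    X = np.arange(20).reshape(5, 4)
    rows_only = get_data_subset(X, rows=[0, 2])                  # shape (2, 4)
    rows_cols = get_data_subset(X, rows=[0, 2], columns=[1, 3])  # shape (2, 2)
    assert rows_only.shape == (2, 4) and rows_cols.shape == (2, 2)
    return rows_cols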
def make_data_from_columns(columns_list, index=None):
"""Synthesizes a dataset out of a list of columns
:param columns_list: a list of either pandas series or numpy arrays
:returns: a pandas dataframe or a numpy array
"""
if len(columns_list) == 0:
raise InvalidDataException(
columns_list, "Must have at least one column to synthesize dataset"
)
if isinstance(columns_list[0], pd.DataFrame) or isinstance(
columns_list[0], pd.Series
):
df = pd.concat([c.reset_index(drop=True) for c in columns_list], axis=1)
if index is not None:
return df.set_index(index)
else:
return df
elif isinstance(columns_list[0], np.ndarray):
return np.column_stack(columns_list)
else:
raise InvalidDataException(
columns_list,
"Columns_list must come from a pandas dataframe or numpy arrays",
)
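# --- Editor's hedged usage sketch (not part of the original module) ---------
# make_data_from_columns is the inverse of slicing a dataset one column at a
# time: given single-column pieces it stitches them back together, restoring
# the original index when the pieces are pandas objects. Illustrative only.
def _demo_make_data_from_columns():
    import numpy as np

    X = np.arange(12).reshape(4, 3)
    pieces = [X[:, [i]] for i in range(X.shape[1])]   # list of (4, 1) arrays
    rebuilt = make_data_from_columns(pieces)
    assert np.array_equal(rebuilt, X)
    return rebuilt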
def conditional_permutations(data, n_bins, random_state):
"""
Conditionally permute each feature in a dataset.
Code appended to the PermutationImportance package by Montgomery Flora 2021.
Args:
-------------------
data : pd.DataFrame or np.ndarray shape=(n_examples, n_features,)
    n_bins : integer
        number of bins to divide a feature into. Based on a
        percentile method to ensure that each bin receives
a similar number of examples
random_state : np.random.RandomState instance
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
Returns:
-------------------
permuted_data : a permuted version of data
"""
permuted_data = data.copy()
for i in range(np.shape(data)[1]):
# Get the bin values of feature
if isinstance(data, pd.DataFrame):
feature_values = data.iloc[:, i]
elif isinstance(data, np.ndarray):
feature_values = data[:, i]
else:
raise InvalidDataException(
data, "Data must be a pandas dataframe or numpy array"
)
bin_edges = np.unique(
np.percentile(
feature_values,
np.linspace(0, 100, n_bins + 1),
interpolation="lower",
)
)
bin_indices = np.clip(
np.digitize(feature_values, bin_edges, right=True) - 1, 0, None
)
shuffled_indices = bin_indices.copy()
unique_bin_values = np.unique(bin_indices)
# bin_indices is composed of bin indices for a corresponding value of feature_values
for bin_idx in unique_bin_values:
            # idx holds the positions where the bin index == bin_idx
idx = np.where(bin_indices == bin_idx)[0]
# Replace the bin indices with a permutation of the actual indices
shuffled_indices[idx] = random_state.permutation(idx)
if isinstance(data, pd.DataFrame):
permuted_data.iloc[:, i] = data.iloc[shuffled_indices, i]
else:
permuted_data[:, i] = data[shuffled_indices, i]
return permuted_data
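# --- Editor's hedged usage sketch (not part of the original module) ---------
# conditional_permutations shuffles each feature only *within* percentile
# bins, so each output column is still a permutation of the input column but
# values stay near their original quantile. The assertion below only checks
# the permutation property, which holds by construction.
def _demo_conditional_permutations():
    import numpy as np

    rng = np.random.RandomState(42)
    X = rng.normal(size=(200, 2))
    X_perm = conditional_permutations(X, n_bins=10, random_state=rng)
    for j in range(X.shape[1]):
        assert np.array_equal(np.sort(X_perm[:, j]), np.sort(X[:, j]))
    return X_perm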
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters. Function comes for sci-kit-learn.
----------
seed : None, int or instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState" " instance" % seed
)
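# --- Editor's hedged usage sketch (not part of the original module) ---------
# check_random_state mirrors the scikit-learn helper: None, an int, or an
# existing RandomState all resolve to a RandomState instance.
def _demo_check_random_state():
    import numpy as np

    rs = check_random_state(0)
    assert check_random_state(rs) is rs                  # instances pass through
    assert isinstance(check_random_state(None), np.random.RandomState)
    return rs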
def bootstrap_generator(n_bootstrap, seed=42):
"""
Create a repeatable bootstrap generator.
Will create the same set of random state generators given
a number of bootstrap iterations.
"""
base_random_state = np.random.RandomState(seed)
random_num_set = base_random_state.choice(10000, size=n_bootstrap, replace=False)
random_states = [np.random.RandomState(s) for s in random_num_set]
return random_states | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/utils.py | utils.py | import numpy as np
import pandas as pd
import numbers
from .error_handling import InvalidDataException
__all__ = ["add_ranks_to_dict", "get_data_subset", "make_data_from_columns"]
def add_ranks_to_dict(result, variable_names, scoring_strategy):
"""Takes a list of (var, score) and converts to a dictionary of
{var: (rank, score)}
:param result: a dict of {var_index: score}
:param variable_names: a list of variable names
:param scoring_strategy: a function to be used for determining optimal
variables. Should be of the form ([floats]) -> index
"""
if len(result) == 0:
return dict()
result_dict = dict()
rank = 0
while len(result) > 1:
var_idxs = list(result.keys())
idxs = np.argsort(var_idxs)
# Sort by indices to guarantee order
variables = list(np.array(var_idxs)[idxs])
scores = list(np.array(list(result.values()))[idxs])
best_var = variables[scoring_strategy(scores)]
score = result.pop(best_var)
result_dict[variable_names[best_var]] = (rank, score)
rank += 1
var, score = list(result.items())[0]
result_dict[variable_names[var]] = (rank, score)
return result_dict
def get_data_subset(data, rows=None, columns=None):
"""Returns a subset of the data corresponding to the desired rows and
columns
:param data: either a pandas dataframe or a numpy array
:param rows: a list of row indices
:param columns: a list of column indices
:returns: data_subset (same type as data)
"""
if rows is None:
rows = np.arange(data.shape[0])
if isinstance(data, pd.DataFrame):
if columns is None:
return data.iloc[rows]
else:
return data.iloc[rows, columns]
elif isinstance(data, np.ndarray):
if columns is None:
return data[rows]
else:
return data[np.ix_(rows, columns)]
else:
raise InvalidDataException(
data, "Data must be a pandas dataframe or numpy array"
)
def make_data_from_columns(columns_list, index=None):
"""Synthesizes a dataset out of a list of columns
:param columns_list: a list of either pandas series or numpy arrays
:returns: a pandas dataframe or a numpy array
"""
if len(columns_list) == 0:
raise InvalidDataException(
columns_list, "Must have at least one column to synthesize dataset"
)
if isinstance(columns_list[0], pd.DataFrame) or isinstance(
columns_list[0], pd.Series
):
df = pd.concat([c.reset_index(drop=True) for c in columns_list], axis=1)
if index is not None:
return df.set_index(index)
else:
return df
elif isinstance(columns_list[0], np.ndarray):
return np.column_stack(columns_list)
else:
raise InvalidDataException(
columns_list,
"Columns_list must come from a pandas dataframe or numpy arrays",
)
def conditional_permutations(data, n_bins, random_state):
"""
Conditionally permute each feature in a dataset.
Code appended to the PermutationImportance package by Montgomery Flora 2021.
Args:
-------------------
data : pd.DataFrame or np.ndarray shape=(n_examples, n_features,)
    n_bins : integer
        number of bins to divide a feature into. Based on a
        percentile method to ensure that each bin receives
a similar number of examples
random_state : np.random.RandomState instance
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
Returns:
-------------------
permuted_data : a permuted version of data
"""
permuted_data = data.copy()
for i in range(np.shape(data)[1]):
# Get the bin values of feature
if isinstance(data, pd.DataFrame):
feature_values = data.iloc[:, i]
elif isinstance(data, np.ndarray):
feature_values = data[:, i]
else:
raise InvalidDataException(
data, "Data must be a pandas dataframe or numpy array"
)
bin_edges = np.unique(
np.percentile(
feature_values,
np.linspace(0, 100, n_bins + 1),
interpolation="lower",
)
)
bin_indices = np.clip(
np.digitize(feature_values, bin_edges, right=True) - 1, 0, None
)
shuffled_indices = bin_indices.copy()
unique_bin_values = np.unique(bin_indices)
# bin_indices is composed of bin indices for a corresponding value of feature_values
for bin_idx in unique_bin_values:
            # idx holds the positions where the bin index == bin_idx
idx = np.where(bin_indices == bin_idx)[0]
# Replace the bin indices with a permutation of the actual indices
shuffled_indices[idx] = random_state.permutation(idx)
if isinstance(data, pd.DataFrame):
permuted_data.iloc[:, i] = data.iloc[shuffled_indices, i]
else:
permuted_data[:, i] = data[shuffled_indices, i]
return permuted_data
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
Parameters. Function comes for sci-kit-learn.
----------
seed : None, int or instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState" " instance" % seed
)
def bootstrap_generator(n_bootstrap, seed=42):
"""
Create a repeatable bootstrap generator.
Will create the same set of random state generators given
a number of bootstrap iterations.
"""
base_random_state = np.random.RandomState(seed)
random_num_set = base_random_state.choice(10000, size=n_bootstrap, replace=False)
random_states = [np.random.RandomState(s) for s in random_num_set]
return random_states | 0.79909 | 0.591045 |
class InvalidStrategyException(Exception):
"""Thrown when a scoring strategy is invalid"""
def __init__(self, strategy, msg=None, options=None):
if msg is None:
msg = (
"%s is not a valid strategy for determining the optimal variable. "
% strategy
)
msg += "\nShould be a callable or a valid string option. "
if options is not None:
msg += "Valid options are\n%r" % options
super(InvalidStrategyException, self).__init__(msg)
self.strategy = strategy
        self.options = options
class InvalidInputException(Exception):
"""Thrown when the input to the program does not match expectations"""
def __init__(self, value, msg=None):
if msg is None:
msg = "Input value does not match expectations: %s" % value
super(InvalidInputException, self).__init__(msg)
self.value = value
class InvalidDataException(Exception):
"""Thrown when the training or scoring data is not of the right type"""
def __init__(self, data, msg=None):
if msg is None:
msg = "Data is not of the right format"
super(InvalidDataException, self).__init__(msg)
self.data = data
class UnmatchedLengthPredictionsException(Exception):
"""Thrown when the number of predictions doesn't match truths"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Shapes of truths and predictions do not match: %r and %r" % (
truths.shape,
predictions.shape,
)
super(UnmatchedLengthPredictionsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class UnmatchingProbabilisticForecastsException(Exception):
"""Thrown when the shape of probabilisic predictions doesn't match the truths"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Shapes of truths and predictions do not match: %r and %r" % (
truths.shape,
predictions.shape,
)
super(UnmatchingProbabilisticForecastsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class AmbiguousProbabilisticForecastsException(Exception):
"""Thrown when classes were not provided for converting probabilistic
predictions to deterministic ones but are required"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Classes not provided for converting probabilistic predictions to deterministic ones"
super(AmbiguousProbabilisticForecastsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class FullImportanceResultWarning(Warning):
"""Thrown when we try to add a result to a full
:class:`PermutationImportance.result.ImportanceResult`"""
pass | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/error_handling.py | error_handling.py | class InvalidStrategyException(Exception):
"""Thrown when a scoring strategy is invalid"""
def __init__(self, strategy, msg=None, options=None):
if msg is None:
msg = (
"%s is not a valid strategy for determining the optimal variable. "
% strategy
)
msg += "\nShould be a callable or a valid string option. "
if options is not None:
msg += "Valid options are\n%r" % options
super(InvalidStrategyException, self).__init__(msg)
self.strategy = strategy
        self.options = options
class InvalidInputException(Exception):
"""Thrown when the input to the program does not match expectations"""
def __init__(self, value, msg=None):
if msg is None:
msg = "Input value does not match expectations: %s" % value
super(InvalidInputException, self).__init__(msg)
self.value = value
class InvalidDataException(Exception):
"""Thrown when the training or scoring data is not of the right type"""
def __init__(self, data, msg=None):
if msg is None:
msg = "Data is not of the right format"
super(InvalidDataException, self).__init__(msg)
self.data = data
class UnmatchedLengthPredictionsException(Exception):
"""Thrown when the number of predictions doesn't match truths"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Shapes of truths and predictions do not match: %r and %r" % (
truths.shape,
predictions.shape,
)
super(UnmatchedLengthPredictionsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class UnmatchingProbabilisticForecastsException(Exception):
"""Thrown when the shape of probabilisic predictions doesn't match the truths"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Shapes of truths and predictions do not match: %r and %r" % (
truths.shape,
predictions.shape,
)
super(UnmatchingProbabilisticForecastsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class AmbiguousProbabilisticForecastsException(Exception):
"""Thrown when classes were not provided for converting probabilistic
predictions to deterministic ones but are required"""
def __init__(self, truths, predictions, msg=None):
if msg is None:
msg = "Classes not provided for converting probabilistic predictions to deterministic ones"
super(AmbiguousProbabilisticForecastsException, self).__init__(msg)
self.truths = truths
self.predictions = predictions
class FullImportanceResultWarning(Warning):
"""Thrown when we try to add a result to a full
:class:`PermutationImportance.result.ImportanceResult`"""
pass | 0.790611 | 0.24243 |
import numpy as np
import pandas as pd
from .error_handling import InvalidDataException, InvalidInputException
try:
basestring
except NameError: # Python3
basestring = str
__all__ = ["verify_data", "determine_variable_names"]
def verify_data(data):
"""Verifies that the data tuple is of the right format and coerces it to
numpy arrays for the code under the hood
:param data: one of the following:
(pandas dataframe, string for target column),
(pandas dataframe for inputs, pandas dataframe for outputs),
(numpy array for inputs, numpy array for outputs)
:returns: (numpy array for input, numpy array for output) or
(pandas dataframe for input, pandas dataframe for output)
"""
try:
iter(data)
except TypeError:
raise InvalidDataException(data, "Data must be iterable")
else:
if len(data) != 2:
raise InvalidDataException(data, "Data must contain 2 elements")
else:
# check if the first element is pandas dataframe or numpy array
if isinstance(data[0], pd.DataFrame):
# check if the second element is string or pandas dataframe
if isinstance(data[1], basestring):
return (
data[0].loc[:, data[0].columns != data[1]],
data[0][[data[1]]],
)
elif isinstance(data[1], pd.DataFrame):
return data[0], data[1]
else:
raise InvalidDataException(
data,
"Second element of data must be a string for the target column or a pandas dataframe",
)
elif isinstance(data[0], np.ndarray):
if isinstance(data[1], np.ndarray):
return data[0], data[1]
else:
raise InvalidDataException(
data, "Second element of data must also be a numpy array"
)
else:
raise InvalidDataException(
data,
"First element of data must be a numpy array or pandas dataframe",
)
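# --- Editor's hedged usage sketch (not part of the original module) ---------
# verify_data accepts (DataFrame, "target_column") and splits it into an input
# frame and a single-column output frame; the column names are illustrative.
def _demo_verify_data():
    import pandas as pd

    df = pd.DataFrame({"x1": [1, 2], "x2": [3, 4], "target": [0, 1]})
    inputs, outputs = verify_data((df, "target"))
    assert list(inputs.columns) == ["x1", "x2"]
    assert list(outputs.columns) == ["target"]
    return inputs, outputs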
def determine_variable_names(data, variable_names):
"""Uses ``data`` and/or the ``variable_names`` to determine what the
variable names are. If ``variable_names`` is not specified and ``data`` is
not a pandas dataframe, defaults to the column indices
:param data: a 2-tuple where the input data is the first item
:param variable_names: either a list of variable names or None
:returns: a list of variable names
"""
if variable_names is not None:
try:
iter(variable_names)
except TypeError:
raise InvalidInputException(
variable_names, "Variable names must be iterable"
)
else:
if len(variable_names) != data[0].shape[1]:
raise InvalidInputException(
variable_names,
"Variable names should have length %i" % data[0].shape[1],
)
else:
return np.array(variable_names)
else:
if isinstance(data[0], pd.DataFrame):
return data[0].columns.values
else:
return np.arange(data[0].shape[1]) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/data_verification.py | data_verification.py | import numpy as np
import pandas as pd
from .error_handling import InvalidDataException, InvalidInputException
try:
basestring
except NameError: # Python3
basestring = str
__all__ = ["verify_data", "determine_variable_names"]
def verify_data(data):
"""Verifies that the data tuple is of the right format and coerces it to
numpy arrays for the code under the hood
:param data: one of the following:
(pandas dataframe, string for target column),
(pandas dataframe for inputs, pandas dataframe for outputs),
(numpy array for inputs, numpy array for outputs)
:returns: (numpy array for input, numpy array for output) or
(pandas dataframe for input, pandas dataframe for output)
"""
try:
iter(data)
except TypeError:
raise InvalidDataException(data, "Data must be iterable")
else:
if len(data) != 2:
raise InvalidDataException(data, "Data must contain 2 elements")
else:
# check if the first element is pandas dataframe or numpy array
if isinstance(data[0], pd.DataFrame):
# check if the second element is string or pandas dataframe
if isinstance(data[1], basestring):
return (
data[0].loc[:, data[0].columns != data[1]],
data[0][[data[1]]],
)
elif isinstance(data[1], pd.DataFrame):
return data[0], data[1]
else:
raise InvalidDataException(
data,
"Second element of data must be a string for the target column or a pandas dataframe",
)
elif isinstance(data[0], np.ndarray):
if isinstance(data[1], np.ndarray):
return data[0], data[1]
else:
raise InvalidDataException(
data, "Second element of data must also be a numpy array"
)
else:
raise InvalidDataException(
data,
"First element of data must be a numpy array or pandas dataframe",
)
def determine_variable_names(data, variable_names):
"""Uses ``data`` and/or the ``variable_names`` to determine what the
variable names are. If ``variable_names`` is not specified and ``data`` is
not a pandas dataframe, defaults to the column indices
:param data: a 2-tuple where the input data is the first item
:param variable_names: either a list of variable names or None
:returns: a list of variable names
"""
if variable_names is not None:
try:
iter(variable_names)
except TypeError:
raise InvalidInputException(
variable_names, "Variable names must be iterable"
)
else:
if len(variable_names) != data[0].shape[1]:
raise InvalidInputException(
variable_names,
"Variable names should have length %i" % data[0].shape[1],
)
else:
return np.array(variable_names)
else:
if isinstance(data[0], pd.DataFrame):
return data[0].columns.values
else:
return np.arange(data[0].shape[1]) | 0.619241 | 0.616936 |
import numpy as np
from sklearn.base import clone
from .utils import get_data_subset, bootstrap_generator
from joblib import Parallel, delayed
__all__ = [
"model_scorer",
"score_untrained_sklearn_model",
"score_untrained_sklearn_model_with_probabilities",
"score_trained_sklearn_model",
"score_trained_sklearn_model_with_probabilities",
"train_model",
"get_model",
"predict_model",
"predict_proba_model",
]
def train_model(model, X_train, y_train):
"""Trains a scikit-learn model and returns the trained model"""
if X_train.shape[1] == 0:
# No data to train over, so don't bother
return None
cloned_model = clone(model)
return cloned_model.fit(X_train, y_train)
def get_model(model, X_train, y_train):
"""Just return the trained model"""
return model
def predict_model(model, X_score):
"""Uses a trained scikit-learn model to predict over the scoring data"""
return model.predict(X_score)
def predict_proba_model(model, X_score):
"""Uses a trained scikit-learn model to predict class probabilities for the
scoring data"""
pred = model.predict_proba(X_score)
# Binary classification.
if pred.shape[1] == 2:
return pred[:,1]
else:
return pred
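# --- Editor's hedged usage sketch (not part of the original module) ---------
# For binary classifiers predict_proba_model returns only the positive-class
# column (a 1-D array); for multiclass it returns the full (n, n_classes)
# matrix. The toy model below is illustrative only.
def _demo_predict_proba_model():
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression().fit(X, y)
    proba = predict_proba_model(clf, X)
    assert proba.ndim == 1 and proba.shape[0] == len(y)
    return proba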
def forward_permutations(X, inds, var_idx):
return np.array(
[
X[:, i] if i == var_idx else X[inds, i]
for i in range(X.shape[1])
]
).T
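# --- Editor's hedged usage sketch (not part of the original module) ---------
# forward_permutations keeps only column `var_idx` in its original order and
# applies the same row permutation `inds` to every other column, so the other
# features remain jointly permuted.
def _demo_forward_permutations():
    import numpy as np

    rng = np.random.RandomState(1)
    X = np.arange(12, dtype=float).reshape(4, 3)
    inds = rng.permutation(4)
    X_new = forward_permutations(X, inds, var_idx=1)
    assert np.array_equal(X_new[:, 1], X[:, 1])       # kept column intact
    assert np.array_equal(X_new[:, 0], X[inds, 0])    # others permuted by inds
    return X_new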
class model_scorer(object):
"""General purpose scoring method which takes a particular model, trains the
model over the given training data, uses the trained model to predict on the
given scoring data, and then evaluates those predictions using some
evaluation function. Additionally provides the tools for bootstrapping the
scores and providing a distribution of scores to be used for statistics.
    NOTE: Since this method is used internally, the scoring inputs passed to
    it for the different rounds of multipass permutation importance are
    already permuted for the topmost features. Thus, in any given iteration,
    only a single column needs to be permuted at a time.
"""
def __init__(
self,
model,
training_fn,
prediction_fn,
evaluation_fn,
nimportant_vars=1,
default_score=0.0,
n_permute=1,
subsample=1,
direction='backward',
**kwargs
):
"""Initializes the scoring object by storing the training, predicting,
and evaluation functions
:param model: a scikit-learn model
:param training_fn: a function for training a scikit-learn model. Must
be of the form ``(model, X_train, y_train) ->
trained_model | None``. If the function returns ``None``, then it is
assumed that the model training failed.
Probably :func:`PermutationImportance.sklearn_api.train_model` or
:func:`PermutationImportance.sklearn_api.get_model`
        :param prediction_fn: a function for predicting on scoring data using a
scikit-learn model. Must be of the form ``(model, X_score) ->
predictions``. Predictions may be either deterministic or
probabilistic, depending on what the evaluation_fn accepts.
Probably :func:`PermutationImportance.sklearn_api.predict_model` or
:func:`PermutationImportance.sklearn_api.predict_proba_model`
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param default_score: value to return if the model cannot be trained
        :param n_permute: number of times to permute and score each variable.
            Results over the different permutations provide a distribution of
            scores. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
"""
self.model = model
self.training_fn = training_fn
self.prediction_fn = prediction_fn
self.evaluation_fn = evaluation_fn
self.default_score = default_score
self.n_permute = n_permute
self.subsample = subsample
self.direction = direction
self.kwargs = kwargs
self.random_seed = kwargs.get("random_seed", 42)
def _scorer(self, X, y):
predictions = self.prediction_fn(self.model, X)
return self.evaluation_fn(y,predictions)
def get_subsample_size(self, full_size):
return (
int(full_size * self.subsample)
if self.subsample <= 1
else self.subsample
)
    def _train(self):
        # Try to train the model
        trained_model = self.training_fn(self.model, self.X_train, self.y_train)
        # If we didn't succeed in training (probably because there weren't any
        # training predictors), return the default_score
        if trained_model is None:
            if self.n_permute == 1:
                return [self.default_score]
            else:
                return np.full((self.n_permute,), self.default_score)
        return trained_model
def get_permuted_data(self, idx, var_idx):
""" Get permuted data """
X_score_sub = self.X_score
y_score_sub = self.y_score
X_train_sub = self.X_train
inds = self.shuffled_indices[idx]
if len(self.rows[0]) != self.y_score.shape[0]:
X_score_sub = get_data_subset(self.X_score, self.rows[idx])
y_score_sub = get_data_subset(self.y_score, self.rows[idx])
if self.direction == 'forward':
X_train_sub = get_data_subset(self.X_train, self.rows[idx])
#inds = inds[idx]
if var_idx is None:
return X_score_sub, y_score_sub
        # For the backward direction, X_score is mostly unpermuted except for
        # the top features. For the forward direction, X_score is fully permuted
        # except for the top features.
X_perm = X_score_sub.copy()
if self.direction == 'backward':
X_perm[:,var_idx] = X_score_sub[inds, var_idx]
else:
X_perm[:,var_idx] = X_train_sub[:, var_idx]
return X_perm, y_score_sub
def __call__(self, training_data, scoring_data, var_idx):
"""Uses the training, predicting, and evaluation functions to score the
model given the training and scoring data
:param training_data: (training_input, training_output)
:param scoring_data: (scoring_input, scoring_output)
        :param var_idx: integer
            The column index of the variable being permuted. When computing the
            original score, set var_idx to None.
        :returns: either a single value or an array of values
"""
(self.X_train, self.y_train) = training_data
(self.X_score, self.y_score) = scoring_data
permuted_set = [self.get_permuted_data(idx, var_idx) for idx in range(self.n_permute)]
scores = np.array([self._scorer(*arg) for arg in permuted_set])
return np.array(scores)
def score_untrained_sklearn_model(
model, evaluation_fn, nbootstrap=None, subsample=1, **kwargs
):
"""A convenience method which uses the default training and the
deterministic prediction methods for scikit-learn to evaluate a model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=train_model,
prediction_fn=predict_model,
evaluation_fn=evaluation_fn,
nbootstrap=nbootstrap,
subsample=subsample,
**kwargs
)
def score_untrained_sklearn_model_with_probabilities(
model, evaluation_fn, nbootstrap=None, subsample=1, **kwargs
):
"""A convenience method which uses the default training and the
probabilistic prediction methods for scikit-learn to evaluate a model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=train_model,
prediction_fn=predict_proba_model,
evaluation_fn=evaluation_fn,
nbootstrap=nbootstrap,
subsample=subsample,
**kwargs
)
def score_trained_sklearn_model(
model, evaluation_fn, n_permute=1, subsample=1, direction='backward', **kwargs
):
"""A convenience method which does not retrain a scikit-learn model and uses
deterministic prediction methods to evaluate the model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
    :param n_permute: number of times to permute and score each variable.
        Results over the different permutations provide a distribution of
        scores. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=get_model,
prediction_fn=predict_model,
evaluation_fn=evaluation_fn,
n_permute=n_permute,
subsample=subsample,
direction=direction,
**kwargs
)
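# --- Editor's hedged usage sketch (not part of the original module) ---------
# A minimal end-to-end use of score_trained_sklearn_model. NOTE: in normal use
# the permutation-importance driver elsewhere in the package populates the
# scorer's `rows` and `shuffled_indices` bookkeeping before calling it; they
# are set by hand below purely so this sketch runs standalone (an assumption,
# not the package's documented API).
def _demo_score_trained_sklearn_model():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.metrics import mean_squared_error

    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 3))
    y = X[:, 0] + 0.1 * rng.normal(size=100)
    model = LinearRegression().fit(X, y)

    scorer = score_trained_sklearn_model(model, mean_squared_error, n_permute=1)
    scorer.rows = [np.arange(len(y))]                    # hypothetical bookkeeping
    scorer.shuffled_indices = [rng.permutation(len(y))]  # hypothetical bookkeeping

    baseline = scorer((X, y), (X, y), var_idx=None)      # unpermuted score
    permuted = scorer((X, y), (X, y), var_idx=0)         # permute the key feature
    # Permuting the informative feature should typically increase the error.
    return baseline, permuted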
def score_trained_sklearn_model_with_probabilities(
model, evaluation_fn, n_permute=1, subsample=1, direction='backward', **kwargs
):
"""A convenience method which does not retrain a scikit-learn model and uses
probabilistic prediction methods to evaluate the model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
    :param n_permute: number of times to permute and score each variable.
        Results over the different permutations provide a distribution of
        scores. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=get_model,
prediction_fn=predict_proba_model,
evaluation_fn=evaluation_fn,
n_permute=n_permute,
subsample=subsample,
direction=direction,
**kwargs
) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/sklearn_api.py | sklearn_api.py | import numpy as np
from sklearn.base import clone
from .utils import get_data_subset, bootstrap_generator
from joblib import Parallel, delayed
__all__ = [
"model_scorer",
"score_untrained_sklearn_model",
"score_untrained_sklearn_model_with_probabilities",
"score_trained_sklearn_model",
"score_trained_sklearn_model_with_probabilities",
"train_model",
"get_model",
"predict_model",
"predict_proba_model",
]
def train_model(model, X_train, y_train):
"""Trains a scikit-learn model and returns the trained model"""
if X_train.shape[1] == 0:
# No data to train over, so don't bother
return None
cloned_model = clone(model)
return cloned_model.fit(X_train, y_train)
def get_model(model, X_train, y_train):
"""Just return the trained model"""
return model
def predict_model(model, X_score):
"""Uses a trained scikit-learn model to predict over the scoring data"""
return model.predict(X_score)
def predict_proba_model(model, X_score):
"""Uses a trained scikit-learn model to predict class probabilities for the
scoring data"""
pred = model.predict_proba(X_score)
# Binary classification.
if pred.shape[1] == 2:
return pred[:,1]
else:
return pred
def forward_permutations(X, inds, var_idx):
return np.array(
[
X[:, i] if i == var_idx else X[inds, i]
for i in range(X.shape[1])
]
).T
class model_scorer(object):
"""General purpose scoring method which takes a particular model, trains the
model over the given training data, uses the trained model to predict on the
given scoring data, and then evaluates those predictions using some
evaluation function. Additionally provides the tools for bootstrapping the
scores and providing a distribution of scores to be used for statistics.
    NOTE: Since this method is used internally, the scoring inputs passed to
    it for the different rounds of multipass permutation importance are
    already permuted for the topmost features. Thus, in any given iteration,
    only a single column needs to be permuted at a time.
"""
def __init__(
self,
model,
training_fn,
prediction_fn,
evaluation_fn,
nimportant_vars=1,
default_score=0.0,
n_permute=1,
subsample=1,
direction='backward',
**kwargs
):
"""Initializes the scoring object by storing the training, predicting,
and evaluation functions
:param model: a scikit-learn model
:param training_fn: a function for training a scikit-learn model. Must
be of the form ``(model, X_train, y_train) ->
trained_model | None``. If the function returns ``None``, then it is
assumed that the model training failed.
Probably :func:`PermutationImportance.sklearn_api.train_model` or
:func:`PermutationImportance.sklearn_api.get_model`
        :param prediction_fn: a function for predicting on scoring data using a
scikit-learn model. Must be of the form ``(model, X_score) ->
predictions``. Predictions may be either deterministic or
probabilistic, depending on what the evaluation_fn accepts.
Probably :func:`PermutationImportance.sklearn_api.predict_model` or
:func:`PermutationImportance.sklearn_api.predict_proba_model`
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param default_score: value to return if the model cannot be trained
        :param n_permute: number of times to permute and score each variable.
            Results over the different permutations provide a distribution of
            scores. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
"""
self.model = model
self.training_fn = training_fn
self.prediction_fn = prediction_fn
self.evaluation_fn = evaluation_fn
self.default_score = default_score
self.n_permute = n_permute
self.subsample = subsample
self.direction = direction
self.kwargs = kwargs
self.random_seed = kwargs.get("random_seed", 42)
def _scorer(self, X, y):
predictions = self.prediction_fn(self.model, X)
return self.evaluation_fn(y,predictions)
def get_subsample_size(self, full_size):
return (
int(full_size * self.subsample)
if self.subsample <= 1
else self.subsample
)
    def _train(self):
        # Try to train the model
        trained_model = self.training_fn(self.model, self.X_train, self.y_train)
        # If we didn't succeed in training (probably because there weren't any
        # training predictors), return the default_score
        if trained_model is None:
            if self.n_permute == 1:
                return [self.default_score]
            else:
                return np.full((self.n_permute,), self.default_score)
        return trained_model
def get_permuted_data(self, idx, var_idx):
""" Get permuted data """
X_score_sub = self.X_score
y_score_sub = self.y_score
X_train_sub = self.X_train
inds = self.shuffled_indices[idx]
if len(self.rows[0]) != self.y_score.shape[0]:
X_score_sub = get_data_subset(self.X_score, self.rows[idx])
y_score_sub = get_data_subset(self.y_score, self.rows[idx])
if self.direction == 'forward':
X_train_sub = get_data_subset(self.X_train, self.rows[idx])
#inds = inds[idx]
if var_idx is None:
return X_score_sub, y_score_sub
        # For the backward direction, X_score is mostly unpermuted except for
        # the top features. For the forward direction, X_score is fully permuted
        # except for the top features.
X_perm = X_score_sub.copy()
if self.direction == 'backward':
X_perm[:,var_idx] = X_score_sub[inds, var_idx]
else:
X_perm[:,var_idx] = X_train_sub[:, var_idx]
return X_perm, y_score_sub
def __call__(self, training_data, scoring_data, var_idx):
"""Uses the training, predicting, and evaluation functions to score the
model given the training and scoring data
:param training_data: (training_input, training_output)
:param scoring_data: (scoring_input, scoring_output)
        :param var_idx: integer
            The column index of the variable being permuted. When computing the
            original score, set var_idx to None.
        :returns: either a single value or an array of values
"""
(self.X_train, self.y_train) = training_data
(self.X_score, self.y_score) = scoring_data
permuted_set = [self.get_permuted_data(idx, var_idx) for idx in range(self.n_permute)]
scores = np.array([self._scorer(*arg) for arg in permuted_set])
return np.array(scores)
def score_untrained_sklearn_model(
model, evaluation_fn, nbootstrap=None, subsample=1, **kwargs
):
"""A convenience method which uses the default training and the
deterministic prediction methods for scikit-learn to evaluate a model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=train_model,
prediction_fn=predict_model,
evaluation_fn=evaluation_fn,
nbootstrap=nbootstrap,
subsample=subsample,
**kwargs
)
def score_untrained_sklearn_model_with_probabilities(
model, evaluation_fn, nbootstrap=None, subsample=1, **kwargs
):
"""A convenience method which uses the default training and the
probabilistic prediction methods for scikit-learn to evaluate a model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
:param nbootstrap: number of times to perform scoring on each variable.
Results over different bootstrap iterations are averaged. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=train_model,
prediction_fn=predict_proba_model,
evaluation_fn=evaluation_fn,
nbootstrap=nbootstrap,
subsample=subsample,
**kwargs
)
def score_trained_sklearn_model(
model, evaluation_fn, n_permute=1, subsample=1, direction='backward', **kwargs
):
"""A convenience method which does not retrain a scikit-learn model and uses
deterministic prediction methods to evaluate the model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
    :param n_permute: number of times to permute and score each variable.
        Results over the different permutations provide a distribution of
        scores. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=get_model,
prediction_fn=predict_model,
evaluation_fn=evaluation_fn,
n_permute=n_permute,
subsample=subsample,
direction=direction,
**kwargs
)
def score_trained_sklearn_model_with_probabilities(
model, evaluation_fn, n_permute=1, subsample=1, direction='backward', **kwargs
):
"""A convenience method which does not retrain a scikit-learn model and uses
probabilistic prediction methods to evaluate the model
:param model: a scikit-learn model
:param evaluation_fn: a function which takes the deterministic or
probabilistic model predictions and scores them against the true
values. Must be of the form ``(truths, predictions) -> some_value``
Probably one of the metrics in
:mod:`PermutationImportance.metrics` or
`sklearn.metrics <https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics>`_
    :param n_permute: number of times to permute and score each variable.
        Results over the different permutations provide a distribution of
        scores. Defaults to 1
:param subsample: number of elements to sample (with replacement) per
bootstrap round. If between 0 and 1, treated as a fraction of the number
of total number of events (e.g. 0.5 means half the number of events).
If not specified, subsampling will not be used and the entire data will
be used (without replacement)
:param kwargs: all other kwargs passed on to the evaluation_fn
:returns: a callable which accepts ``(training_data, scoring_data)`` and
returns some value (probably a float or an array of floats)
"""
return model_scorer(
model,
training_fn=get_model,
prediction_fn=predict_proba_model,
evaluation_fn=evaluation_fn,
n_permute=n_permute,
subsample=subsample,
direction=direction,
**kwargs
) | 0.903559 | 0.66888 |
import warnings
try:
from itertools import izip as zip
except ImportError: # python3
pass
from .error_handling import FullImportanceResultWarning
class ImportanceResult(object):
"""Houses the result of any importance method, which consists of a
sequence of contexts and results. An individual result can only be truly
interpreted correctly in light of the corresponding context. This object
allows for indexing into the contexts and results and also provides
convenience methods for retrieving the results with no context and the
most complete context"""
def __init__(self, method, variable_names, original_score):
"""Initializes the results object with the method used and a list of
variable names
:param method: string for the type of variable importance used
:param variable_names: a list of names for variables
:param original_score: the score of the model when no variables are
important
"""
self.method = method
self.variable_names = variable_names
self.original_score = original_score
# The initial context is "empty"
self.contexts = [{}]
self.results = list()
self.complete = False
def add_new_results(self, new_results, next_important_variable=None):
"""Adds a new round of results. Warns if the ImportanceResult is already
complete
:param new_results: a dictionary with keys of variable names and values
of ``(rank, score)``
:param next_important_variable: variable name of the next most important
variable. If not given, will select the variable with the smallest
rank
"""
if not self.complete:
if next_important_variable is None:
next_important_variable = min(
new_results.keys(), key=lambda key: new_results[key][0]
)
self.results.append(new_results)
new_context = self.contexts[-1].copy()
self.contexts.append(new_context)
__, score = new_results[next_important_variable]
self.contexts[-1][next_important_variable] = (len(self.results) - 1, score)
# Check to see if this result could constitute the last possible one
if len(self.results) == len(self.variable_names):
self.results.append(dict())
self.complete = True
else:
warnings.warn(
"Cannot add new result to full ImportanceResult",
FullImportanceResultWarning,
)
def retrieve_singlepass(self):
"""Returns the singlepass results as a dictionary with keys of variable
names and values of ``(rank, score)``."""
return self.results[0]
def retrieve_all_iterations(self):
"""Returns the singlepass results for all multipass iterations"""
return self.results
def retrieve_multipass(self):
"""Returns the multipass results as a dictionary with keys of variable
names and values of ``(rank, score)``."""
return self.contexts[-1]
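    # --- Editor's hedged usage sketch (not part of the original class) ------
    # Typical flow, with illustrative names only:
    #
    #   result = ImportanceResult("singlepass", ["a", "b"], original_score=0.3)
    #   result.add_new_results({"a": (0, 0.5), "b": (1, 0.4)})
    #   result.retrieve_singlepass()   # -> {"a": (0, 0.5), "b": (1, 0.4)}
    #   result.retrieve_multipass()    # -> {"a": (0, 0.5)} so far
    #
    # Each call to add_new_results stores one round of scores and extends the
    # running context with the next most important variable.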
def __iter__(self):
"""Iterates over pairs of contexts and results"""
return zip(self.contexts, self.results)
def __getitem__(self, index):
"""Retrieves the ith pair of ``(context, result)``"""
if index < 0:
index = len(self.results) + index
return (self.contexts[index], self.results[index])
def __len__(self):
"""Returns the total number of results computed"""
return len(self.results) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/main/PermutationImportance/result.py | result.py | import warnings
try:
from itertools import izip as zip
except ImportError: # python3
pass
from .error_handling import FullImportanceResultWarning
class ImportanceResult(object):
"""Houses the result of any importance method, which consists of a
sequence of contexts and results. An individual result can only be truly
interpreted correctly in light of the corresponding context. This object
allows for indexing into the contexts and results and also provides
convenience methods for retrieving the results with no context and the
most complete context"""
def __init__(self, method, variable_names, original_score):
"""Initializes the results object with the method used and a list of
variable names
:param method: string for the type of variable importance used
:param variable_names: a list of names for variables
:param original_score: the score of the model when no variables are
important
"""
self.method = method
self.variable_names = variable_names
self.original_score = original_score
# The initial context is "empty"
self.contexts = [{}]
self.results = list()
self.complete = False
def add_new_results(self, new_results, next_important_variable=None):
"""Adds a new round of results. Warns if the ImportanceResult is already
complete
:param new_results: a dictionary with keys of variable names and values
of ``(rank, score)``
:param next_important_variable: variable name of the next most important
variable. If not given, will select the variable with the smallest
rank
"""
if not self.complete:
if next_important_variable is None:
next_important_variable = min(
new_results.keys(), key=lambda key: new_results[key][0]
)
self.results.append(new_results)
new_context = self.contexts[-1].copy()
self.contexts.append(new_context)
__, score = new_results[next_important_variable]
self.contexts[-1][next_important_variable] = (len(self.results) - 1, score)
# Check to see if this result could constitute the last possible one
if len(self.results) == len(self.variable_names):
self.results.append(dict())
self.complete = True
else:
warnings.warn(
"Cannot add new result to full ImportanceResult",
FullImportanceResultWarning,
)
def retrieve_singlepass(self):
"""Returns the singlepass results as a dictionary with keys of variable
names and values of ``(rank, score)``."""
return self.results[0]
def retrieve_all_iterations(self):
"""Returns the singlepass results for all multipass iterations"""
return self.results
def retrieve_multipass(self):
"""Returns the multipass results as a dictionary with keys of variable
names and values of ``(rank, score)``."""
return self.contexts[-1]
def __iter__(self):
"""Iterates over pairs of contexts and results"""
return zip(self.contexts, self.results)
def __getitem__(self, index):
"""Retrieves the ith pair of ``(context, result)``"""
if index < 0:
index = len(self.results) + index
return (self.contexts[index], self.results[index])
def __len__(self):
"""Returns the total number of results computed"""
return len(self.results) | 0.715821 | 0.439146 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, FormatStrFormatter, AutoMinorLocator
import matplotlib.ticker as mticker
from matplotlib import rcParams
from matplotlib.colors import ListedColormap
from matplotlib.gridspec import GridSpec
import matplotlib
import seaborn as sns
from ..common.utils import is_outlier
from ..common.contrib_utils import combine_like_features
import shap
class PlotStructure:
"""
Plot handles figure and subplot generation
"""
def __init__(self, BASE_FONT_SIZE=12, seaborn_kws=None):
GENERIC_FONT_SIZE_NAMES = [
"teensie",
"tiny",
"small",
"normal",
"big",
"large",
"huge",
]
FONT_SIZES_ARRAY = np.arange(-6, 8, 2) + BASE_FONT_SIZE
self.FONT_SIZES = {
name: size for name, size in zip(GENERIC_FONT_SIZE_NAMES, FONT_SIZES_ARRAY)
}
        if seaborn_kws is None:
            custom_params = {"axes.spines.right": False, "axes.spines.top": False}
            sns.set_theme(style="ticks", rc=custom_params)
        elif isinstance(seaborn_kws, dict):
            sns.set_theme(**seaborn_kws)
# Setting the font style to serif
rcParams["font.family"] = "serif"
plt.rc("font", size=self.FONT_SIZES["normal"]) # controls default text sizes
plt.rc("axes", titlesize=self.FONT_SIZES["tiny"]) # fontsize of the axes title
plt.rc(
"axes", labelsize=self.FONT_SIZES["normal"]
) # fontsize of the x and y labels
plt.rc(
"xtick", labelsize=self.FONT_SIZES["teensie"]
) # fontsize of the x-axis tick marks
plt.rc(
"ytick", labelsize=self.FONT_SIZES["teensie"]
) # fontsize of the y-axis tick marks
plt.rc("legend", fontsize=self.FONT_SIZES["teensie"]) # legend fontsize
plt.rc(
"figure", titlesize=self.FONT_SIZES["big"]
) # fontsize of the figure title
def get_fig_props(self, n_panels, **kwargs):
"""Determine appropriate figure properties"""
width_slope = 0.875
height_slope = 0.45
intercept = 3.0 - width_slope
figsize = (
min((n_panels * width_slope) + intercept, 19),
min((n_panels * height_slope) + intercept, 12),
)
wspace = (-0.03 * n_panels) + 0.85
hspace = (0.0175 * n_panels) + 0.3
n_columns = kwargs.get("n_columns", 3)
wspace = wspace + 0.25 if n_columns > 3 else wspace
kwargs["figsize"] = kwargs.get("figsize", figsize)
kwargs["wspace"] = kwargs.get("wspace", wspace)
kwargs["hspace"] = kwargs.get("hspace", hspace)
return kwargs
def create_subplots(self, n_panels, **kwargs):
"""
Create a series of subplots (MxN) based on the
number of panels and number of columns (optionally).
Args:
-----------------------
n_panels : int
Number of subplots to create
Optional keyword args:
n_columns : int
The number of columns for a plot (default=3 for n_panels >=3)
figsize: 2-tuple of figure size (width, height in inches)
wspace : float
the amount of width reserved for space between subplots,
expressed as a fraction of the average axis width
            hspace : float
                the amount of height reserved for space between subplots,
                expressed as a fraction of the average axis height
sharex : boolean
sharey : boolean
"""
# figsize = width, height in inches
figsize = kwargs.get("figsize", (6.4, 4.8))
wspace = kwargs.get("wspace", 0.4)
hspace = kwargs.get("hspace", 0.3)
sharex = kwargs.get("sharex", False)
sharey = kwargs.get("sharey", False)
        delete = True
        if n_panels <= 3:
            n_columns = kwargs.get("n_columns", n_panels)
            delete = n_panels != n_columns
else:
n_columns = kwargs.get("n_columns", 3)
n_rows = int(n_panels / n_columns)
extra_row = 0 if (n_panels % n_columns) == 0 else 1
fig, axes = plt.subplots(
n_rows + extra_row,
n_columns,
sharex=sharex,
sharey=sharey,
figsize=figsize,
dpi=300,
)
fig.patch.set_facecolor("white")
plt.subplots_adjust(wspace=wspace, hspace=hspace)
if delete:
n_axes_to_delete = len(axes.flat) - n_panels
if n_axes_to_delete > 0:
for i in range(n_axes_to_delete):
fig.delaxes(axes.flat[-(i + 1)])
return fig, axes
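    # --- Editor's hedged usage sketch (not part of the original class) ------
    # Illustrative only:
    #
    #   plotter = PlotStructure(BASE_FONT_SIZE=12)
    #   fig, axes = plotter.create_subplots(
    #       n_panels=5, n_columns=3, figsize=(8, 5), sharey=True
    #   )
    #   # 5 panels on a 2x3 grid; the unused sixth axis is deleted.
    #   for ax, _ in zip(plotter.axes_to_iterator(5, axes), range(5)):
    #       ax.plot([0, 1], [0, 1])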
def _create_joint_subplots(self, n_panels, **kwargs):
"""
Create grid for multipanel drawing a bivariate plots with marginal
univariate plots on the top and right hand side.
"""
figsize = kwargs.get("figsize", (6.4, 4.8))
ratio = kwargs.get("ratio", 5)
n_columns = kwargs.get("n_columns", 3)
fig = plt.figure(figsize=figsize, dpi=300)
fig.patch.set_facecolor("white")
extra_row = 0 if (n_panels % n_columns) == 0 else 1
nrows = ratio * (int(n_panels / n_columns) + extra_row)
ncols = ratio * n_columns
gs = GridSpec(ncols=ncols, nrows=nrows)
main_ax_len = ratio - 1
main_axes = []
top_axes = []
rhs_axes = []
col_offset_idx = list(range(n_columns)) * int(nrows / ratio)
row_offset = 0
for i in range(n_panels):
col_offset = ratio * col_offset_idx[i]
row_idx = 1
if i % n_columns == 0 and i > 0:
row_offset += ratio
main_ax = fig.add_subplot(
gs[
row_idx + row_offset : main_ax_len + row_offset,
col_offset : main_ax_len + col_offset - 1,
],
frameon=False,
)
top_ax = fig.add_subplot(
gs[row_idx + row_offset - 1, col_offset : main_ax_len + col_offset - 1],
frameon=False,
sharex=main_ax,
)
rhs_ax = fig.add_subplot(
gs[
row_idx + row_offset : main_ax_len + row_offset,
main_ax_len + col_offset - 1,
],
frameon=False,
sharey=main_ax,
)
ax_marg = [top_ax, rhs_ax]
for ax in ax_marg:
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
# Turn off the ticks on the density axis for the marginal plots
plt.setp(ax.yaxis.get_majorticklines(), visible=False)
plt.setp(ax.xaxis.get_majorticklines(), visible=False)
plt.setp(ax.yaxis.get_minorticklines(), visible=False)
plt.setp(ax.xaxis.get_minorticklines(), visible=False)
ax.yaxis.grid(False)
ax.xaxis.grid(False)
for axis in [ax.xaxis, ax.yaxis]:
axis.label.set_visible(False)
main_axes.append(main_ax)
top_axes.append(top_ax)
rhs_axes.append(rhs_ax)
n_rows = int(nrows / ratio)
return fig, main_axes, top_axes, rhs_axes, n_rows
def axes_to_iterator(self, n_panels, axes):
"""Turns axes list into iterable"""
if isinstance(axes, list):
return axes
else:
ax_iterator = [axes] if n_panels == 1 else axes.flat
return ax_iterator
def set_major_axis_labels(
self,
fig,
xlabel=None,
ylabel_left=None,
ylabel_right=None,
title=None,
**kwargs,
):
"""
        Generate single x- and y-axis labels for a series of subplot panels by
        adding an invisible full-figure axis behind them.
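
        Example (a minimal sketch; assumes ``fig`` comes from ``create_subplots``):
            >>> major_ax = ps.set_major_axis_labels(
            ...     fig, xlabel="Feature value", ylabel_left="Response", title=""
            ... )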
"""
fontsize = kwargs.get("fontsize", self.FONT_SIZES["normal"])
labelpad = kwargs.get("labelpad", 15)
ylabel_right_color = kwargs.get("ylabel_right_color", "k")
# add a big axis, hide frame
ax = fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axis
plt.tick_params(
labelcolor="none", top=False, bottom=False, left=False, right=False
)
# set axes labels
ax.set_xlabel(xlabel, fontsize=fontsize, labelpad=labelpad)
ax.set_ylabel(ylabel_left, fontsize=fontsize, labelpad=labelpad)
if ylabel_right is not None:
ax_right = fig.add_subplot(1, 1, 1, sharex=ax, frameon=False)
plt.tick_params(
labelcolor="none", top=False, bottom=False, left=False, right=False
)
ax_right.yaxis.set_label_position("right")
ax_right.set_ylabel(
ylabel_right,
labelpad=2 * labelpad,
fontsize=fontsize,
color=ylabel_right_color,
)
ax_right.grid(False)
ax.set_title(title)
ax.grid(False)
return ax
def set_row_labels(self, labels, axes, pos=-1, pad=1.15, rotation=270, **kwargs):
"""
Give a label to each row in a series of subplots
"""
colors = kwargs.get("colors", ["xkcd:darkish blue"] * len(labels))
fontsize = kwargs.get("fontsize", self.FONT_SIZES["small"])
if np.ndim(axes) == 2:
iterator = axes[:, pos]
else:
iterator = [axes[pos]]
for ax, row, color in zip(iterator, labels, colors):
ax.yaxis.set_label_position("right")
ax.annotate(
row,
xy=(1, 1),
xytext=(pad, 0.5),
xycoords=ax.transAxes,
rotation=rotation,
size=fontsize,
ha="center",
va="center",
color=color,
alpha=0.65,
)
def add_alphabet_label(self, n_panels, axes, pos=(0.9, 0.09), alphabet_fontsize=10, **kwargs):
"""
        Add an alphabet label, e.g., (a), (b), (c), to each subpanel.
"""
alphabet_list = [chr(x) for x in range(ord("a"), ord("z") + 1)] + [
f"{chr(x)}{chr(x)}" for x in range(ord("a"), ord("z") + 1)
]
ax_iterator = self.axes_to_iterator(n_panels, axes)
for i, ax in enumerate(ax_iterator):
ax.text(
pos[0],
pos[1],
f"({alphabet_list[i]})",
fontsize=alphabet_fontsize,
alpha=0.8,
ha="center",
va="center",
transform=ax.transAxes,
)
def _to_sci_notation(self, ydata, ax=None, xdata=None, colorbar=False):
"""
        Convert small decimal values (less than 0.01) to scientific (10^e) notation
"""
# f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
# g = lambda x, pos: "${}$".format(f._formatSciNotation("%1.10e" % x))
if colorbar and np.absolute(np.amax(ydata)) <= 0.01:
# colorbar.ax.yaxis.set_major_formatter(mticker.FuncFormatter(g))
colorbar.ax.ticklabel_format(
style="sci",
)
colorbar.ax.tick_params(axis="y", labelsize=5)
elif ax:
if np.absolute(np.amax(xdata)) <= 0.01:
ax.ticklabel_format(
style="sci",
)
# ax.xaxis.set_major_formatter(mticker.FuncFormatter(g))
ax.tick_params(axis="x", labelsize=5, rotation=45)
if np.absolute(np.amax(ydata)) <= 0.01:
# ax.yaxis.set_major_formatter(mticker.FuncFormatter(g))
ax.ticklabel_format(
style="sci",
)
ax.tick_params(axis="y", labelsize=5, rotation=45)
def calculate_ticks(
self,
nticks,
ax=None,
upperbound=None,
lowerbound=None,
round_to=5,
center=False,
):
"""
        Calculate the y-axis tick marks for the line plots
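
        Example (a minimal sketch; when ``ax`` is given, the bounds are taken from
        its current y-limits and the result can be passed to ``ax.set_yticks``):
            >>> ticks = ps.calculate_ticks(nticks=5, ax=ax)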
"""
if ax is not None:
upperbound = round(ax.get_ybound()[1], 5)
lowerbound = round(ax.get_ybound()[0], 5)
max_value = max(abs(upperbound), abs(lowerbound))
if 0 < max_value < 1:
if max_value < 0.1:
round_to = 3
else:
round_to = 5
elif 5 < max_value < 10:
round_to = 2
else:
round_to = 0
def round_to_a_base(a_number, base=5):
return base * round(a_number / base)
if max_value > 5:
max_value = round_to_a_base(max_value)
if center:
values = np.linspace(-max_value, max_value, nticks)
values = np.round(values, round_to)
else:
dy = upperbound - lowerbound
# deprecated 8 March 2022 by Monte.
if round_to > 2:
fit = np.floor(dy / (nticks - 1)) + 1
dy = (nticks - 1) * fit
values = np.linspace(lowerbound, lowerbound + dy, nticks)
values = np.round(values, round_to)
return values
def set_tick_labels(
self, ax, feature_names, display_feature_names, return_labels=False
):
"""
Setting the tick labels for the tree interpreter plots.
"""
if isinstance(display_feature_names, dict):
labels = [
display_feature_names.get(feature_name, feature_name)
for feature_name in feature_names
]
else:
labels = display_feature_names
if return_labels:
labels = [f"{l}" for l in labels]
return labels
else:
labels = [f"{l}" for l in labels]
ax.set_yticklabels(labels)
def set_axis_label(self, ax, xaxis_label=None, yaxis_label=None, **kwargs):
"""
        Set the x- and y-axis labels with pretty feature names (and optionally
        physical units). Relies on ``self.display_feature_names`` and
        ``self.display_units`` being set by the calling class.
"""
fontsize = kwargs.get("fontsize", self.FONT_SIZES["tiny"])
if xaxis_label is not None:
xaxis_label_pretty = self.display_feature_names.get(
xaxis_label, xaxis_label
)
units = self.display_units.get(xaxis_label, "")
if units == "":
xaxis_label_with_units = f"{xaxis_label_pretty}"
else:
xaxis_label_with_units = f"{xaxis_label_pretty} ({units})"
ax.set_xlabel(xaxis_label_with_units, fontsize=fontsize)
if yaxis_label is not None:
yaxis_label_pretty = self.display_feature_names.get(
yaxis_label, yaxis_label
)
units = self.display_units.get(yaxis_label, "")
if units == "":
yaxis_label_with_units = f"{yaxis_label_pretty}"
else:
yaxis_label_with_units = f"{yaxis_label_pretty} ({units})"
ax.set_ylabel(yaxis_label_with_units, fontsize=fontsize)
def set_legend(self, n_panels, fig, ax, major_ax=None, **kwargs):
"""
Set a single legend on the bottom of a figure
for a set of subplots.
"""
if major_ax is None:
major_ax = self.set_major_axis_labels(fig)
fontsize = kwargs.get("fontsize", "medium")
ncol = kwargs.get("ncol", 3)
handles = kwargs.get("handles", None)
labels = kwargs.get("labels", None)
if handles is None:
handles, _ = ax.get_legend_handles_labels()
if labels is None:
_, labels = ax.get_legend_handles_labels()
if n_panels > 3:
bbox_to_anchor = (0.5, -0.35)
else:
bbox_to_anchor = (0.5, -0.5)
bbox_to_anchor = kwargs.get("bbox_to_anchor", bbox_to_anchor)
# Shrink current axis's height by 10% on the bottom
box = major_ax.get_position()
major_ax.set_position(
[box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]
)
# Put a legend below current axis
major_ax.legend(
handles,
labels,
loc="lower center",
bbox_to_anchor=bbox_to_anchor,
fancybox=True,
shadow=True,
ncol=ncol,
fontsize=fontsize,
)
def set_minor_ticks(self, ax):
"""
        Add minor tick marks to the x- and y-axis of a subplot ax
        to increase readability.
"""
ax.xaxis.set_minor_locator(AutoMinorLocator(n=3))
ax.yaxis.set_minor_locator(AutoMinorLocator(n=3))
def set_n_ticks(self, ax, option="y", nticks=5):
"""
Set the max number of ticks per x- and y-axis for a
subplot ax
"""
if option == "y" or option == "both":
ax.yaxis.set_major_locator(MaxNLocator(nticks))
if option == "x" or option == "both":
ax.xaxis.set_major_locator(MaxNLocator(nticks))
def make_twin_ax(self, ax):
"""
Create a twin axis on an existing axis with a shared x-axis
"""
# align the twinx axis
twin_ax = ax.twinx()
# Turn twin_ax grid off.
twin_ax.grid(False)
# Set ax's patch invisible
ax.patch.set_visible(False)
# Set axtwin's patch visible and colorize it in grey
twin_ax.patch.set_visible(True)
# move ax in front
ax.set_zorder(twin_ax.get_zorder() + 1)
return twin_ax
def despine_plt(self, ax):
"""
        Remove all four spines from a subplot ax.
"""
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["bottom"].set_visible(False)
def annotate_bars(self, ax, num1, num2, y, width, dh=0.01, barh=0.05, delta=0):
"""
Annotate barplot with connections between correlated variables.
Parameters
----------------
num1: index of left bar to put bracket over
num2: index of right bar to put bracket over
y: centers of all bars (like plt.barh() input)
width: widths of all bars (like plt.barh() input)
dh: height offset over bar / bar + yerr in axes coordinates (0 to 1)
barh: bar height in axes coordinates (0 to 1)
delta : shifting of the annotation when multiple annotations would overlap
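        Examples
        --------
        A minimal sketch (assumes ``ax`` already holds a horizontal bar plot whose
        bar centers and widths are ``y`` and ``width``):

        >>> ps.annotate_bars(ax, num1=0, num2=2, y=y, width=width)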
"""
lx, ly = y[num1], width[num1]
rx, ry = y[num2], width[num2]
        ax_y0, ax_y1 = ax.get_xlim()  # scale the offsets by this panel's x-range
dh *= (ax_y1 - ax_y0)
barh *= (ax_y1 - ax_y0)
y = max(ly, ry) + dh
barx = [lx, lx, rx, rx]
bary = [y, y+barh, y+barh, y]
mid = ((lx+rx)/2, y+barh)
ax.plot(np.array(bary)+delta, barx, alpha=0.8, clip_on=False)
'''
Deprecated 14 March 2022.
def annotate_bars(self, ax, bottom_idx, top_idx, x=0, **kwargs):
"""
Adds a square bracket that contains two points. Used to
connect predictors in the predictor ranking plot
for highly correlated pairs.
"""
color = kwargs.get("color", "xkcd:slate gray")
ax.annotate(
"",
xy=(x, bottom_idx),
xytext=(x, top_idx),
arrowprops=dict(
arrowstyle="<->,head_length=0.05,head_width=0.05",
ec=color,
connectionstyle="bar,fraction=0.2",
shrinkA=0.5,
shrinkB=0.5,
linewidth=0.5,
),
)
'''
def get_custom_colormap(self, vals, **kwargs):
"""Get a custom colormap"""
cmap = kwargs.get("cmap", matplotlib.cm.PuOr)
bounds = np.linspace(np.nanpercentile(vals, 0), np.nanpercentile(vals, 100), 10)
norm = matplotlib.colors.BoundaryNorm(
bounds,
cmap.N,
)
mappable = matplotlib.cm.ScalarMappable(
norm=norm,
cmap=cmap,
)
return mappable, bounds
def add_ice_colorbar(self, fig, ax, mappable, cb_label, cdata, fontsize, **kwargs):
"""Add a colorbar to the right of a panel to
accompany ICE color-coded plots"""
cb = plt.colorbar(mappable, ax=ax, pad=0.2)
cb.set_label(cb_label, size=fontsize)
cb.ax.tick_params(labelsize=fontsize)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.7) * 20)
self._to_sci_notation(ax=None, colorbar=cb, ydata=cdata)
def add_colorbar(
self,
fig,
plot_obj,
colorbar_label,
ticks=MaxNLocator(5),
ax=None,
cax=None,
**kwargs,
):
"""Adds a colorbar to the right of a panel"""
        # Add a colorbar
orientation = kwargs.get("orientation", "vertical")
pad = kwargs.get("pad", 0.1)
shrink = kwargs.get("shrink", 1.1)
extend = kwargs.get("extend", "neither")
if cax:
cbar = plt.colorbar(
plot_obj,
cax=cax,
pad=pad,
ticks=ticks,
shrink=shrink,
orientation=orientation,
extend=extend,
)
else:
cbar = plt.colorbar(
plot_obj,
ax=ax,
pad=pad,
ticks=ticks,
shrink=shrink,
orientation=orientation,
extend=extend,
)
cbar.ax.tick_params(labelsize=self.FONT_SIZES["tiny"])
cbar.set_label(colorbar_label, size=self.FONT_SIZES["small"])
cbar.outline.set_visible(False)
# bbox = cbar.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# cbar.ax.set_aspect((bbox.height - 0.7) * 20)
def save_figure(self, fname, fig=None, bbox_inches="tight", dpi=300, aformat="png"):
"""Saves the current figure"""
        # Use the provided figure handle when given; otherwise fall back to the current figure.
        fig = plt.gcf() if fig is None else fig
        fig.savefig(fname=fname, bbox_inches=bbox_inches, dpi=dpi, format=aformat)
 | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/base_plotting.py | base_plotting.py |
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from scipy.stats import gaussian_kde
from scipy.ndimage import gaussian_filter
import scipy
import itertools
import numpy as np
import matplotlib as mpl
from .base_plotting import PlotStructure
gray4 = (189 / 255.0, 189 / 255.0, 189 / 255.0)
gray5 = (150 / 255.0, 150 / 255.0, 150 / 255.0)
blue2 = (222 / 255.0, 235 / 255.0, 247 / 255.0)
blue5 = (107 / 255.0, 174 / 255.0, 214 / 255.0)
orange3 = (253 / 255.0, 208 / 255.0, 162 / 255.0)
orange5 = (253 / 255.0, 141 / 255.0, 60 / 255.0)
red5 = (251 / 255.0, 106 / 255.0, 74 / 255.0)
red6 = (239 / 255.0, 59 / 255.0, 44 / 255.0)
purple5 = (158 / 255.0, 154 / 255.0, 200 / 255.0)
purple6 = (128 / 255.0, 125 / 255.0, 186 / 255.0)
purple9 = (63 / 255.0, 0 / 255.0, 125 / 255.0)
custom_cmap = ListedColormap(
[
gray4,
gray5,
blue2,
blue5,
orange3,
orange5,
red5,
red6,
purple5,
purple6,
purple9,
]
)
class PlotScatter(PlotStructure):
"""
    PlotScatter handles plotting 2D scatter plots between pairs of features.
It will also optionally overlay KDE contours of the target variable,
which is a first-order method for evaluating possible feature interactions
and whether the learned relationships are consistent with the data.
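
    Examples
    --------
    A minimal sketch (assumes ``model`` is a fitted scikit-learn estimator and
    ``X``/``y`` are a pandas DataFrame and Series of features and targets; the
    feature names below are placeholders):

    >>> plotter = PlotScatter(BASE_FONT_SIZE=12)
    >>> fig, axes = plotter.plot_scatter(
    ...     {"Random Forest": model}, X=X, y=y,
    ...     features=("feature_1", "feature_2"), kde=True,
    ... )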
"""
oranges = ListedColormap(
["xkcd:peach", "xkcd:orange", "xkcd:bright orange", "xkcd:rust brown"]
)
blues = ListedColormap(
["xkcd:periwinkle blue", "xkcd:clear blue", "xkcd:navy blue"]
)
def __init__(self, BASE_FONT_SIZE=12):
super().__init__(BASE_FONT_SIZE=BASE_FONT_SIZE)
def plot_scatter(
self,
estimators,
X,
y,
features,
display_feature_names={},
display_units={},
subsample=1.0,
peak_val=None,
kde=False,
**kwargs,
):
"""
        Plot a 2D scatter of model predictions over two features and, optionally,
        overlay per-class KDE contours of the target variable
"""
# TODO: Plot relationships for multiple features!!
estimator_names = list(estimators.keys())
n_panels = len(estimator_names)
only_one_model = len(estimator_names) == 1
predictions = np.zeros((n_panels, len(y)), dtype=np.float16)
j = 0
for estimator_name, estimator in estimators.items():
if hasattr(estimator, "predict_proba"):
predictions[j, :] = estimator.predict_proba(X)[:, 1]
else:
predictions[j, :] = estimator.predict(X)
j += 1
kwargs = self.get_fig_props(n_panels, **kwargs)
# create subplots, one for each feature
fig, axes = self.create_subplots(
n_panels=n_panels,
sharex=False,
sharey=False,
**kwargs,
)
ax_iterator = self.axes_to_iterator(n_panels, axes)
vmax = np.max(predictions)
for i, ax in enumerate(ax_iterator):
cf = self.scatter_(
ax=ax,
X=X,
features=features,
predictions=predictions[i, :],
vmax=vmax,
**kwargs,
)
if subsample < 1.0:
size = min(int(subsample * len(y)), len(y))
idxs = np.random.choice(len(y), size=size, replace=False)
var1 = X[features[0]].values[idxs]
var2 = X[features[1]].values[idxs]
y1 = y.values[idxs]
else:
var1 = X[features[0]].values
var2 = X[features[1]].values
y1 = np.copy(y)
# Shuffle values
index = np.arange(len(var1))
np.random.shuffle(index)
var1 = var1[index]
var2 = var2[index]
y1 = y1[index]
ax.set_xlabel(display_feature_names.get(features[0], features[0]))
ax.set_ylabel(display_feature_names.get(features[1], features[1]))
ax.grid(color="#2A3459", alpha=0.6, linestyle="dashed", linewidth=0.5)
# bluish dark grey, but slightly lighter than background
if kde:
cmap_set = [
self.oranges,
self.blues,
"Reds",
"jet",
]
classes = np.unique(y1)
idx_sets = [np.where(y1 == c) for c in classes]
for idxs, cmap in zip(idx_sets, cmap_set):
                    # Plot KDE contours for each class of the target
cs = self.plot_kde_contours(
ax,
dy=var2[idxs],
dx=var1[idxs],
target=y1[idxs],
cmap=cmap,
)
handles_set = [cs.legend_elements()[-1]]
labels = classes
legend = ax.legend(handles_set, labels, framealpha=0.5)
# Hide the right and top spines
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
if not only_one_model:
ax.set_title(estimator_names[i])
if n_panels == 1:
ax_ = axes
else:
ax_ = axes.ravel().tolist()
if hasattr(estimator, "predict_proba"):
cbar_label = "Probability"
else:
cbar_label = "Response"
cbar_label = kwargs.get("cbar_label", cbar_label)
fig.colorbar(cf, ax=ax_, label=cbar_label, orientation="horizontal")
return fig, axes
def scatter_(self, ax, features, X, predictions, vmax, **kwargs):
"""
        Plot a 2D scatter of ML predictions;
        only a random subsample of at most 20,000 points is plotted
"""
size = min(20000, len(X))
idxs = np.random.choice(len(X), size=size, replace=False)
x_val = X[features[0]].values[idxs]
y_val = X[features[1]].values[idxs]
z_val = predictions[idxs]
# Show highest predictions on top!
index = np.argsort(z_val)
# index = np.random.choice(len(z_val), size=len(z_val), replace=False)
x_val = x_val[index]
y_val = y_val[index]
z_val = z_val[index]
cmap = kwargs.get("cmap", custom_cmap)
zmax = vmax + 0.05 if vmax < 1.0 else 1.1
delta = 0.05 if vmax < 0.5 else 0.1
levels = [0, 0.05] + list(np.arange(0.1, zmax, delta))
norm = mpl.colors.BoundaryNorm(levels, cmap.N)
sca = ax.scatter(
x_val,
y_val,
c=z_val,
cmap=cmap,
alpha=0.6,
s=3,
norm=norm,
)
return sca
    def kernal_density_estimate(self, dy, dx):
        """Compute a 2D Gaussian kernel-density estimate of (dx, dy) on a 100x100 grid."""
dy_min = np.amin(dy)
dx_min = np.amin(dx)
dy_max = np.amax(dy)
dx_max = np.amax(dx)
x, y = np.mgrid[dx_min:dx_max:100j, dy_min:dy_max:100j]
positions = np.vstack([x.ravel(), y.ravel()])
values = np.vstack([dx, dy])
kernel = gaussian_kde(values)
f = np.reshape(kernel(positions).T, x.shape)
return x, y, f
def plot_kde_contours(
self,
ax,
dy,
dx,
target,
cmap,
):
x, y, f = self.kernal_density_estimate(dy, dx)
temp_linewidths = [0.85, 1.0, 1.25, 1.75]
temp_thresh = [75.0, 90.0, 95.0, 97.5]
temp_levels = [0.0, 0.0, 0.0, 0.0]
for i in range(0, len(temp_thresh)):
temp_levels[i] = np.percentile(f.ravel(), temp_thresh[i])
# masked_f = np.ma.masked_where(f < 1.6e-5, f)
cs = ax.contour(
x,
y,
f,
levels=temp_levels,
cmap=cmap,
linewidths=temp_linewidths,
alpha=1.0,
)
fmt = {}
for l, s in zip(cs.levels, temp_thresh[::-1]):
fmt[l] = f"{int(s)}%"
ax.clabel(
cs, cs.levels, inline=True, fontsize=self.FONT_SIZES["teensie"], fmt=fmt
)
        return cs
 | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/_kde_2d.py | _kde_2d.py |
import numpy as np
import collections
from ..common.importance_utils import find_correlated_pairs_among_top_features
from ..common.utils import is_list, is_correlated
from .base_plotting import PlotStructure
import random
class PlotImportance(PlotStructure):
"""
    PlotImportance handles plotting of feature rankings. The class
is designed to be generic enough to handle all possible ranking methods
computed within Scikit-Explain.
"""
SINGLE_VAR_METHODS = [
"backward_multipass",
"backward_singlepass",
"forward_multipass",
"forward_singlepass",
"ale_variance",
"coefs",
"shap_sum",
"gini",
"combined",
"sage",
"grouped",
"grouped_only",
"lime",
"tree_interpreter",
"sobol_total",
"sobol_1st",
"sobol_interact"
]
DISPLAY_NAMES_DICT = {
"backward_multipass": "Backward Multi-Pass",
"backward_singlepass": "Backward Single-Pass",
"forward_multipass": "Forward Multi-Pass",
"forward_singlepass": "Forward Single-Pass",
"perm_based": "Perm.-based Interact.",
"ale_variance": "ALE-Based Import.",
"ale_variance_interactions": "ALE-Based Interac.",
"coefs": "Coef.",
"shap_sum": "SHAP",
"hstat": "H-Stat",
"gini": "Gini",
"combined": "Method-Average Ranking",
"sage": "SAGE Importance Scores",
"grouped": "Grouped Importance",
"grouped_only": "Grouped Only Importance",
"sobol_total" : 'Sobol Total',
"sobol_1st" : 'Sobol 1st Order',
"sobol_interact" : 'Sobol Higher Orders',
}
def __init__(self, BASE_FONT_SIZE=12, seaborn_kws=None):
super().__init__(BASE_FONT_SIZE=BASE_FONT_SIZE, seaborn_kws=seaborn_kws)
def is_bootstrapped(self, scores):
"""Check if the permutation importance results are bootstrapped"""
return np.ndim(scores) > 1
def _get_axes(self, n_panels, **kwargs):
"""
Determine how many axes are required.
"""
if n_panels == 1:
kwargs["figsize"] = kwargs.get("figsize", (3, 2.5))
elif n_panels == 2 or n_panels == 3:
kwargs["figsize"] = kwargs.get("figsize", (6, 2.5))
else:
            kwargs["figsize"] = kwargs.get("figsize", (8, 5))
# create subplots, one for each feature
fig, axes = self.create_subplots(n_panels=n_panels, **kwargs)
return fig, axes
def _check_for_estimators(self, data, estimator_names):
"""Check that each estimator is in data"""
for ds in data:
if not (
collections.Counter(ds.attrs["estimators used"])
== collections.Counter(estimator_names)
):
raise AttributeError(
"""
The estimator names given do not match the estimators used to create
given data
"""
)
def plot_variable_importance(
self,
data,
panels,
display_feature_names={},
feature_colors=None,
num_vars_to_plot=10,
estimator_output="raw",
plot_correlated_features=False,
**kwargs,
):
"""Plots any variable importance method for a particular estimator
Parameters
-----------------
data : xarray.Dataset or list of xarray.Dataset
Permutation importance dataset for one or more metrics
panels: list of 2-tuples of estimator names and rank method
            E.g., panels = [('singlepass', 'Random Forest'),
                            ('multipass', 'Random Forest')]
will plot the singlepass and multipass results for the
random forest model.
Possible methods include 'multipass', 'singlepass',
'perm_based', 'ale_variance', or 'ale_variance_interactions'
display_feature_names : dict
A dict mapping feature names to readable, "pretty" feature names
feature_colors : dict
A dict mapping features to various colors. Helpful for color coding groups of features
num_vars_to_plot : int
            Number of top variables to plot (default is None, which uses the number of multi-pass results)
kwargs:
- xlabels
            - ylabels
- xticks
- p_values
- colinear_features
- rho_threshold
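        Examples
        --------
        A minimal sketch (assumes ``perm_results`` is an xarray.Dataset produced by
        scikit-explain's permutation importance routines, with variables named for
        the 'backward_singlepass' method and an estimator called 'Random Forest'):

        >>> plotter = PlotImportance()
        >>> panels = [('backward_singlepass', 'Random Forest')]
        >>> fig, axes = plotter.plot_variable_importance(
        ...     perm_results, panels, num_vars_to_plot=10
        ... )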
"""
xlabels = kwargs.get("xlabels", None)
ylabels = kwargs.get("ylabels", None)
xticks = kwargs.get("xticks", None)
title = kwargs.get("title", "")
p_values = kwargs.get("p_values", None)
colinear_features = kwargs.get("colinear_features", None)
rho_threshold = kwargs.get("rho_threshold", 0.8)
plot_reference_score = kwargs.get("plot_reference_score", True)
plot_error = kwargs.get('plot_error', True)
only_one_method = all([m[0] == panels[0][0] for m in panels])
only_one_estimator = all([m[1] == panels[0][1] for m in panels])
if not only_one_method:
kwargs["hspace"] = kwargs.get("hspace", 0.6)
if plot_correlated_features:
X = kwargs.get("X", None)
if X is None or X.empty:
raise ValueError(
"Must provide X to InterpretToolkit to compute the correlations!"
)
corr_matrix = X.corr().abs()
data = [data] if not is_list(data) else data
n_panels = len(panels)
fig, axes = self._get_axes(n_panels, **kwargs)
ax_iterator = self.axes_to_iterator(n_panels, axes)
for i, (panel, ax) in enumerate(zip(panels, ax_iterator)):
# Set the facecolor.
#ax.set_facecolor(kwargs.get("facecolor", (0.95, 0.95, 0.95)))
method, estimator_name = panel
results = data[i]
if xlabels is not None:
ax.set_xlabel(xlabels[i], fontsize=self.FONT_SIZES["small"])
else:
if not only_one_method:
ax.set_xlabel(
self.DISPLAY_NAMES_DICT.get(method, method),
fontsize=self.FONT_SIZES["small"],
)
if not only_one_estimator:
ax.set_title(estimator_name)
sorted_var_names = list(
results[f"{method}_rankings__{estimator_name}"].values
)
if num_vars_to_plot is None:
                num_vars_to_plot = len(sorted_var_names)
sorted_var_names = sorted_var_names[
: min(num_vars_to_plot, len(sorted_var_names))
]
sorted_var_names = sorted_var_names[::-1]
scores = results[f"{method}_scores__{estimator_name}"].values
scores = scores[: min(num_vars_to_plot, len(sorted_var_names))]
# Reverse the order.
scores = scores[::-1]
# Set very small values to zero.
scores = np.where(np.absolute(np.round(scores, 17)) < 1e-15, 0, scores)
# Get the colors for the plot
colors_to_plot = [
self.variable_to_color(var, feature_colors) for var in sorted_var_names
]
# Get the predictor names
variable_names_to_plot = [
f" {var}"
for var in self.convert_vars_to_readable(
sorted_var_names,
display_feature_names,
)
]
if method == "combined":
scores_to_plot = np.nanpercentile(scores, 50, axis=1)
# Compute the confidence intervals (ci)
ci = np.abs(
np.nanpercentile(scores, 50, axis=1)
- np.nanpercentile(scores, [25, 75], axis=1)
)
else:
scores_to_plot = np.nanmean(scores, axis=1)
ci = np.abs(
np.nanpercentile(scores, 50, axis=1)
- np.nanpercentile(scores, [2.5, 97.5], axis=1)
)
if plot_reference_score:
if 'forward' in method:
ax.axvline(results[f'all_permuted_score__{estimator_name}'].mean(),color='k',ls=':')
elif 'backward' in method:
ax.axvline(results[f'original_score__{estimator_name}'].mean(),color='k',ls='--')
# Despine
self.despine_plt(ax)
elinewidth = 0.9 if n_panels <= 3 else 0.5
if plot_error:
ax.barh(
np.arange(len(scores_to_plot)),
scores_to_plot,
linewidth=1.75,
edgecolor="white",
alpha=0.5,
color=colors_to_plot,
xerr=ci,
capsize=3.0,
ecolor="k",
error_kw=dict(
alpha=0.2,
elinewidth=elinewidth,
),
zorder=2,
)
else:
ax.barh(
np.arange(len(scores_to_plot)),
scores_to_plot,
linewidth=1.75,
edgecolor="white",
alpha=0.5,
color=colors_to_plot,
zorder=2,
)
if plot_correlated_features:
self._add_correlated_brackets(
ax, np.arange(len(scores_to_plot)),
scores_to_plot,
corr_matrix, sorted_var_names, rho_threshold
)
if num_vars_to_plot >= 20:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 3)
elif num_vars_to_plot > 10:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 2)
else:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 1)
# Put the variable names _into_ the plot
if method not in self.SINGLE_VAR_METHODS and plot_correlated_features:
pass
                # This code is not flexible at the moment.
#results_dict = is_correlated(
# corr_matrix, sorted_var_names, rho_threshold=rho_threshold
#)
if colinear_features is None:
fontweight = ["light"] * len(variable_names_to_plot)
colors = ["k"] * len(variable_names_to_plot)
else:
                # Bold text if the VIF > threshold (indicates a multicollinear predictor)
fontweight = [
"bold" if v in colinear_features else "light" for v in sorted_var_names
]
                # Color the tick label if the feature is collinear.
colors = ["xkcd:medium blue" if v in colinear_features else "k" for v in sorted_var_names]
ax.set_yticks(range(len(variable_names_to_plot)))
ax.set_yticklabels(variable_names_to_plot)
labels = ax.get_yticklabels()
# Bold var names
##[label.set_fontweight(opt) for opt, label in zip(fontweight, labels)]
[label.set_color(c) for c, label in zip(colors, labels)]
ax.tick_params(axis="both", which="both", length=0)
if xticks is not None:
ax.set_xticks(xticks)
else:
self.set_n_ticks(ax, option="x")
xlabel = (
self.DISPLAY_NAMES_DICT.get(method, method)
if (only_one_method and xlabels is None)
else ""
)
major_ax = self.set_major_axis_labels(
fig,
xlabel=xlabel,
ylabel_left="",
ylabel_right="",
title=title,
fontsize=self.FONT_SIZES["small"],
**kwargs,
)
if ylabels is not None:
self.set_row_labels(
labels=ylabels, axes=axes, pos=-1, pad=1.15, rotation=270, **kwargs
)
self.add_alphabet_label(
n_panels, axes, pos=kwargs.get("alphabet_pos", (0.9, 0.09)),
alphabet_fontsize = kwargs.get("alphabet_fontsize", 10)
)
# Necessary to make sure that the tick labels for the feature names
        # do not overlap another ax.
fig.tight_layout()
return fig, axes
def _add_correlated_brackets(self, ax, y, width, corr_matrix, top_features, rho_threshold):
"""
        Add brackets connecting features whose pairwise correlation magnitude
        exceeds a given threshold.
        Parameters
        ------------------
        ax : matplotlib.axes.Axes object
        y : array-like
            Bar centers (as passed to plt.barh)
        width : array-like
            Bar widths (as passed to plt.barh)
        corr_matrix : pandas.DataFrame
            Absolute-valued feature correlation matrix
        top_features : list of str
            Names of the top-ranked features, in plotting order
        rho_threshold : float
            Correlation magnitude above which a pair is connected
"""
get_colors = lambda n: list(
map(lambda i: "#" + "%06x" % random.randint(0, 0xFFFFFF), range(n))
)
_, pair_indices = find_correlated_pairs_among_top_features(
corr_matrix,
top_features,
rho_threshold=rho_threshold,
)
colors = get_colors(len(pair_indices))
top_indices, bottom_indices = [], []
for p, color in zip(pair_indices, colors):
delta=0
if p[0] > p[1]:
bottom_idx = p[1]
top_idx = p[0]
else:
bottom_idx = p[0]
top_idx = p[1]
# If a feature has already shown up in a correlated pair,
# then we want to shift the brackets slightly for ease of
# interpretation.
if bottom_idx in bottom_indices or bottom_idx in top_indices:
delta += 0.1
if top_idx in top_indices or top_idx in bottom_indices:
delta += 0.1
top_indices.append(top_idx)
bottom_indices.append(bottom_idx)
self.annotate_bars(ax, bottom_idx, top_idx, y=y, width=width, delta=delta)
# You can fill this in by using a dictionary with {var_name: legible_name}
def convert_vars_to_readable(self, variables_list, VARIABLE_NAMES_DICT):
"""Substitutes out variable names for human-readable ones
:param variables_list: a list of variable names
:returns: a copy of the list with human-readable names
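        Example (illustrative; the mapping below is a placeholder):
            >>> plotter.convert_vars_to_readable(["t2m"], {"t2m": "2-m Temperature"})
            ['2-m Temperature']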
"""
human_readable_list = list()
for var in variables_list:
if var in VARIABLE_NAMES_DICT:
human_readable_list.append(VARIABLE_NAMES_DICT[var])
else:
human_readable_list.append(var)
return human_readable_list
# This could easily be expanded with a dictionary
def variable_to_color(self, var, VARIABLES_COLOR_DICT):
"""
Returns the color for each variable.
"""
if var == "No Permutations":
return "xkcd:pastel red"
else:
if VARIABLES_COLOR_DICT is None:
return "xkcd:powder blue"
            elif isinstance(VARIABLES_COLOR_DICT, str):
return VARIABLES_COLOR_DICT
else:
                return VARIABLES_COLOR_DICT[var]
 | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/plot_permutation_importance.py | plot_permutation_importance.py |
import collections
from ..common.importance_utils import find_correlated_pairs_among_top_features
from ..common.utils import is_list, is_correlated
from .base_plotting import PlotStructure
import random
class PlotImportance(PlotStructure):
"""
PlotImportance handles plotting feature ranking plotting. The class
is designed to be generic enough to handle all possible ranking methods
computed within Scikit-Explain.
"""
SINGLE_VAR_METHODS = [
"backward_multipass",
"backward_singlepass",
"forward_multipass",
"forward_singlepass",
"ale_variance",
"coefs",
"shap_sum",
"gini",
"combined",
"sage",
"grouped",
"grouped_only",
"lime",
"tree_interpreter",
"sobol_total",
"sobol_1st",
"sobol_interact"
]
DISPLAY_NAMES_DICT = {
"backward_multipass": "Backward Multi-Pass",
"backward_singlepass": "Backward Single-Pass",
"forward_multipass": "Forward Multi-Pass",
"forward_singlepass": "Forward Single-Pass",
"perm_based": "Perm.-based Interact.",
"ale_variance": "ALE-Based Import.",
"ale_variance_interactions": "ALE-Based Interac.",
"coefs": "Coef.",
"shap_sum": "SHAP",
"hstat": "H-Stat",
"gini": "Gini",
"combined": "Method-Average Ranking",
"sage": "SAGE Importance Scores",
"grouped": "Grouped Importance",
"grouped_only": "Grouped Only Importance",
"sobol_total" : 'Sobol Total',
"sobol_1st" : 'Sobol 1st Order',
"sobol_interact" : 'Sobol Higher Orders',
}
def __init__(self, BASE_FONT_SIZE=12, seaborn_kws=None):
super().__init__(BASE_FONT_SIZE=BASE_FONT_SIZE, seaborn_kws=seaborn_kws)
def is_bootstrapped(self, scores):
"""Check if the permutation importance results are bootstrapped"""
return np.ndim(scores) > 1
def _get_axes(self, n_panels, **kwargs):
"""
Determine how many axes are required.
"""
if n_panels == 1:
kwargs["figsize"] = kwargs.get("figsize", (3, 2.5))
elif n_panels == 2 or n_panels == 3:
kwargs["figsize"] = kwargs.get("figsize", (6, 2.5))
else:
figsize = kwargs.get("figsize", (8, 5))
# create subplots, one for each feature
fig, axes = self.create_subplots(n_panels=n_panels, **kwargs)
return fig, axes
def _check_for_estimators(self, data, estimator_names):
"""Check that each estimator is in data"""
for ds in data:
if not (
collections.Counter(ds.attrs["estimators used"])
== collections.Counter(estimator_names)
):
raise AttributeError(
"""
The estimator names given do not match the estimators used to create
given data
"""
)
def plot_variable_importance(
self,
data,
panels,
display_feature_names={},
feature_colors=None,
num_vars_to_plot=10,
estimator_output="raw",
plot_correlated_features=False,
**kwargs,
):
"""Plots any variable importance method for a particular estimator
Parameters
-----------------
data : xarray.Dataset or list of xarray.Dataset
Permutation importance dataset for one or more metrics
panels: list of 2-tuples of estimator names and rank method
E.g., panels = [('singlepass', 'Random Forest',
('multipass', 'Random Forest') ]
will plot the singlepass and multipass results for the
random forest model.
Possible methods include 'multipass', 'singlepass',
'perm_based', 'ale_variance', or 'ale_variance_interactions'
display_feature_names : dict
A dict mapping feature names to readable, "pretty" feature names
feature_colors : dict
A dict mapping features to various colors. Helpful for color coding groups of features
num_vars_to_plot : int
Number of top variables to plot (defalut is None and will use number of multipass results)
kwargs:
- xlabels
- ylabel
- xticks
- p_values
- colinear_features
- rho_threshold
"""
xlabels = kwargs.get("xlabels", None)
ylabels = kwargs.get("ylabels", None)
xticks = kwargs.get("xticks", None)
title = kwargs.get("title", "")
p_values = kwargs.get("p_values", None)
colinear_features = kwargs.get("colinear_features", None)
rho_threshold = kwargs.get("rho_threshold", 0.8)
plot_reference_score = kwargs.get("plot_reference_score", True)
plot_error = kwargs.get('plot_error', True)
only_one_method = all([m[0] == panels[0][0] for m in panels])
only_one_estimator = all([m[1] == panels[0][1] for m in panels])
if not only_one_method:
kwargs["hspace"] = kwargs.get("hspace", 0.6)
if plot_correlated_features:
X = kwargs.get("X", None)
if X is None or X.empty:
raise ValueError(
"Must provide X to InterpretToolkit to compute the correlations!"
)
corr_matrix = X.corr().abs()
data = [data] if not is_list(data) else data
n_panels = len(panels)
fig, axes = self._get_axes(n_panels, **kwargs)
ax_iterator = self.axes_to_iterator(n_panels, axes)
for i, (panel, ax) in enumerate(zip(panels, ax_iterator)):
# Set the facecolor.
#ax.set_facecolor(kwargs.get("facecolor", (0.95, 0.95, 0.95)))
method, estimator_name = panel
results = data[i]
if xlabels is not None:
ax.set_xlabel(xlabels[i], fontsize=self.FONT_SIZES["small"])
else:
if not only_one_method:
ax.set_xlabel(
self.DISPLAY_NAMES_DICT.get(method, method),
fontsize=self.FONT_SIZES["small"],
)
if not only_one_estimator:
ax.set_title(estimator_name)
sorted_var_names = list(
results[f"{method}_rankings__{estimator_name}"].values
)
if num_vars_to_plot is None:
                num_vars_to_plot = len(sorted_var_names)
sorted_var_names = sorted_var_names[
: min(num_vars_to_plot, len(sorted_var_names))
]
sorted_var_names = sorted_var_names[::-1]
scores = results[f"{method}_scores__{estimator_name}"].values
scores = scores[: min(num_vars_to_plot, len(sorted_var_names))]
# Reverse the order.
scores = scores[::-1]
# Set very small values to zero.
scores = np.where(np.absolute(np.round(scores, 17)) < 1e-15, 0, scores)
# Get the colors for the plot
colors_to_plot = [
self.variable_to_color(var, feature_colors) for var in sorted_var_names
]
# Get the predictor names
variable_names_to_plot = [
f" {var}"
for var in self.convert_vars_to_readable(
sorted_var_names,
display_feature_names,
)
]
if method == "combined":
scores_to_plot = np.nanpercentile(scores, 50, axis=1)
# Compute the confidence intervals (ci)
ci = np.abs(
np.nanpercentile(scores, 50, axis=1)
- np.nanpercentile(scores, [25, 75], axis=1)
)
else:
scores_to_plot = np.nanmean(scores, axis=1)
ci = np.abs(
np.nanpercentile(scores, 50, axis=1)
- np.nanpercentile(scores, [2.5, 97.5], axis=1)
)
            if plot_reference_score:
                if 'forward' in method:
                    ax.axvline(results[f'all_permuted_score__{estimator_name}'].mean(), color='k', ls=':')
                elif 'backward' in method:
                    ax.axvline(results[f'original_score__{estimator_name}'].mean(), color='k', ls='--')
# Despine
self.despine_plt(ax)
elinewidth = 0.9 if n_panels <= 3 else 0.5
if plot_error:
ax.barh(
np.arange(len(scores_to_plot)),
scores_to_plot,
linewidth=1.75,
edgecolor="white",
alpha=0.5,
color=colors_to_plot,
xerr=ci,
capsize=3.0,
ecolor="k",
error_kw=dict(
alpha=0.2,
elinewidth=elinewidth,
),
zorder=2,
)
else:
ax.barh(
np.arange(len(scores_to_plot)),
scores_to_plot,
linewidth=1.75,
edgecolor="white",
alpha=0.5,
color=colors_to_plot,
zorder=2,
)
if plot_correlated_features:
self._add_correlated_brackets(
ax, np.arange(len(scores_to_plot)),
scores_to_plot,
corr_matrix, sorted_var_names, rho_threshold
)
if num_vars_to_plot >= 20:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 3)
elif num_vars_to_plot > 10:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 2)
else:
size = kwargs.get("fontsize", self.FONT_SIZES["teensie"] - 1)
# Put the variable names _into_ the plot
if method not in self.SINGLE_VAR_METHODS and plot_correlated_features:
pass
            # This code is not flexible at the moment.
#results_dict = is_correlated(
# corr_matrix, sorted_var_names, rho_threshold=rho_threshold
#)
if colinear_features is None:
fontweight = ["light"] * len(variable_names_to_plot)
colors = ["k"] * len(variable_names_to_plot)
else:
# Bold text if the VIF > threshold (indicates a multicolinear predictor)
fontweight = [
"bold" if v in colinear_features else "light" for v in sorted_var_names
]
# Bold text if value is insignificant.
colors = ["xkcd:medium blue" if v in colinear_features else "k" for v in sorted_var_names]
ax.set_yticks(range(len(variable_names_to_plot)))
ax.set_yticklabels(variable_names_to_plot)
labels = ax.get_yticklabels()
# Bold var names
##[label.set_fontweight(opt) for opt, label in zip(fontweight, labels)]
[label.set_color(c) for c, label in zip(colors, labels)]
ax.tick_params(axis="both", which="both", length=0)
if xticks is not None:
ax.set_xticks(xticks)
else:
self.set_n_ticks(ax, option="x")
xlabel = (
self.DISPLAY_NAMES_DICT.get(method, method)
if (only_one_method and xlabels is None)
else ""
)
major_ax = self.set_major_axis_labels(
fig,
xlabel=xlabel,
ylabel_left="",
ylabel_right="",
title=title,
fontsize=self.FONT_SIZES["small"],
**kwargs,
)
if ylabels is not None:
self.set_row_labels(
labels=ylabels, axes=axes, pos=-1, pad=1.15, rotation=270, **kwargs
)
self.add_alphabet_label(
n_panels, axes, pos=kwargs.get("alphabet_pos", (0.9, 0.09)),
alphabet_fontsize = kwargs.get("alphabet_fontsize", 10)
)
        # Necessary to make sure that the tick labels for the feature names
        # do not overlap with another ax.
fig.tight_layout()
return fig, axes
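    # Illustrative usage sketch (hypothetical names, not part of the original
    # class): assuming `plotter` is an instance of this class and `results` is
    # a permutation-importance xarray.Dataset whose variables follow the
    # "{method}_rankings__{estimator}" naming convention used above, a single
    # panel could be drawn with:
    #
    #   fig, axes = plotter.plot_variable_importance(
    #       results,
    #       panels=[("backward_singlepass", "Random Forest")],
    #       num_vars_to_plot=10,
    #   )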
def _add_correlated_brackets(self, ax, y, width, corr_matrix, top_features, rho_threshold):
"""
Add bracket connecting features above a given correlation threshold.
Parameters
------------------
        ax : matplotlib.axes.Axes object
        y : array-like of bar (y) positions
        width : array-like of bar widths (importance scores)
        corr_matrix : pandas.DataFrame of absolute feature correlations
        top_features : list of the plotted (top-ranked) feature names
        rho_threshold : float correlation threshold for drawing a bracket
"""
get_colors = lambda n: list(
map(lambda i: "#" + "%06x" % random.randint(0, 0xFFFFFF), range(n))
)
_, pair_indices = find_correlated_pairs_among_top_features(
corr_matrix,
top_features,
rho_threshold=rho_threshold,
)
colors = get_colors(len(pair_indices))
top_indices, bottom_indices = [], []
for p, color in zip(pair_indices, colors):
delta=0
if p[0] > p[1]:
bottom_idx = p[1]
top_idx = p[0]
else:
bottom_idx = p[0]
top_idx = p[1]
# If a feature has already shown up in a correlated pair,
# then we want to shift the brackets slightly for ease of
# interpretation.
if bottom_idx in bottom_indices or bottom_idx in top_indices:
delta += 0.1
if top_idx in top_indices or top_idx in bottom_indices:
delta += 0.1
top_indices.append(top_idx)
bottom_indices.append(bottom_idx)
self.annotate_bars(ax, bottom_idx, top_idx, y=y, width=width, delta=delta)
# You can fill this in by using a dictionary with {var_name: legible_name}
def convert_vars_to_readable(self, variables_list, VARIABLE_NAMES_DICT):
"""Substitutes out variable names for human-readable ones
:param variables_list: a list of variable names
:returns: a copy of the list with human-readable names
"""
human_readable_list = list()
for var in variables_list:
if var in VARIABLE_NAMES_DICT:
human_readable_list.append(VARIABLE_NAMES_DICT[var])
else:
human_readable_list.append(var)
return human_readable_list
# This could easily be expanded with a dictionary
def variable_to_color(self, var, VARIABLES_COLOR_DICT):
"""
Returns the color for each variable.
"""
if var == "No Permutations":
return "xkcd:pastel red"
else:
if VARIABLES_COLOR_DICT is None:
return "xkcd:powder blue"
elif not isinstance(VARIABLES_COLOR_DICT, dict) and isinstance(
VARIABLES_COLOR_DICT, str
):
return VARIABLES_COLOR_DICT
else:
return VARIABLES_COLOR_DICT[var] | 0.730578 | 0.371479 |
import matplotlib.pyplot as plt
import seaborn as sns
def rounding(v):
"""Rounding for pretty plots"""
    if v > 100:
        return int(round(v))
    elif v >= 1:
        return round(v, 1)
    elif v >= 0.1:
        return round(v, 1)
    elif v >= 0:
        return round(v, 3)
    # Fall back for negative values.
    return round(v, 3)
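# Minimal sketch (not part of the original module) showing how `rounding`
# behaves across magnitudes; the chosen inputs are arbitrary examples.
def _example_rounding():
    # Large values are returned as integers, mid-range values with one
    # decimal place, and small values with three decimal places.
    return [rounding(v) for v in (250.7, 12.34, 0.5678, 0.01234)]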
def box_and_whisker(
X_train, top_preds, example, display_feature_names={}, display_units={}, **kwargs
):
"""Create interpretability graphic"""
color = kwargs.get("bar_color", "lightblue")
f, axes = plt.subplots(dpi=300, nrows=len(top_preds), figsize=(4, 5))
sns.despine(
fig=f,
ax=axes,
top=True,
right=True,
left=True,
bottom=False,
offset=None,
trim=False,
)
box_plots = []
for ax, v in zip(axes, top_preds):
box_plot = ax.boxplot(
x=X_train[v], vert=False, whis=[0, 100], patch_artist=True, widths=0.35
)
box_plots.append(box_plot)
ax.annotate(
display_feature_names.get(v, v) + " (" + display_units.get(v, v) + ")",
xy=(0.9, 1.15),
xycoords="axes fraction",
)
ax.annotate(
rounding(example[v]),
xy=(0.9, 0.7),
xycoords="axes fraction",
fontsize=6,
color="red",
)
# plot vertical lines
ax.axvline(x=example[v], color="red", zorder=5)
# Remove y tick labels
ax.set_yticks(
[],
)
# fill with colors
for bplot in box_plots:
for patch in bplot["boxes"]:
patch.set_facecolor(color)
for line in bplot["means"]:
line.set_color(color)
plt.subplots_adjust(wspace=5.75)
f.suptitle("Training Set Distribution for Top Predictors")
axes[0].set_title(
"Vertical red bars show current values for this object",
fontsize=8,
pad=25,
color="red",
)
f.tight_layout()
    return f, axes | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/plot/_box_and_whisker.py | _box_and_whisker.py | 0.754192 | 0.526404
from functools import partial
from sklearn.metrics._base import _average_binary_score
from sklearn.utils.multiclass import type_of_target
from sklearn.metrics import (
brier_score_loss,
average_precision_score,
precision_recall_curve,
)
import numpy as np
def brier_skill_score(y_values, forecast_probabilities):
"""Computes the brier skill score"""
climo = np.mean((y_values - np.mean(y_values)) ** 2)
return 1.0 - brier_score_loss(y_values, forecast_probabilities) / climo
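# Hedged usage sketch (not part of the original module): computes the Brier
# skill score for a toy set of labels and probabilistic forecasts. The arrays
# below are made-up examples.
def _example_brier_skill_score():
    y_values = np.array([0, 0, 1, 0, 1, 1, 0, 0])
    forecast_probabilities = np.array([0.1, 0.2, 0.8, 0.3, 0.7, 0.9, 0.2, 0.1])
    # Positive values indicate skill relative to the climatological forecast.
    return brier_skill_score(y_values, forecast_probabilities)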
def modified_precision(precision, known_skew, new_skew):
"""
Modify the success ratio according to equation (3) from
Lampert and Gancarski (2014).
"""
precision[precision < 1e-5] = 1e-5
term1 = new_skew / (1.0 - new_skew)
term2 = (1 / precision) - 1.0
denom = known_skew + ((1 - known_skew) * term1 * term2)
return known_skew / denom
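# Illustrative sketch (not part of the original module): shows how a success
# ratio (precision) curve computed on data with one event frequency is
# adjusted to a different, "known" skew. The numbers are arbitrary.
def _example_modified_precision():
    precision = np.array([0.9, 0.6, 0.3])
    # Precision observed on data with a 20% event rate, rescaled to the
    # precision expected at a 5% event rate.
    return modified_precision(precision, known_skew=0.05, new_skew=0.2)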
def calc_sr_min(skew):
    """Compute the minimum success ratio (SR) curve over POD for a given skew."""
    pod = np.linspace(0, 1, 100)
    sr_min = (skew * pod) / (1 - skew + (skew * pod))
    return sr_min
def _binary_uninterpolated_average_precision(
y_true, y_score, known_skew, new_skew, pos_label=1, sample_weight=None
):
precision, recall, _ = precision_recall_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
if known_skew is not None:
precision = modified_precision(precision, known_skew, new_skew)
return -np.sum(np.diff(recall) * np.array(precision)[:-1])
def min_aupdc(
y_true, pos_label, average, sample_weight=None, known_skew=None, new_skew=None
):
"""
Compute the minimum possible area under the performance
diagram curve. Essentially, a vote of NO for all predictions.
"""
min_score = np.zeros((len(y_true)))
average_precision = partial(
_binary_uninterpolated_average_precision,
known_skew=known_skew,
new_skew=new_skew,
pos_label=pos_label,
)
ap_min = _average_binary_score(
average_precision, y_true, min_score, average, sample_weight=sample_weight
)
return ap_min
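# Hedged sketch (not part of the original module): the minimum AUPDC for a
# toy binary problem, i.e. the area obtained when every forecast is zero.
# The labels below are arbitrary.
def _example_min_aupdc():
    y_true = np.array([0, 0, 1, 0, 1, 0, 0, 1, 0, 0])
    skew = float(np.mean(y_true))
    return min_aupdc(y_true, pos_label=1, average="macro",
                     known_skew=skew, new_skew=skew)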
def norm_aupdc(
y_true,
y_score,
known_skew=None,
*,
average="macro",
pos_label=1,
sample_weight=None,
min_method="random",
):
"""
Compute the normalized modified average precision. Normalization removes
the no-skill region either based on skew or random classifier performance.
Modification alters success ratio to be consistent with a known skew.
Parameters:
-------------------
y_true, array of (n_samples,)
Binary, truth labels (0,1)
y_score, array of (n_samples,)
        Model predictions (either deterministic or probabilistic)
known_skew, float between 0 and 1
Known or reference skew (# of 1 / n_samples) for
computing the modified success ratio.
min_method, 'skew' or 'random'
If 'skew', then the normalization is based on the minimum AUPDC
formula presented in Boyd et al. (2012).
If 'random', then the normalization is based on the
minimum AUPDC for a random classifier, which is equal
to the known skew.
Boyd, 2012: Unachievable Region in Precision-Recall Space and Its Effect on Empirical Evaluation, ArXiv
"""
new_skew = np.mean(y_true)
if known_skew is None:
known_skew = new_skew
y_type = type_of_target(y_true)
if y_type == "multilabel-indicator" and pos_label != 1:
raise ValueError(
"Parameter pos_label is fixed to 1 for "
"multilabel-indicator y_true. Do not set "
"pos_label or set pos_label to 1."
)
elif y_type == "binary":
# Convert to Python primitive type to avoid NumPy type / Python str
# comparison. See https://github.com/numpy/numpy/issues/6784
present_labels = np.unique(y_true).tolist()
if len(present_labels) == 2 and pos_label not in present_labels:
raise ValueError(
f"pos_label={pos_label} is not a valid label. It should be "
f"one of {present_labels}"
)
average_precision = partial(
_binary_uninterpolated_average_precision,
known_skew=known_skew,
new_skew=new_skew,
pos_label=pos_label,
)
ap = _average_binary_score(
average_precision, y_true, y_score, average, sample_weight=sample_weight
)
if min_method == "random":
ap_min = known_skew
elif min_method == "skew":
ap_min = min_aupdc(
y_true,
pos_label,
average,
sample_weight=sample_weight,
known_skew=known_skew,
new_skew=new_skew,
)
naupdc = (ap - ap_min) / (1.0 - ap_min)
    return naupdc | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/common/metrics.py | metrics.py | 0.920567 | 0.582996
import xarray as xr
import numpy as np
from skexplain.common.utils import compute_bootstrap_indices
import pandas as pd
def method_average_ranking(data, features, methods, estimator_names, n_features=12):
"""
Compute the median ranking across the results of different ranking methods.
Also, include the 25-75th percentile ranking uncertainty.
    Parameters
    ------------
    data : list of xarray.Dataset
        The set of predictor ranking results to average over.
    features : list of strings
        The features to include in the combined ranking.
    methods : list of strings
        The ranking methods to use from the data (see plot_importance for examples)
    estimator_names : string or list of strings
        Name of the estimator(s).
    n_features : int (default=12)
        Number of top-ranked features to keep from each method.
    Returns
    --------
    data : xarray.Dataset
        Dataset containing the combined (median) feature ranking and the
        per-method ranks used to compute it (lower values indicate a
        higher rank).
    """
rankings_dict = {f: [] for f in features}
    for d, method in zip(data, methods):
        for estimator_name in estimator_names:
            ranked_features = d[f"{method}_rankings__{estimator_name}"].values[:n_features]
            rankings = {f: i for i, f in enumerate(ranked_features)}
            # Record each requested feature's rank for this method; features
            # outside the method's top-n get a NaN placeholder.
            for f in rankings_dict.keys():
                try:
                    rankings_dict[f].append(rankings[f])
                except KeyError:
                    rankings_dict[f].append(np.nan)
max_len = np.max([len(rankings_dict[k]) for k in rankings_dict.keys()])
for k in rankings_dict.keys():
l = rankings_dict[k]
if len(l) < max_len:
delta = max_len - len(l)
rankings_dict[k] = l + [np.nan] * delta
rankings_dict_avg = {
f: np.nanpercentile(rankings_dict[f], 50) for f in rankings_dict.keys()
}
features = np.array(list(rankings_dict_avg.keys()))
rankings = np.array([rankings_dict_avg[f] for f in features])
idxs = np.argsort(rankings)
rankings_sorted = rankings[idxs]
features_ranked = features[idxs]
scores = np.array([rankings_dict[f] for f in features_ranked])
data = {}
data[f"combined_rankings__{estimator_name}"] = (
[f"n_vars_avg"],
features_ranked,
)
data[f"combined_scores__{estimator_name}"] = (
[f"n_vars_avg", "n_bootstrap"],
scores,
)
data = xr.Dataset(data)
return data
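# Illustrative sketch (not part of the original module): builds two tiny
# ranking Datasets that follow the "{method}_rankings__{estimator}" naming
# convention used above and combines them. All names are hypothetical.
def _example_method_average_ranking():
    features = ["temp", "dewpoint", "wind"]
    ds1 = xr.Dataset(
        {"backward_singlepass_rankings__RF": (["n_vars"], np.array(features))}
    )
    ds2 = xr.Dataset(
        {"sage_rankings__RF": (["n_vars"], np.array(["wind", "temp", "dewpoint"]))}
    )
    return method_average_ranking(
        [ds1, ds2], features, ["backward_singlepass", "sage"], ["RF"]
    )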
def non_increasing(L):
# Check for decreasing scores.
return all(x >= y for x, y in zip(L, L[1:]))
def compute_importance(results, scoring_strategy, direction):
"""
Compute the importance scores from the permutation importance results.
The importance score varies depending on the orientation of the
loss metric and whether it is multipass or singlepass.
Parameters
--------------
results : InterpretToolkit.permutation_importance results
xr.Dataset
scoring_strategy : 'minimize' or 'maximize'
Whether the strategy for assessing importance was
        based on minimizing or maximizing the performance metric
after permuting features (e.g., the goal is to 'maximize'
loss metrics like MSE, but 'minimize' rank metrics like AUC)
direction : 'forward' or 'backward'
Whether the permutation method was 'forward' or 'backward'.
Returns
--------------
results : xarray.Dataset
scores for each estimator and multi/singlepass are
converted to proper importance scores.
"""
if scoring_strategy == 'argmin_of_mean':
scoring_strategy = 'minimize'
elif scoring_strategy == 'argmax_of_mean':
scoring_strategy = 'maximize'
estimators = results.attrs["estimators used"]
for estimator in estimators:
orig_score = results[f"original_score__{estimator}"].values
if direction == 'forward':
all_permuted_score = results[f"all_permuted_score__{estimator}"].values
for mode in ["singlepass", "multipass"]:
permute_scores = results[f"{mode}_scores__{estimator}"].values
if direction == 'forward':
# For a loss metric, forward importance is generically defined
# as the error(X_J') - error(X_j') where J is the set of
# all features while j is some subset of J or a single feature.
if scoring_strategy == 'maximize':
# E.g., AUC, NAUPDC, CSI, BSS
imp = permute_scores - all_permuted_score
                elif scoring_strategy == 'minimize':
                    # For a rank-based metric, importance is defined opposite
                    # of the loss metric [ error(X_j') - error(X_J') ]
                    # E.g., MSE, BS, etc.
                    imp = all_permuted_score - permute_scores
elif direction == 'backward':
# For a loss metric, backward importance is generically defined
# as the error(X_j') - error(X_j) where j is some subset or
# a single feature.
if scoring_strategy == 'minimize':
imp = orig_score - permute_scores
                elif scoring_strategy == 'maximize':
                    # For a rank-based metric, it is defined opposite of that above.
                    # i.e., error(X_j) - error(X_j')
                    imp = permute_scores - orig_score
"""
decreasing = non_increasing(np.mean(permute_scores, axis=1))
if decreasing:
if orientation == "negative":
# Singlepass MSE
imp = permute_scores - orig_score
else:
# Backward Multipass on AUC/AUPDC (permuted_score - (1-orig_score)).
# Most positively-oriented metrics top off at 1.
top = np.max(permute_scores)
imp = permute_scores - (top - orig_score)
else:
if orientation == "negative":
# Forward Multipass MSE
top = np.max(permute_scores)
imp = (top + orig_score) - permute_scores
else:
# Singlepass AUC/NAUPDC
imp = orig_score - permute_scores
"""
            # Scale the importance scores by the 1st-99th interpercentile range.
imp = imp / (np.percentile(imp, 99) - np.percentile(imp, 1))
results[f"{mode}_scores__{estimator}"] = (
[f"n_vars_{mode}", "n_bootstrap"],
imp,
)
return results
def to_skexplain_importance(
importances, estimator_name,
feature_names,
method,
normalize=False,
bootstrap_axis=0,
):
"""
Convert feature ranking-based scores from non-permutation-importance methods
computed by scikit-explain or other methods into a skexplain-style
    dataset to leverage the built-in plotting code. This method handles the
ranking and sorting of the importance values by assuming higher values
equal higher importance.
Caution: This method assumes that higher values equal higher importance!
Parameters
---------------
importances : 1d or 2d array-like
The feature importance scores. The code assumes that 2D arrays are the result
of bootstrapping. Users can declare the bootstrapping axis with `bootstrap_axis`.
By default, the first axis (=0) is the bootstrap axis for skexplain methods
bootstrap_axis : int (default=0)
estimator_name : str
The estimator name. Used for plotting and creating the dataset.
feature_names : array-like of shape (n_features)
The feature names. Used for plotting and creating the dataset.
method : 'sage', 'coefs', 'shap_std', 'shap_sum', 'tree_interpreter', 'lime' or str
        The name of the feature ranking method. The named methods perform specific
operations. For example, local methods like 'shap_sum', 'tree_interpreter',
'lime' will sum the importance values to determine feature ranking.
normalize : True/False (default=False)
        If True, scale the feature importance values by their 1st-99th percentile range.
This is useful when comparing importance across different methods.
"""
bootstrap = False
if method == "sage":
importances_std = importances.std
importances = importances.values
elif method == "coefs":
importances = np.absolute(importances)
elif method == "shap_std":
# Compute the std(SHAP)
importances = np.nanstd(importances, axis=0)
elif method == "shap_sum" or method == "tree_interpreter" or method == 'lime':
# Compute sum of abs values
importances = np.nansum(np.absolute(importances), axis=0)
else:
if np.ndim(importances) == 2:
# average over bootstrapping
bootstrap = True
importances_to_save = importances.copy()
importances = np.nanmean(importances, axis=bootstrap_axis)
# Sort from higher score to lower score
ranked_indices = np.argsort(importances)[::-1]
if bootstrap:
scores_ranked = importances_to_save[ranked_indices, :]
else:
scores_ranked = importances[ranked_indices]
if method == "sage":
std_ranked = importances_std[ranked_indices]
features_ranked = np.array(feature_names)[ranked_indices]
data = {}
data[f"{method}_rankings__{estimator_name}"] = (
[f"n_vars_{method}"],
features_ranked,
)
if not bootstrap:
scores_ranked = scores_ranked.reshape(len(scores_ranked), 1)
importances = importances.reshape(len(importances), 1)
if normalize:
        # Scale the importance scores by the 1st-99th interpercentile range.
scores_ranked = scores_ranked / (
np.percentile(scores_ranked, 99) - np.percentile(scores_ranked, 1)
)
data[f"{method}_scores__{estimator_name}"] = (
[f"n_vars_{method}", "n_bootstrap"],
scores_ranked,
)
if method == "sage":
data[f"sage_scores_std__{estimator_name}"] = (
[f"n_vars_sage"],
std_ranked,
)
data = xr.Dataset(data)
data.attrs["estimators used"] = estimator_name
data.attrs["estimator output"] = "probability"
return data
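# Hedged usage sketch (not part of the original module): wraps a plain array
# of impurity-based importances (e.g., a random forest's feature_importances_)
# into the skexplain-style Dataset produced above. Names are hypothetical.
def _example_to_skexplain_importance():
    importances = np.array([0.45, 0.10, 0.30, 0.15])
    return to_skexplain_importance(
        importances,
        estimator_name="Random Forest",
        feature_names=["temp", "dewpoint", "wind", "pressure"],
        method="gini",
    )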
def combine_top_features(results_dict, n_vars=None):
    """Combines the lists of top features from different estimators
    into a single list of the features common to all estimators
    (duplicates removed).
Args:
-------------
results_dict : dict
n_vars : integer
"""
if n_vars is None:
n_vars = 1000
combined_features = []
for estimator_name in results_dict.keys():
features = results_dict[estimator_name]
combined_features.append(features)
unique_features = list(set.intersection(*map(set, combined_features)))[:n_vars]
return unique_features
def retrieve_important_vars(results, estimator_names, multipass=True):
"""
Return a list of the important features stored in the
ImportanceObject
Args:
-------------------
results : python object
ImportanceObject from PermutationImportance
multipass : boolean
if True, returns the multipass permutation importance results
else returns the singlepass permutation importance results
Returns:
top_features : list
a list of features with order determined by
the permutation importance method
"""
perm_method = "multipass" if multipass else "singlepass"
direction = results.attrs['direction']
important_vars_dict = {}
for estimator_name in estimator_names:
top_features = list(results[f"{direction}_{perm_method}_rankings__{estimator_name}"].values)
important_vars_dict[estimator_name] = top_features
return important_vars_dict
def find_correlated_pairs_among_top_features(
corr_matrix,
top_features,
rho_threshold=0.8,
):
"""
Of the top features, find correlated pairs above some
linear correlation coefficient threshold
Args:
----------------------
corr_matrix : pandas.DataFrame
top_features : list of strings
rho_threshold : float
"""
top_feature_indices = {f: i for i, f in enumerate(top_features)}
sub_corr_matrix = corr_matrix[top_features].loc[top_features]
pairs = []
for feature in top_features:
# try:
most_corr_feature = (
sub_corr_matrix[feature].sort_values(ascending=False).index[1]
)
# except:
# continue
most_corr_value = sub_corr_matrix[feature].sort_values(ascending=False)[1]
if round(most_corr_value, 5) >= rho_threshold:
pairs.append((feature, most_corr_feature))
pairs = list(set([tuple(sorted(t)) for t in pairs]))
pair_indices = [
(top_feature_indices[p[0]], top_feature_indices[p[1]]) for p in pairs
]
return pairs, pair_indices
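# Illustrative sketch (not part of the original module): two nearly collinear
# columns ("a" and "b") are flagged as a correlated pair. Data are synthetic.
def _example_find_correlated_pairs():
    rng = np.random.RandomState(0)
    a = rng.normal(size=200)
    X = pd.DataFrame(
        {"a": a, "b": a + 0.01 * rng.normal(size=200), "c": rng.normal(size=200)}
    )
    corr_matrix = X.corr().abs()
    return find_correlated_pairs_among_top_features(
        corr_matrix, top_features=["a", "b", "c"], rho_threshold=0.8
    )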
def all_permuted_score(estimator, X, y, evaluation_fn, n_permute, subsample, random_seed=123, class_index=1):
random_state = np.random.RandomState(random_seed)
inds = random_state.permutation(len(X))
if isinstance(X, pd.DataFrame):
X = X.values
inds_set = compute_bootstrap_indices(X, subsample=1.0, n_bootstrap=n_permute, seed=90)
scores = []
for inds in inds_set:
X_sampled = X[inds, :]
X_permuted = np.array([ X_sampled[inds, i] for i in range(X.shape[1])]).T
if hasattr(estimator, 'predict_proba'):
predictions = estimator.predict_proba(X_permuted)[:]
# For binary classification problems.
if predictions.shape[1] == 2:
predictions = predictions[:,1]
#print (predictions.shape)
elif hasattr(estimator, 'predict'):
predictions = estimator.predict(X_permuted)[:]
else:
raise AttributeError(f'{estimator} does not have .predict or .predict_proba!')
scores.append(evaluation_fn(y, predictions))
    return np.array(scores) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/common/importance_utils.py | importance_utils.py | 0.885749 | 0.579311
import numpy as np
import xarray as xr
import pandas as pd
from collections import ChainMap
from statsmodels.distributions.empirical_distribution import ECDF
from scipy.stats import t
from sklearn.linear_model import Ridge
class MissingFeaturesError(Exception):
""" Raised when features are missing.
        E.g., all features are required for
IAS or MEC
"""
def __init__(self, estimator_name, missing_features):
self.message = f"""ALE for {estimator_name} was not computed for all features.
These features were missing: {missing_features}"""
super().__init__(self.message)
def check_all_features_for_ale(ale, estimator_names, features):
    """Check that ALE values exist for each feature."""
data_vars = ale.data_vars
for estimator_name in estimator_names:
_list = [True if f'{f}__{estimator_name}__ale' in data_vars else False for f in features]
if not all(_list):
missing_features = np.array(features)[np.where(~np.array(_list))[0]]
raise MissingFeaturesError(estimator_name, missing_features)
def flatten_nested_list(list_of_lists):
    """Turn a list of lists into a single, flattened list"""
all_elements_are_lists = all([is_list(item) for item in list_of_lists])
if not all_elements_are_lists:
new_list_of_lists = []
for item in list_of_lists:
if is_list(item):
new_list_of_lists.append(item)
else:
new_list_of_lists.append([item])
list_of_lists = new_list_of_lists
return [item for elem in list_of_lists for item in elem]
def is_dataset(data):
return isinstance(data, xr.Dataset)
def is_dataframe(data):
return isinstance(data, pd.DataFrame)
def check_is_permuted(X, X_permuted):
permuted_features = []
for f in X.columns:
if not np.array_equal(X.loc[:, f], X_permuted.loc[:, f]):
permuted_features.append(f)
return permuted_features
def is_correlated(corr_matrix, feature_pairs, rho_threshold=0.8):
"""
Returns dict where the key are the feature pairs and the items
are booleans of whether the pair is linearly correlated above the
given threshold.
"""
results = {}
for pair in feature_pairs:
f1, f2 = pair.split("__")
corr = corr_matrix[f1][f2]
results[pair] = round(corr, 3) >= rho_threshold
return results
def is_fitted(estimator):
"""
Checks if a scikit-learn estimator/transformer has already been fit.
Parameters
----------
estimator: scikit-learn estimator (e.g. RandomForestClassifier)
or transformer (e.g. MinMaxScaler) object
Returns
-------
Boolean that indicates if ``estimator`` has already been fit (True) or not (False).
"""
attrs = [v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")]
return len(attrs) != 0
def determine_feature_dtype(X, features):
"""
Determine if any features are categorical.
"""
feature_names = list(X.columns)
non_cat_features = []
cat_features = []
for f in features:
if f not in feature_names:
raise KeyError(f"'{f}' is not a valid feature.")
if str(X.dtypes[f]) == "category":
cat_features.append(f)
else:
non_cat_features.append(f)
return non_cat_features, cat_features
def cartesian(array, out=None):
"""Generate a cartesian product of input array.
Parameters
    Code comes directly from sklearn/utils/extmath.py
----------
array : list of array-like
1-D array to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(array)) containing cartesian products
formed of input array.
    Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
array = [np.asarray(x) for x in array]
shape = (len(x) for x in array)
dtype = array[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(array), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(array):
out[:, n] = array[n][ix[:, n]]
return out
def to_dataframe(results, estimator_names, feature_names):
"""
Convert the feature contribution results to a pandas.DataFrame
with nested indexing.
"""
# results[0] = dict of avg. contributions per estimator
# results[1] = dict of avg. feature values per estimator
contrib_names = feature_names.copy()
contrib_names += ["Bias"]
nested_key = results[0][estimator_names[0]].keys()
dframes = []
for key in nested_key:
data = []
for name in estimator_names:
contribs_dict = results[0][name][key]
vals_dict = results[1][name][key]
data.append(
[contribs_dict[f] for f in contrib_names]
+ [vals_dict[f] for f in feature_names]
)
column_names = [f + "_contrib" for f in contrib_names] + [
f + "_val" for f in feature_names
]
df = pd.DataFrame(data, columns=column_names, index=estimator_names)
dframes.append(df)
result = pd.concat(dframes, keys=list(nested_key))
return result
def to_xarray(data):
"""Converts data dict to xarray.Dataset"""
ds = xr.Dataset(data)
return ds
def is_str(a):
"""Check if argument is a string"""
return isinstance(a, str)
def is_list(a):
"""Check if argument is a list"""
return isinstance(a, list)
def to_list(a):
"""Convert argument to a list"""
return [a]
def is_tuple(a):
"""Check if argument is a tuple"""
return isinstance(a, tuple)
def is_valid_feature(features, official_feature_list):
"""Check if a feature is valid"""
for f in features:
if isinstance(f, tuple):
for sub_f in f:
if sub_f not in official_feature_list:
raise Exception(f"Feature {sub_f} is not a valid feature!")
else:
if f not in official_feature_list:
raise Exception(f"Feature {f} is not a valid feature!")
def is_classifier(estimator):
"""Return True if the given estimator is (probably) a classifier.
Parameters
Function from base.py in sklearn
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Return True if the given estimator is (probably) a regressor.
Parameters
Functions from base.py in sklearn
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
def is_all_dict(alist):
"""Check if every element of a list are dicts"""
return all([isinstance(l, dict) for l in alist])
def compute_bootstrap_indices(X, subsample=1.0, n_bootstrap=1, seed=90):
"""
Routine to generate the indices for bootstrapped X.
Args:
----------------
X : pandas.DataFrame, numpy.array
subsample : float or integer
n_bootstrap : integer
Return:
----------------
bootstrap_indices : list
list of indices of the size of subsample or subsample*len(X)
"""
base_random_state = np.random.RandomState(seed=seed)
random_num_set = base_random_state.choice(10000, size=n_bootstrap, replace=False)
random_states = [np.random.RandomState(s) for s in random_num_set]
n_samples = len(X)
size = int(n_samples * subsample) if subsample <= 1.0 else subsample
bootstrap_indices = [
random_state.choice(range(n_samples), size=size).tolist()
for random_state in random_states
]
return bootstrap_indices
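# Minimal sketch (not part of the original module): draws three bootstrap
# index sets covering half of a ten-row array. Inputs are arbitrary.
def _example_compute_bootstrap_indices():
    X = np.arange(20).reshape(10, 2)
    # Returns a list of 3 lists, each holding 5 row indices sampled with
    # replacement.
    return compute_bootstrap_indices(X, subsample=0.5, n_bootstrap=3, seed=42)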
def merge_dict(dicts):
"""Merge a list of dicts into a single dict"""
return dict(ChainMap(*dicts))
def merge_nested_dict(dicts):
"""
Merge a list of nested dicts into a single dict
"""
merged_dict = {}
for d in dicts:
for key in d.keys():
for subkey in d[key].keys():
if key not in list(merged_dict.keys()):
merged_dict[key] = {subkey: {}}
merged_dict[key][subkey] = d[key][subkey]
return merged_dict
def is_outlier(points, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise.
Parameters:
-----------
points : An numobservations by numdimensions array of observations
thresh : The modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
Returns:
--------
mask : A numobservations-length boolean array.
References:
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
if len(points.shape) == 1:
points = points[:, None]
median = np.median(points, axis=0)
diff = np.sum((points - median) ** 2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
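# Illustrative sketch (not part of the original module): the last value is far
# from the rest and is flagged by the modified z-score test. Data are made up.
def _example_is_outlier():
    points = np.array([1.0, 1.1, 0.9, 1.05, 0.95, 10.0])
    return is_outlier(points, thresh=3.5)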
def cmds(D, k=2):
"""Classical multidimensional scaling
Theory and code references:
https://en.wikipedia.org/wiki/Multidimensional_scaling#Classical_multidimensional_scaling
http://www.nervouscomputer.com/hfs/cmdscale-in-python/
Arguments:
D -- A squared matrix-like object (array, DataFrame, ....), usually a distance matrix
"""
n = D.shape[0]
if D.shape[0] != D.shape[1]:
raise Exception("The matrix D should be squared")
if k > (n - 1):
raise Exception("k should be an integer <= D.shape[0] - 1")
# (1) Set up the squared proximity matrix
D_double = np.square(D)
# (2) Apply double centering: using the centering matrix
# centering matrix
center_mat = np.eye(n) - np.ones((n, n)) / n
# apply the centering
B = -(1 / 2) * center_mat.dot(D_double).dot(center_mat)
# (3) Determine the m largest eigenvalues
# (where m is the number of dimensions desired for the output)
# extract the eigenvalues
eigenvals, eigenvecs = np.linalg.eigh(B)
# sort descending
idx = np.argsort(eigenvals)[::-1]
eigenvals = eigenvals[idx]
eigenvecs = eigenvecs[:, idx]
# (4) Now, X=eigenvecs.dot(eigen_sqrt_diag),
# where eigen_sqrt_diag = diag(sqrt(eigenvals))
eigen_sqrt_diag = np.diag(np.sqrt(eigenvals[0:k]))
ret = eigenvecs[:, 0:k].dot(eigen_sqrt_diag)
return ret
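# Hedged sketch (not part of the original module): recovers a 2-D embedding
# from the pairwise Euclidean distances of four synthetic points.
def _example_cmds():
    from scipy.spatial.distance import pdist, squareform
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    D = squareform(pdist(pts))
    # The returned coordinates reproduce the original configuration up to
    # rotation, reflection, and translation.
    return cmds(D, k=2)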
def order_groups(X, feature):
"""Assign an order to the values of a categorical feature.
The function returns an order to the unique values in X[feature] according to
their similarity based on the other features.
The distance between two categories is the sum over the distances of each feature.
Arguments:
X -- A pandas DataFrame containing all the features to considering in the ordering
(including the categorical feature to be ordered).
feature -- String, the name of the column holding the categorical feature to be ordered.
"""
features = X.columns
# groups = X[feature].cat.categories.values
groups = X[feature].unique()
D_cumu = pd.DataFrame(0, index=groups, columns=groups)
K = len(groups)
for j in set(features) - set([feature]):
D = pd.DataFrame(index=groups, columns=groups)
# discrete/factor feature j
# e.g. j = 'color'
if (X[j].dtypes.name == "category") | (
(len(X[j].unique()) <= 10) & ("float" not in X[j].dtypes.name)
):
# counts and proportions of each value in j in each group in 'feature'
cross_counts = pd.crosstab(X[feature], X[j])
cross_props = cross_counts.div(np.sum(cross_counts, axis=1), axis=0)
for i in range(K):
group = groups[i]
D_values = abs(cross_props - cross_props.loc[group]).sum(axis=1) / 2
D.loc[group, :] = D_values
D.loc[:, group] = D_values
else:
# continuous feature j
# e.g. j = 'length'
# extract the 1/100 quantiles of the feature j
seq = np.arange(0, 1, 1 / 100)
q_X_j = X[j].quantile(seq).to_list()
# get the ecdf (empiricial cumulative distribution function)
# compute the function from the data points in each group
X_ecdf = X.groupby(feature)[j].agg(ECDF)
# apply each of the functions on the quantiles
# i.e. for each quantile value get the probability that j will take
# a value less than or equal to this value.
q_ecdf = X_ecdf.apply(lambda x: x(q_X_j))
for i in range(K):
group = groups[i]
D_values = q_ecdf.apply(lambda x: max(abs(x - q_ecdf[group])))
D.loc[group, :] = D_values
D.loc[:, group] = D_values
D_cumu = D_cumu + D
# To avoid numpy.core._exceptions._UFuncInputCastingError, convert to dtype float32
D_cumu = D_cumu.astype(float)
# reduce the dimension of the cumulative distance matrix to 1
D1D = cmds(D_cumu, 1).flatten()
# order groups based on the values
order_idx = D1D.argsort()
groups_ordered = D_cumu.index[D1D.argsort()]
return pd.Series(range(K), index=groups_ordered)
def quantile_ied(x_vec, q):
"""
Inverse of empirical distribution function (quantile R type 1).
More details in
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mstats.mquantiles.html
https://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html
https://en.wikipedia.org/wiki/Quantile
Arguments:
x_vec -- A pandas series containing the values to compute the quantile for
q -- An array of probabilities (values between 0 and 1)
"""
x_vec = x_vec.sort_values()
n = len(x_vec) - 1
m = 0
j = (n * q + m).astype(int) # location of the value
g = n * q + m - j
gamma = (g != 0).astype(int)
quant_res = (1 - gamma) * x_vec.shift(1, fill_value=0).iloc[j] + gamma * x_vec.iloc[
j
]
quant_res.index = q
# add min at quantile zero and max at quantile one (if needed)
if 0 in q:
quant_res.loc[0] = x_vec.min()
if 1 in q:
quant_res.loc[1] = x_vec.max()
return quant_res
def CI_estimate(x_vec, C=0.95):
"""Estimate the size of the confidence interval of a data sample.
The confidence interval of the given data sample (x_vec) is
[mean(x_vec) - returned value, mean(x_vec) + returned value].
"""
alpha = 1 - C
n = len(x_vec)
stand_err = x_vec.std() / np.sqrt(n)
critical_val = 1 - (alpha / 2)
z_star = stand_err * t.ppf(critical_val, n - 1)
return z_star
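# Minimal sketch (not part of the original module): half-width of a 95%
# confidence interval around the mean of a synthetic sample.
def _example_ci_estimate():
    x_vec = pd.Series(np.random.RandomState(1).normal(loc=5.0, scale=2.0, size=100))
    half_width = CI_estimate(x_vec, C=0.95)
    return x_vec.mean() - half_width, x_vec.mean() + half_width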
dict_disc_to_bin = {
'quartile': [25, 50, 75],
'quintile': [20, 40, 60, 80],
'decile': [10, 20, 30, 40, 50, 60, 70, 80, 90]
}
def ridge_solve(tup):
data_synthetic_onehot, model_pred, weights = tup
solver = Ridge(alpha=1, fit_intercept=True)
solver.fit(data_synthetic_onehot,
model_pred,
sample_weight=weights.ravel())
# Get explanations
importance = solver.coef_[
data_synthetic_onehot[0].toarray().ravel() == 1].ravel()
bias = solver.intercept_
return importance, bias
def kernel_fn(distances, kernel_width):
return np.sqrt(np.exp(-(distances ** 2) / kernel_width ** 2))
def discretize(X, percentiles=[25, 50, 75], all_bins=None):
if all_bins is None:
all_bins = np.percentile(X, percentiles, axis=0).T
return (np.array([np.digitize(a, bins)
for (a, bins) in zip(X.T, all_bins)]).T, all_bins) | scikit-explain | /scikit-explain-0.1.3.tar.gz/scikit-explain-0.1.3/skexplain/common/utils.py | utils.py | import numpy as np
| 0.792304 | 0.412767 |
scikit-ext
==========
|PyPI| |Status| |License| |Travis|
.. |PyPI| image:: https://img.shields.io/pypi/v/scikit-ext.svg
:target: https://pypi.org/project/scikit-ext/
.. |Status| image:: https://img.shields.io/pypi/status/scikit-ext.svg
:target: https://pypi.org/project/scikit-ext/
.. |License| image:: https://img.shields.io/pypi/l/scikit-ext.svg
:target: https://github.com/denver1117/scikit-ext/blob/master/LICENSE
.. |Travis| image:: https://travis-ci.org/denver1117/scikit-ext.svg?branch=master
:target: https://travis-ci.org/denver1117/scikit-ext
About
~~~~~
The ``scikit_ext`` package contains various scikit-learn extensions,
built entirely on top of ``sklearn`` base classes. The package is
separated into two modules:
`estimators <http://scikit-ext.s3-website-us-east-1.amazonaws.com/scikit_ext.html#module-scikit_ext.estimators>`__
and
`scorers <http://scikit-ext.s3-website-us-east-1.amazonaws.com/scikit_ext.html#module-scikit_ext.scorers>`__.
Full documentation can be found
`here <http://scikit-ext.s3-website-us-east-1.amazonaws.com/index.html>`__.
Installation
~~~~~~~~~~~~
`Package Index on PyPI <https://pypi.python.org/pypi/scikit-ext>`__ To
install:
::
pip install scikit-ext
Estimators
~~~~~~~~~~
- ``MultiGridSearchCV``: Extension to native sklearn ``GridSearchCV``
   for multiple estimators and param\_grids. Accepts a list of
   estimators and a list of param\_grids, fits a ``GridSearchCV`` model
   for each estimator/param\_grid pair, and chooses the best fitted
   ``GridSearchCV`` model (see the usage sketch after this list).
   Inherits sklearn's
``BaseSearchCV`` class, so attributes and methods are all similar to
``GridSearchCV``.
- ``PrunedPipeline``: Extension to native sklearn ``Pipeline`` intended
for text learning pipelines with a vectorization step and a feature
selection step. Instead of remembering all vectorizer vocabulary
elements and selecting appropriate features at prediction time, the
extension prunes the vocabulary after fitting to only include
   elements that will ultimately survive the feature selection filter
applied later in the pipeline. This reduces memory and improves
prediction latency. Predictions will be identical to those made with
a trained ``Pipeline`` model. Inherits sklearn's ``Pipeline`` class,
so attributes and methods are all similar to ``Pipeline``.
- ``ZoomGridSearchCV``: Extension to native sklearn ``GridSearchCV``.
Fits multiple ``GridSearchCV`` models, updating the ``param_grid``
after each iteration. The update looks at successful parameter values
for each grid key. A new list of values is created which expands the
resolution of the search values centered around the best performing
value of the previous fit. This allows the standard grid search
process to start with a small number of distant values for each
parameter, and zoom in as the better performing corner of the
hyperparameter search space becomes clear.
- ``IterRandomEstimator``: Meta-Estimator intended primarily for
unsupervised estimators whose fitted model can be heavily dependent
on an arbitrary random initialization state. It is
best used for problems where a ``fit_predict`` method is intended, so
the only data used for prediction will be the same data on which the
model was fitted.
- ``OptimizedEnsemble``: An optimized ensemble class. Will find the
optimal ``n_estimators`` parameter for the given ensemble estimator,
according to the specified input parameters.
- ``OneVsRestAdjClassifier``: One-Vs-Rest multiclass strategy. The
adjusted version is a custom extension which overwrites the inherited
``predict_proba`` method with a more flexible method allowing custom
normalization for the predicted probabilities. Any norm argument that
can be passed directly to ``sklearn.preprocessing.normalize`` is
allowed. Additionally, norm=None will skip the normalization step
   altogether. To mimic the inherited ``OneVsRestClassifier`` behavior,
set norm='l2'. All other methods are inherited from
``OneVsRestClassifier``.
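
A minimal usage sketch for the search estimators (names and parameter
values below are illustrative only; see the class docstrings for full
details):

::

    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import RandomForestClassifier
    from scikit_ext.estimators import MultiGridSearchCV

    # X, y: your training features and labels
    estimators = [LogisticRegression(), RandomForestClassifier()]
    param_grids = [{"C": [0.1, 1.0, 10.0]}, {"n_estimators": [10, 100]}]

    model = MultiGridSearchCV(estimators, param_grids, cv=3)
    model.fit(X, y)
    print(model.best_score_, model.best_estimator_)
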
Scorers
~~~~~~~
- ``TimeScorer``: Score using estimated prediction latency of
estimator.
- ``MemoryScorer``: Score using estimated memory of pickled estimator
object.
- ``CombinedScorer``: Score combining multiple scorers by averaging
their scores.
- ``cluster_distribution_score``: Scoring function which scores the
   resulting cluster distribution across classes. A more even
   distribution indicates a higher score (see the sketch after this list).
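
For example, ``cluster_distribution_score`` can be called directly on
fitted cluster labels (a sketch; ``X`` is your feature matrix):

::

    from sklearn.cluster import KMeans
    from scikit_ext.scorers import cluster_distribution_score

    labels = KMeans(n_clusters=3).fit_predict(X)
    score = cluster_distribution_score(X, labels)
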
Authors
~~~~~~~
Evan Harris
License
~~~~~~~
This project is licensed under the MIT License - see the LICENSE file
for details
| scikit-ext | /scikit-ext-0.1.16.tar.gz/scikit-ext-0.1.16/README.rst | README.rst |
| 0.935479 | 0.636113 |
import numpy as np
import time
import _pickle as cPickle
from sklearn.metrics.scorer import _BaseScorer
class TimeScorer(_BaseScorer):
def _score(self, method_caller, estimator, X, y_true=None, n_iter=1, unit=True, scoring=None, tradeoff=None, sample_weight=None):
"""
Evaluate prediction latency.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like, default None
Gold standard target values for X. Not necessary
for _TimeScorer.
n_iter : int, default 1
Number of timing runs.
unit : bool, default True
Use per-unit latency or total latency.
scoring: scorer object, default None
Scorer used for trade-off.
tradeoff: float, default None
Multiplier for tradeoff.
Returns
-------
score : float
Custom score combining scoring method (optional)
and estimator prediction latency (ms).
"""
# overwrite kwargs from _kwargs
if "n_iter" in self._kwargs.keys():
n_iter = self._kwargs["n_iter"]
if "unit" in self._kwargs.keys():
unit = self._kwargs["unit"]
if "scoring" in self._kwargs.keys():
scoring = self._kwargs["scoring"]
if "tradeoff" in self._kwargs.keys():
tradeoff = self._kwargs["tradeoff"]
# run timing iterations
count = 0
time_sum = 0
while count < n_iter:
count += 1
if unit:
time_sum += np.sum([
self._elapsed(estimator, [x])
for x in X])
else:
time_sum += self._elapsed(estimator, X)
unit_time = 1000 * float((time_sum / float(n_iter)) / float(len(X)))
if scoring and tradeoff:
scoring_score = scoring(estimator, X, y_true)
return scoring_score - (tradeoff * unit_time)
else:
return 1. / unit_time
def _elapsed(self, estimator, X):
"""
Return elapsed time for predict method of estimator
on X.
"""
start_time = time.time()
y_pred = estimator.predict(X)
end_time = time.time()
return end_time - start_time
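# Illustrative usage (assumes _BaseScorer's constructor signature of
# (score_func, sign, kwargs) from the sklearn versions this module targets):
#   timer = TimeScorer(None, 1, {"n_iter": 5, "unit": True})
#   GridSearchCV(estimator, param_grid, scoring=timer)
# The score is roughly 1 / mean per-record latency in milliseconds, so faster
# models receive higher scores.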
class MemoryScorer(_BaseScorer):
def _score(self, method_caller, estimator, X=None, y_true=None, scoring=None, tradeoff=None, sample_weight=None):
"""
Score using estimated memory of pickled estimator object.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
Not necessary for _MemoryScorer.
y_true : array-like, default None
Gold standard target values for X. Not necessary
for _MemoryScorer.
scoring: scorer object, default None
Scorer used for trade-off.
tradeoff: float, default None
Multiplier for tradeoff.
Returns
-------
score : float
Custom score combining scoring method (optional)
and estimator memory (MB).
"""
# overwrite kwargs from _kwargs
if "scoring" in self._kwargs.keys():
scoring = self._kwargs["scoring"]
if "tradeoff" in self._kwargs.keys():
tradeoff = self._kwargs["tradeoff"]
obj_size = (0.000001 * float(len(cPickle.dumps(estimator))))
if scoring and tradeoff:
scoring_score = scoring(estimator, X, y_true)
return scoring_score - (tradeoff * obj_size)
else:
return 1. / obj_size
class CombinedScorer(_BaseScorer):
def _score(self, method_caller, estimator, X=None, y_true=None, scoring=None, sample_weight=None):
"""
Combine multiple scorers using the average of their scores.
Parameters
----------
method_caller : callable
Returns predictions given an estimator, method name, and other
arguments, potentially caching results.
estimator : object
Trained estimator to use for scoring.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
            Not necessary for ``CombinedScorer``.
        y_true : array-like, default None
            Gold standard target values for X. Not necessary
            for ``CombinedScorer``.
scoring: list of scorer objects, default None
List of scorers to average.
Returns
-------
score : float
Custom score combining input scoring methods
            using the mean score.
"""
# overwrite kwargs from _kwargs
if "scoring" in self._kwargs.keys():
scoring = self._kwargs["scoring"]
if (not isinstance(scoring, list)) and (not isinstance(scoring, tuple)):
scoring = [scoring]
return np.mean([x(estimator, X, y_true) for x in scoring])
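# Illustrative usage (assumes the same _BaseScorer-style construction as above):
#   combined = CombinedScorer(None, 1, {"scoring": [scorer_a, scorer_b]})
# Each scorer in the list is called as scorer(estimator, X, y_true) and the
# mean of the resulting scores is returned.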
def cluster_distribution_score(X, labels):
"""
    Scoring function which scores the resulting cluster distribution across classes.
A more even distribution indicates a higher score.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Cluster Distribution score.
"""
n_clusters = float(len(np.unique(labels)))
max_count = float(np.max(np.bincount(labels)))
    return 1.0 / ((max_count / len(labels)) / (1.0 / n_clusters))
| scikit-ext | /scikit-ext-0.1.16.tar.gz/scikit-ext-0.1.16/scikit_ext/scorers.py | scorers.py |
| 0.910438 | 0.401219 |
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import normalize
from sklearn.base import (
BaseEstimator, ClassifierMixin,
is_classifier, clone)
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.exceptions import NotFittedError
from sklearn.metrics import calinski_harabasz_score
from sklearn.pipeline import Pipeline
class ZoomGridSearchCV(GridSearchCV):
"""
Fits multiple ``GridSearchCV`` models, updating
the ``param_grid`` after each iteration. The update
looks at successful parameter values for each
grid key. A new list of values is created which
expands the resolution of the search values centered
around the best performing value of the previous fit.
This allows the standard grid search process to start
with a small number of distant values for each parameter,
and zoom in as the better performing corner of the
hyperparameter search space becomes clear.
    The process only updates parameter keys whose values
are all of type ``int`` or are all of type ``float``. Any
other data type valued parameters or mixed parameters will
simply be copied and reused for each iteration. The only
stopping criteria for iterations is the ``n_iter`` parameter.
Inherits ``GridSearchCV`` so all methods
and attributes are identical except for ``fit``
which is overriden by a method looping through
the ``fit`` method of ``GridSearchCV``. Ultimately,
the class exactly resembles a fitted ``GridSearchCV``
after ``fit`` is run. Running ``fit`` with
    ``n_iter = 0`` is identical to running ``fit``
with ``GridSearchCV``.
"""
def __init__(self, estimator, param_grid,
n_iter=1, **kwargs):
GridSearchCV.__init__(
self, estimator, param_grid, **kwargs)
self._fit = GridSearchCV.fit
self.n_iter=n_iter
def fit(self, X, y=None, groups=None, **fit_params):
"""
Run fit with all sets of parameters. For ``n_iter``
iterations, zoom in on successful parameters, creating
a new parameter grid and refitting.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
n = -1
while n < self.n_iter:
if n > -1:
self._update_grid()
if self.verbose > 0:
print("Grid Updated on Iteration {0}:".format(n))
print(self.param_grid)
else:
if self.verbose > 0:
print("Initial Grid:")
print(self.param_grid)
GridSearchCV.fit(self, X, y=y, groups=groups, **fit_params)
n += 1
def _update_grid(self):
""" Update parameter grid based on previous fit results """
results = pd.DataFrame(self.cv_results_)
# get parameters to update
update_params = {}
for key, value in self.param_grid.items():
if all(isinstance(x, int) for x in value):
updated_value = self._update_elements(
results, key, value, dtype=int)
elif all(isinstance(x, float) for x in value):
updated_value = self._update_elements(
results, key, value, dtype=float)
else:
updated_value = value
if len(updated_value) > 0:
update_params[key] = updated_value
# update parameter grid attribute
self.param_grid = update_params
def _update_elements(self, results, key, value, dtype=int):
""" Update elements of a single param_grid key, value pair """
tmp = (results.loc[~pd.isnull(results["param_{0}".format(key)]),
["param_{0}".format(key), "rank_test_score"]]
.sort_values("rank_test_score"))
best_val = tmp["param_{0}".format(key)].values[0]
value_range = (np.max(value) - np.min(value)) / 5.0
val = (
list(
np.linspace(best_val, best_val + value_range,
int(round(len(value) / 2.0)))) +
list(
np.linspace(best_val - value_range, best_val,
int(round(len(value) / 2.0)))))
val = list(np.unique([dtype(x) for x in val]))
if all(x >= 0 for x in value):
val = [x for x in val if x >= 0]
elif all(x <= 0 for x in value):
val = [x for x in val if x <= 0]
return val
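# Illustrative usage (values are arbitrary): start from a coarse grid and let
# the search refine it over a few zoom iterations.
#   model = ZoomGridSearchCV(RandomForestClassifier(),
#                            {"n_estimators": [10, 100, 1000]},
#                            n_iter=3, cv=3)
#   model.fit(X, y)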
class PrunedPipeline(Pipeline):
"""
A standard sklearn feature Pipeline with additional
pruning method. After fitting, the pruning method is
applied to the fitted pipeline. This applies the
feature selection directly to the fitted vocabulary
(and idf values if applicable), removing all elements of
these attributes that will not ultimately survive the
feature selection filter.
    The ``PrunedPipeline`` will make identical predictions
as a similarly trained ``Pipeline``. However, it will require
less memory and will make faster predictions.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : None, str or object with the joblib.Memory interface, optional
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
vectorizer_name : str, default vec
Name of ``Pipeline`` step which performs feature extraction. Any
        transformer with a ``vocabulary_`` dictionary can be the step
with this name.
Ideal transformers are of types
sklearn.feature_extraction.text.CountVectorizer or
sklearn.feature_extraction.text.TfidfVectorizer.
selector_name : str, default select
Name of ``Pipeline`` step which performs feature selection. Any
transformer with a ``get_support`` method returning an iterable
of booleans with length ``len(vocabulary_)`` can be the step with this name.
Ideal transformers are of type sklearn.feature_selection.univariate_selection._BaseFilter.
Attributes
----------
named_steps : bunch object, a dictionary with attribute access
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
"""
def __init__(self, steps, memory=None,
vectorizer_name="vec",
selector_name="select",
verbose=False):
self.steps = steps
self.memory = memory
self.verbose = verbose
self.vectorizer_name = vectorizer_name
self.selector_name = selector_name
self._validate_steps()
self._validate_prune()
def fit(self, X, y=None, **fit_params):
"""
Fit the model
Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Perform prune after standard pipeline fit.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : PrunedPipeline
This estimator
"""
# standard Pipeline fit method
self._validate_prune()
Xt, fit_params = self._fit(X, y, **fit_params)
if self._final_estimator is not None:
self._final_estimator.fit(Xt, y, **fit_params)
# prune pipeline
if self.selector_name and self.vectorizer_name:
self._prune()
return self
def _validate_prune(self):
""" Validate prune step inputs """
names, estimators = zip(*self.steps)
for name in [self.selector_name, self.vectorizer_name]:
if name:
if not name in names:
raise ValueError(
"Name {0} should exist in steps".format(
name))
self.selector_index = names.index(self.selector_name)
self.vectorizer_index = names.index(self.vectorizer_name)
def _prune(self):
"""
Prune fitted ``Pipeline`` object. The pruner runs the
``get_support`` method from the designated feature
selector, returning the selector mask. Then the ``vocabulary_``
(and optional ``idf_`` if exists) attribute is pruned
to only contain elements who survive the selector mask. The
selector step is then removed from the pipeline.
Transform methods on the pipeline will then reflect these
changes, reducing the size of the vectorizer and effectively
skipping the selector step.
"""
# collect pipeline step data
voc = self.steps[self.vectorizer_index][1].vocabulary_
if hasattr(self.steps[self.vectorizer_index][1], "idf_"):
idf = self.steps[self.vectorizer_index][1].idf_
else:
idf = None
support = self.steps[self.selector_index][1].get_support()
# restructure vocabulary
terms = []
indices = []
for key, value in voc.items():
terms.append(key)
indices.append(value)
sort_mask = np.argsort(indices)
terms = np.array(terms)[sort_mask]
# rebuild vocabulary dictionary
new_vocab = {}
new_idf = []
count = 0
for index in range(len(terms)):
if support[index]:
new_vocab[terms[index]] = count
if idf is not None:
new_idf.append(idf[index])
count += 1
# replace vocabulary
self.steps[self.vectorizer_index][1].vocabulary_ = new_vocab
if idf is not None:
self.steps[self.vectorizer_index][1]._tfidf._idf_diag = csr_matrix(np.diag(new_idf))
removed_step = self.steps.pop(self.selector_index)
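# Illustrative usage (step names must match vectorizer_name/selector_name,
# which default to "vec" and "select"):
#   pipe = PrunedPipeline([("vec", TfidfVectorizer()),
#                          ("select", SelectKBest(chi2, k=1000)),
#                          ("clf", LogisticRegression())])
#   pipe.fit(docs, labels)  # vocabulary (and idf values) are pruned after fitting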
class MultiGridSearchCV(BaseSearchCV):
"""
An iterator through multiple GridSearchCV
models using various ``estimators`` and associated ``param_grids``.
    Given two equal length iterables of estimators and parameter grids
    (plus keyword arguments for GridSearchCV), it iterates through them,
    fitting a GridSearchCV model for each estimator/param_grid pair
    sequentially.
    Then the maximum ``best_score_`` is compared across the
GridSearchCV models, and the best one is identified. The best
estimator is set as an attribute, ``best_estimator_`` and
the best GridSearchCV model is set as an attribute,
``best_grid_search_cv_``.
"""
def __init__(self, estimators, param_grids, gs_estimator=GridSearchCV, **kwargs):
self.estimators=estimators
self.param_grids=param_grids
self.gs_estimator=gs_estimator
self.gs_kwargs=kwargs
BaseSearchCV.__init__(
self, None, **kwargs)
def fit(self, X, y=None):
"""
Iterate through estimators and param_grids, fitting
each, and then chosing the best.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
# Iterate through estimators fitting each
models = []
for index in range(len(self.estimators)):
model = self.gs_estimator(
self.estimators[index],
self.param_grids[index],
**self.gs_kwargs)
model.fit(X, y)
models.append(model)
# Generate cross validation results
cv_df = pd.DataFrame()
for index in range(len(models)):
tmpDf = pd.DataFrame(models[index].cv_results_)
tmpDf["grid_search_index"] = index
cv_df = cv_df.append(tmpDf, sort=True)
cv_df.index = range(len(cv_df))
cv_df = cv_df[[c for c in cv_df.columns if "param_" not in c]]
cv_df["rank_test_score"] = map(int,
(len(cv_df) + 1) -
rankdata(cv_df["mean_test_score"], method="ordinal"))
self.cv_results_ = {}
for col in cv_df.columns:
self.cv_results_[col] = list(cv_df[col].values)
# Find best model and set associated attributes
self.scores_ = [x.best_score_ for x in models]
self.best_index_ = np.argmax(self.scores_)
self.best_score_ = models[self.best_index_].best_score_
self.best_grid_search_cv_ = models[self.best_index_]
self.best_estimator_ = models[self.best_index_].best_estimator_
self.scorer_ = self.best_grid_search_cv_.scorer_
self.multimetric_ = self.best_grid_search_cv_.multimetric_
self.n_splits_ = self.best_grid_search_cv_.n_splits_
return self
class IterRandomEstimator(BaseEstimator, ClassifierMixin):
"""
Meta-Estimator intended primarily for unsupervised
estimators whose fitted model can be heavily dependent
on an arbitrary random initialization state. It is
best used for problems where a ``fit_predict`` method
is intended, so the only data used for prediction will be
the same data on which the model was fitted.
The ``fit`` method will fit multiple iterations of the same
base estimator, varying the ``random_state`` argument
for each iteration. The iterations will stop either
when ``max_iter`` is reached, or when the target
score is obtained.
The model does not use cross validation to find the best
estimator. It simply fits and scores on the entire input
    data set. A hyperparameter is not being optimized here,
only random initialization states. The idea is to find
the best fitted model, and keep that exact model, rather
than to find the best hyperparameter set.
"""
def __init__(self, estimator, target_score=None,
max_iter=10, random_state=None,
scoring=calinski_harabasz_score,
fit_params=None, verbose=0):
self.estimator=estimator
self.target_score=target_score
self.max_iter=max_iter
self.random_state=random_state
if not self.random_state:
self.random_state = np.random.randint(100)
self.fit_params=fit_params
self.verbose=verbose
self.scoring=scoring
def fit(self, X, y=None, **fit_params):
"""
Run fit on the estimator attribute multiple times
with various ``random_state`` arguments and choose
the fitted estimator with the best score.
Uses ``calinski_harabasz_score`` if no scoring is provided.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
estimator.verbose = self.verbose
if self.verbose > 0:
            if self.target_score is not None:
print("Fitting {0} estimators unless a target "
"score of {1} is reached".format(
self.max_iter, self.target_score))
else:
print("Fitting {0} estimators".format(
self.max_iter))
count = 0
scores = []
estimators = []
states = []
random_state = self.random_state
        if not random_state:
            # self.random_state is set in __init__, but fall back to a random seed just in case
            random_state = np.random.randint(100)
while count < self.max_iter:
estimator = clone(estimator)
if random_state:
random_state = random_state + 1
estimator.random_state = random_state
estimator.fit(X, y, **fit_params)
labels = estimator.labels_
score = self.scoring(X, labels)
scores.append(score)
estimators.append(estimator)
states.append(random_state)
if self.target_score is not None and score > self.target_score:
break
count += 1
self.best_estimator_ = estimators[np.argmax(scores)]
self.best_score_ = np.max(scores)
self.best_index_ = np.argmax(scores)
self.best_params_ = self.best_estimator_.get_params()
self.scores_ = scores
self.random_states_ = states
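# Illustrative usage (the wrapped estimator must expose a `labels_` attribute
# after fitting, e.g. KMeans):
#   model = IterRandomEstimator(KMeans(n_clusters=8), max_iter=20)
#   model.fit(X)
#   best_kmeans = model.best_estimator_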
class OptimizedEnsemble(BaseSearchCV):
"""
An optimized ensemble class. Will find the optimal ``n_estimators``
parameter for the given ensemble estimator, according to the
specified input parameters.
The ``fit`` method will iterate through n_estimators options,
starting with n_estimators_init, and using the step_function
    recursively from there. Stop at max_iter or when the score
gain between iterations is less than threshold.
The OptimizedEnsemble class can then itself be used
as an Estimator, or the ``best_estimator_`` attribute
can be accessed directly, which is a fitted version of the input
estimator with the optimal parameters.
"""
def __init__(self, estimator, n_estimators_init=5,
threshold=0.01, max_iter=10,
step_function=lambda x: x*2,
**kwargs):
self.n_estimators_init=n_estimators_init
self.threshold=threshold
self.step_function=step_function
self.max_iter=max_iter
BaseSearchCV.__init__(
self, estimator, **kwargs)
def fit(self, X, y, **fit_params):
"""
Find the optimal ``n_estimators`` parameter using a custom
optimization routine.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv.get_n_splits(X, y, groups=None)
if self.verbose > 0:
print("Fitting {0} folds for each n_estimators candidate, "
"for a maximum of {1} candidates, totalling"
" a maximum of {2} fits".format(n_splits,
self.max_iter, self.max_iter * n_splits))
count = 0
scores = []
n_estimators = []
n_est = self.n_estimators_init
while count < self.max_iter:
estimator = clone(estimator)
estimator.n_estimators = n_est
score = np.mean(cross_val_score(
estimator, X, y, cv=self.cv,
scoring=self.scoring,
fit_params=fit_params,
verbose=self.verbose,
n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch))
scores.append(score)
n_estimators.append(n_est)
if (count > 0 and
(scores[count] - scores[count - 1]) < self.threshold):
break
else:
best_estimator = estimator
count += 1
n_est = self.step_function(n_est)
self.scores_ = scores
self.n_estimators_list_ = n_estimators
if self.refit:
self.best_estimator_ = clone(best_estimator)
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
self.best_index_ = count - 1
self.best_score_ = self.scores_[count - 1]
self.best_n_estimators_ = self.n_estimators_list_[count - 1]
self.best_params_ = self.best_estimator_.get_params()
return self
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def score(self, X, y=None):
"""
Call score on the estimator with the best found parameters.
Only available if the underlying estimator supports ``score``.
This uses the score defined by the ``best_estimator_.score`` method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
return self.best_estimator_.score(X, y)
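# Illustrative usage (with the default step function, n_estimators doubles each
# step until the cross-validated score gain drops below `threshold` or
# `max_iter` is reached):
#   model = OptimizedEnsemble(RandomForestClassifier(), n_estimators_init=5,
#                             threshold=0.01, cv=3)
#   model.fit(X, y)
#   print(model.best_n_estimators_)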
class OneVsRestAdjClassifier(OneVsRestClassifier):
"""
One-vs-the-rest (OvR) multiclass strategy
Also known as one-vs-all, this strategy consists in fitting one classifier per class.
For each classifier, the class is fitted against all the other classes.
In addition to its computational efficiency (only n_classes classifiers are needed),
one advantage of this approach is its interpretability.
Since each class is represented by one and one classifier only, it is possible to gain
knowledge about the class by inspecting its corresponding classifier.
This is the most commonly used strategy for multiclass classification and is a fair default choice.
The adjusted version is a custom extension which overwrites the inherited predict_proba() method with
a more flexible method allowing custom normalization for the predicted probabilities. Any norm
argument that can be passed directly to sklearn.preprocessing.normalize is allowed. Additionally,
    norm=None will skip the normalization step altogether. To mimic the inherited OneVsRestClassifier
behavior, set norm='l2'. All other methods are inherited from OneVsRestClassifier.
Parameters
----------
estimator : estimator object
An estimator object implementing fit and one of decision_function or predict_proba.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used.
norm: str, optional, default: None
Normalization method to be passed straight into sklearn.preprocessing.normalize as the norm
input. A value of None (default) will skip the normalization step.
Attributes
----------
estimators_ : list of n_classes estimators
Estimators used for predictions.
classes_ : array, shape = [n_classes]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, norm=None, **kwargs):
OneVsRestClassifier.__init__(
self, estimator, **kwargs)
self.norm = norm
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in self.classes_.
"""
probs = []
for index in range(len(self.estimators_)):
probs.append(self.estimators_[index].predict_proba(X)[:,1])
out = np.array([
[probs[y][index] for y in range(len(self.estimators_))]
for index in range(len(probs[0]))])
if self.norm:
return normalize(out, norm=self.norm)
else:
            return out
| scikit-ext | /scikit-ext-0.1.16.tar.gz/scikit-ext-0.1.16/scikit_ext/estimators.py | estimators.py |
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import normalize
from sklearn.base import (
BaseEstimator, ClassifierMixin,
is_classifier, clone)
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.exceptions import NotFittedError
from sklearn.metrics import calinski_harabasz_score
from sklearn.pipeline import Pipeline
class ZoomGridSearchCV(GridSearchCV):
"""
Fits multiple ``GridSearchCV`` models, updating
the ``param_grid`` after each iteration. The update
looks at successful parameter values for each
grid key. A new list of values is created which
expands the resolution of the search values centered
around the best performing value of the previous fit.
This allows the standard grid search process to start
with a small number of distant values for each parameter,
and zoom in as the better performing corner of the
hyperparameter search space becomes clear.
    The process only updates parameter keys whose values
are all of type ``int`` or are all of type ``float``. Any
other data type valued parameters or mixed parameters will
simply be copied and reused for each iteration. The only
stopping criteria for iterations is the ``n_iter`` parameter.
Inherits ``GridSearchCV`` so all methods
and attributes are identical except for ``fit``
which is overriden by a method looping through
the ``fit`` method of ``GridSearchCV``. Ultimately,
the class exactly resembles a fitted ``GridSearchCV``
after ``fit`` is run. Running ``fit`` with
    ``n_iter = 0`` is identical to running ``fit``
with ``GridSearchCV``.
"""
def __init__(self, estimator, param_grid,
n_iter=1, **kwargs):
GridSearchCV.__init__(
self, estimator, param_grid, **kwargs)
self._fit = GridSearchCV.fit
self.n_iter=n_iter
def fit(self, X, y=None, groups=None, **fit_params):
"""
Run fit with all sets of parameters. For ``n_iter``
iterations, zoom in on successful parameters, creating
a new parameter grid and refitting.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
n = -1
while n < self.n_iter:
if n > -1:
self._update_grid()
if self.verbose > 0:
print("Grid Updated on Iteration {0}:".format(n))
print(self.param_grid)
else:
if self.verbose > 0:
print("Initial Grid:")
print(self.param_grid)
GridSearchCV.fit(self, X, y=y, groups=groups, **fit_params)
n += 1
def _update_grid(self):
""" Update parameter grid based on previous fit results """
results = pd.DataFrame(self.cv_results_)
# get parameters to update
update_params = {}
for key, value in self.param_grid.items():
if all(isinstance(x, int) for x in value):
updated_value = self._update_elements(
results, key, value, dtype=int)
elif all(isinstance(x, float) for x in value):
updated_value = self._update_elements(
results, key, value, dtype=float)
else:
updated_value = value
if len(updated_value) > 0:
update_params[key] = updated_value
# update parameter grid attribute
self.param_grid = update_params
def _update_elements(self, results, key, value, dtype=int):
""" Update elements of a single param_grid key, value pair """
tmp = (results.loc[~pd.isnull(results["param_{0}".format(key)]),
["param_{0}".format(key), "rank_test_score"]]
.sort_values("rank_test_score"))
best_val = tmp["param_{0}".format(key)].values[0]
value_range = (np.max(value) - np.min(value)) / 5.0
val = (
list(
np.linspace(best_val, best_val + value_range,
int(round(len(value) / 2.0)))) +
list(
np.linspace(best_val - value_range, best_val,
int(round(len(value) / 2.0)))))
val = list(np.unique([dtype(x) for x in val]))
if all(x >= 0 for x in value):
val = [x for x in val if x >= 0]
elif all(x <= 0 for x in value):
val = [x for x in val if x <= 0]
return val
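# Usage sketch (added for illustration; not part of the original scikit-ext
# module). It shows how the ZoomGridSearchCV defined above could be used,
# assuming scikit-learn's load_iris and LogisticRegression are available.
# The parameter values are arbitrary and only chosen to show the zooming.
def _zoom_grid_search_example():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    # Start from a coarse grid; each iteration re-centres and refines the
    # grid around the best value found so far.
    search = ZoomGridSearchCV(
        LogisticRegression(max_iter=1000),
        param_grid={"C": [0.01, 1.0, 100.0]},
        n_iter=2,
        cv=3,
    )
    search.fit(X, y)
    return search.best_params_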
class PrunedPipeline(Pipeline):
"""
A standard sklearn feature Pipeline with additional
pruning method. After fitting, the pruning method is
applied to the fitted pipeline. This applies the
feature selection directly to the fitted vocabulary
(and idf values if applicable), removing all elements of
these attributes that will not ultimately survive the
feature selection filter.
    The ``PrunedPipeline`` will make identical predictions
as a similarly trained ``Pipeline``. However, it will require
less memory and will make faster predictions.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
memory : None, str or object with the joblib.Memory interface, optional
Used to cache the fitted transformers of the pipeline. By default,
no caching is performed. If a string is given, it is the path to
the caching directory. Enabling caching triggers a clone of
the transformers before fitting. Therefore, the transformer
instance given to the pipeline cannot be inspected
directly. Use the attribute ``named_steps`` or ``steps`` to
inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
vectorizer_name : str, default vec
Name of ``Pipeline`` step which performs feature extraction. Any
    transformer with a ``vocabulary_`` dictionary can be the step
with this name.
Ideal transformers are of types
sklearn.feature_extraction.text.CountVectorizer or
sklearn.feature_extraction.text.TfidfVectorizer.
selector_name : str, default select
Name of ``Pipeline`` step which performs feature selection. Any
transformer with a ``get_support`` method returning an iterable
of booleans with length ``len(vocabulary_)`` can be the step with this name.
Ideal transformers are of type sklearn.feature_selection.univariate_selection._BaseFilter.
Attributes
----------
named_steps : bunch object, a dictionary with attribute access
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
"""
def __init__(self, steps, memory=None,
vectorizer_name="vec",
selector_name="select",
verbose=False):
self.steps = steps
self.memory = memory
self.verbose = verbose
self.vectorizer_name = vectorizer_name
self.selector_name = selector_name
self._validate_steps()
self._validate_prune()
def fit(self, X, y=None, **fit_params):
"""
Fit the model
Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Perform prune after standard pipeline fit.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : PrunedPipeline
This estimator
"""
# standard Pipeline fit method
self._validate_prune()
Xt, fit_params = self._fit(X, y, **fit_params)
if self._final_estimator is not None:
self._final_estimator.fit(Xt, y, **fit_params)
# prune pipeline
if self.selector_name and self.vectorizer_name:
self._prune()
return self
def _validate_prune(self):
""" Validate prune step inputs """
names, estimators = zip(*self.steps)
for name in [self.selector_name, self.vectorizer_name]:
if name:
if not name in names:
raise ValueError(
"Name {0} should exist in steps".format(
name))
self.selector_index = names.index(self.selector_name)
self.vectorizer_index = names.index(self.vectorizer_name)
def _prune(self):
"""
Prune fitted ``Pipeline`` object. The pruner runs the
``get_support`` method from the designated feature
selector, returning the selector mask. Then the ``vocabulary_``
(and optional ``idf_`` if exists) attribute is pruned
        to only contain elements that survive the selector mask. The
selector step is then removed from the pipeline.
Transform methods on the pipeline will then reflect these
changes, reducing the size of the vectorizer and effectively
skipping the selector step.
"""
# collect pipeline step data
voc = self.steps[self.vectorizer_index][1].vocabulary_
if hasattr(self.steps[self.vectorizer_index][1], "idf_"):
idf = self.steps[self.vectorizer_index][1].idf_
else:
idf = None
support = self.steps[self.selector_index][1].get_support()
# restructure vocabulary
terms = []
indices = []
for key, value in voc.items():
terms.append(key)
indices.append(value)
sort_mask = np.argsort(indices)
terms = np.array(terms)[sort_mask]
# rebuild vocabulary dictionary
new_vocab = {}
new_idf = []
count = 0
for index in range(len(terms)):
if support[index]:
new_vocab[terms[index]] = count
if idf is not None:
new_idf.append(idf[index])
count += 1
# replace vocabulary
self.steps[self.vectorizer_index][1].vocabulary_ = new_vocab
if idf is not None:
self.steps[self.vectorizer_index][1]._tfidf._idf_diag = csr_matrix(np.diag(new_idf))
removed_step = self.steps.pop(self.selector_index)
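# Usage sketch (added for illustration; not part of the original scikit-ext
# module). It builds a small text pipeline with the default step names
# "vec" and "select" and shows that, after fitting, the PrunedPipeline above
# keeps only the selected terms in the vectorizer vocabulary. The corpus and
# labels are made up, and the sketch assumes the older scikit-learn versions
# this module was written against.
def _pruned_pipeline_example():
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.feature_selection import SelectKBest, chi2
    from sklearn.linear_model import LogisticRegression
    corpus = ["good movie", "bad movie", "great film", "awful film"]
    labels = [1, 0, 1, 0]
    pipe = PrunedPipeline([
        ("vec", TfidfVectorizer()),
        ("select", SelectKBest(chi2, k=3)),
        ("clf", LogisticRegression()),
    ])
    pipe.fit(corpus, labels)
    # The selector step has been removed and only the 3 surviving terms
    # remain in the vectorizer vocabulary.
    return len(pipe.named_steps["vec"].vocabulary_)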
class MultiGridSearchCV(BaseSearchCV):
"""
An iterator through multiple GridSearchCV
models using various ``estimators`` and associated ``param_grids``.
    Given two equal-length iterables as required arguments,
    containing the estimators and their parameter grids, as well as keyword
    arguments for GridSearchCV, it simply iterates through and fits multiple
    GridSearchCV models, one after another.
    Then the ``best_score_`` values are compared across the
    GridSearchCV models, and the best one is identified. The best
estimator is set as an attribute, ``best_estimator_`` and
the best GridSearchCV model is set as an attribute,
``best_grid_search_cv_``.
"""
def __init__(self, estimators, param_grids, gs_estimator=GridSearchCV, **kwargs):
self.estimators=estimators
self.param_grids=param_grids
self.gs_estimator=gs_estimator
self.gs_kwargs=kwargs
BaseSearchCV.__init__(
self, None, **kwargs)
def fit(self, X, y=None):
"""
Iterate through estimators and param_grids, fitting
        each, and then choosing the best.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
# Iterate through estimators fitting each
models = []
for index in range(len(self.estimators)):
model = self.gs_estimator(
self.estimators[index],
self.param_grids[index],
**self.gs_kwargs)
model.fit(X, y)
models.append(model)
# Generate cross validation results
cv_df = pd.DataFrame()
for index in range(len(models)):
tmpDf = pd.DataFrame(models[index].cv_results_)
tmpDf["grid_search_index"] = index
cv_df = cv_df.append(tmpDf, sort=True)
cv_df.index = range(len(cv_df))
cv_df = cv_df[[c for c in cv_df.columns if "param_" not in c]]
cv_df["rank_test_score"] = map(int,
(len(cv_df) + 1) -
rankdata(cv_df["mean_test_score"], method="ordinal"))
self.cv_results_ = {}
for col in cv_df.columns:
self.cv_results_[col] = list(cv_df[col].values)
# Find best model and set associated attributes
self.scores_ = [x.best_score_ for x in models]
self.best_index_ = np.argmax(self.scores_)
self.best_score_ = models[self.best_index_].best_score_
self.best_grid_search_cv_ = models[self.best_index_]
self.best_estimator_ = models[self.best_index_].best_estimator_
self.scorer_ = self.best_grid_search_cv_.scorer_
self.multimetric_ = self.best_grid_search_cv_.multimetric_
self.n_splits_ = self.best_grid_search_cv_.n_splits_
return self
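# Usage sketch (added for illustration; not part of the original scikit-ext
# module). It compares two estimator families, each with its own grid, using
# the MultiGridSearchCV defined above. The estimators, grids and dataset are
# arbitrary choices, and the sketch assumes the older scikit-learn/pandas
# versions this module was written against.
def _multi_grid_search_example():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier
    X, y = load_iris(return_X_y=True)
    search = MultiGridSearchCV(
        estimators=[LogisticRegression(max_iter=1000),
                    DecisionTreeClassifier(random_state=0)],
        param_grids=[{"C": [0.1, 1.0]}, {"max_depth": [2, 4]}],
        cv=3,
    )
    search.fit(X, y)
    # best_estimator_ holds the refitted winner across both grids.
    return search.best_score_, search.best_estimator_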
class IterRandomEstimator(BaseEstimator, ClassifierMixin):
"""
Meta-Estimator intended primarily for unsupervised
estimators whose fitted model can be heavily dependent
on an arbitrary random initialization state. It is
best used for problems where a ``fit_predict`` method
is intended, so the only data used for prediction will be
the same data on which the model was fitted.
The ``fit`` method will fit multiple iterations of the same
base estimator, varying the ``random_state`` argument
for each iteration. The iterations will stop either
when ``max_iter`` is reached, or when the target
score is obtained.
The model does not use cross validation to find the best
estimator. It simply fits and scores on the entire input
    data set. A hyperparameter is not being optimized here,
only random initialization states. The idea is to find
the best fitted model, and keep that exact model, rather
than to find the best hyperparameter set.
"""
def __init__(self, estimator, target_score=None,
max_iter=10, random_state=None,
scoring=calinski_harabasz_score,
fit_params=None, verbose=0):
self.estimator=estimator
self.target_score=target_score
self.max_iter=max_iter
self.random_state=random_state
if not self.random_state:
self.random_state = np.random.randint(100)
self.fit_params=fit_params
self.verbose=verbose
self.scoring=scoring
def fit(self, X, y=None, **fit_params):
"""
Run fit on the estimator attribute multiple times
with various ``random_state`` arguments and choose
the fitted estimator with the best score.
Uses ``calinski_harabasz_score`` if no scoring is provided.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
estimator.verbose = self.verbose
if self.verbose > 0:
            if self.target_score is not None:
print("Fitting {0} estimators unless a target "
"score of {1} is reached".format(
self.max_iter, self.target_score))
else:
print("Fitting {0} estimators".format(
self.max_iter))
count = 0
scores = []
estimators = []
states = []
random_state = self.random_state
        if not random_state:
            # Fall back to a random seed when none was provided
            random_state = np.random.randint(100)
while count < self.max_iter:
estimator = clone(estimator)
if random_state:
random_state = random_state + 1
estimator.random_state = random_state
estimator.fit(X, y, **fit_params)
labels = estimator.labels_
score = self.scoring(X, labels)
scores.append(score)
estimators.append(estimator)
states.append(random_state)
if self.target_score is not None and score > self.target_score:
break
count += 1
self.best_estimator_ = estimators[np.argmax(scores)]
self.best_score_ = np.max(scores)
self.best_index_ = np.argmax(scores)
self.best_params_ = self.best_estimator_.get_params()
self.scores_ = scores
self.random_states_ = states
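# Usage sketch (added for illustration; not part of the original scikit-ext
# module). It refits KMeans under several random initialization states using
# the IterRandomEstimator defined above and keeps the fitted model with the
# best Calinski-Harabasz score. The synthetic blobs and settings are
# arbitrary choices.
def _iter_random_estimator_example():
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
    model = IterRandomEstimator(KMeans(n_clusters=3, n_init=1), max_iter=5)
    model.fit(X)
    return model.best_score_, model.best_estimator_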
class OptimizedEnsemble(BaseSearchCV):
"""
An optimized ensemble class. Will find the optimal ``n_estimators``
parameter for the given ensemble estimator, according to the
specified input parameters.
The ``fit`` method will iterate through n_estimators options,
starting with n_estimators_init, and using the step_function
    recursively from there. It stops at max_iter or when the score
gain between iterations is less than threshold.
The OptimizedEnsemble class can then itself be used
as an Estimator, or the ``best_estimator_`` attribute
can be accessed directly, which is a fitted version of the input
estimator with the optimal parameters.
"""
def __init__(self, estimator, n_estimators_init=5,
threshold=0.01, max_iter=10,
step_function=lambda x: x*2,
**kwargs):
self.n_estimators_init=n_estimators_init
self.threshold=threshold
self.step_function=step_function
self.max_iter=max_iter
BaseSearchCV.__init__(
self, estimator, **kwargs)
def fit(self, X, y, **fit_params):
"""
Find the optimal ``n_estimators`` parameter using a custom
optimization routine.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv.get_n_splits(X, y, groups=None)
if self.verbose > 0:
print("Fitting {0} folds for each n_estimators candidate, "
"for a maximum of {1} candidates, totalling"
" a maximum of {2} fits".format(n_splits,
self.max_iter, self.max_iter * n_splits))
count = 0
scores = []
n_estimators = []
n_est = self.n_estimators_init
while count < self.max_iter:
estimator = clone(estimator)
estimator.n_estimators = n_est
score = np.mean(cross_val_score(
estimator, X, y, cv=self.cv,
scoring=self.scoring,
fit_params=fit_params,
verbose=self.verbose,
n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch))
scores.append(score)
n_estimators.append(n_est)
if (count > 0 and
(scores[count] - scores[count - 1]) < self.threshold):
break
else:
best_estimator = estimator
count += 1
n_est = self.step_function(n_est)
self.scores_ = scores
self.n_estimators_list_ = n_estimators
if self.refit:
self.best_estimator_ = clone(best_estimator)
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
self.best_index_ = count - 1
self.best_score_ = self.scores_[count - 1]
self.best_n_estimators_ = self.n_estimators_list_[count - 1]
self.best_params_ = self.best_estimator_.get_params()
return self
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def score(self, X, y=None):
"""
Call score on the estimator with the best found parameters.
Only available if the underlying estimator supports ``score``.
This uses the score defined by the ``best_estimator_.score`` method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
return self.best_estimator_.score(X, y)
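# Usage sketch (added for illustration; not part of the original scikit-ext
# module). It grows a random forest (5 -> 10 -> 20 -> ... trees) with the
# OptimizedEnsemble defined above until the cross-validated score gain drops
# below the threshold. The dataset and settings are arbitrary, and the sketch
# assumes the older scikit-learn versions this module was written against.
def _optimized_ensemble_example():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    X, y = load_iris(return_X_y=True)
    opt = OptimizedEnsemble(
        RandomForestClassifier(random_state=0),
        n_estimators_init=5,
        threshold=0.01,
        max_iter=4,
        cv=3,
    )
    opt.fit(X, y)
    return opt.best_n_estimators_, opt.scores_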
class OneVsRestAdjClassifier(OneVsRestClassifier):
"""
One-vs-the-rest (OvR) multiclass strategy
Also known as one-vs-all, this strategy consists in fitting one classifier per class.
For each classifier, the class is fitted against all the other classes.
In addition to its computational efficiency (only n_classes classifiers are needed),
one advantage of this approach is its interpretability.
    Since each class is represented by one and only one classifier, it is possible to gain
knowledge about the class by inspecting its corresponding classifier.
This is the most commonly used strategy for multiclass classification and is a fair default choice.
    The adjusted version is a custom extension which overrides the inherited predict_proba() method with
    a more flexible method allowing custom normalization for the predicted probabilities. Any norm
    argument that can be passed directly to sklearn.preprocessing.normalize is allowed. Additionally,
    norm=None will skip the normalization step altogether. To mimic the inherited OneVsRestClassifier
    behavior, set norm='l2'. All other methods are inherited from OneVsRestClassifier.
Parameters
----------
estimator : estimator object
An estimator object implementing fit and one of decision_function or predict_proba.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used.
norm: str, optional, default: None
Normalization method to be passed straight into sklearn.preprocessing.normalize as the norm
input. A value of None (default) will skip the normalization step.
Attributes
----------
estimators_ : list of n_classes estimators
Estimators used for predictions.
classes_ : array, shape = [n_classes]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, norm=None, **kwargs):
OneVsRestClassifier.__init__(
self, estimator, **kwargs)
self.norm = norm
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in self.classes_.
"""
probs = []
for index in range(len(self.estimators_)):
probs.append(self.estimators_[index].predict_proba(X)[:,1])
out = np.array([
[probs[y][index] for y in range(len(self.estimators_))]
for index in range(len(probs[0]))])
if self.norm:
return normalize(out, norm=self.norm)
else:
return out | 0.886131 | 0.543833 |
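# Usage sketch (added for illustration; not part of the original scikit-ext
# module). It shows the effect of the ``norm`` argument of the
# OneVsRestAdjClassifier defined above: norm=None returns the raw per-class
# probabilities, while norm="l1" rescales each row to sum to 1. The dataset
# and base estimator are arbitrary choices.
def _one_vs_rest_adj_example():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    clf = OneVsRestAdjClassifier(LogisticRegression(max_iter=1000), norm="l1")
    clf.fit(X, y)
    # Each row of the returned matrix now sums to 1.
    return clf.predict_proba(X[:3])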
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from skfair.common import as_list
def scalar_projection(vec, unto):
return vec.dot(unto) / unto.dot(unto)
def vector_projection(vec, unto):
return scalar_projection(vec, unto) * unto
class InformationFilter(BaseEstimator, TransformerMixin):
"""
    The `InformationFilter` uses a variant of the Gram-Schmidt process
to filter information out of the dataset. This can be useful if you
want to filter information out of a dataset because of fairness.
To explain how it works: given a training matrix :math:`X` that contains
columns :math:`x_1, ..., x_k`. If we assume columns :math:`x_1` and :math:`x_2`
to be the sensitive columns then the information-filter will
remove information by applying these transformations;
.. math::
\\begin{split}
v_1 & = x_1 \\\\
v_2 & = x_2 - \\frac{x_2 v_1}{v_1 v_1}\\\\
        v_3 & = x_3 - \\frac{x_3 v_1}{v_1 v_1} - \\frac{x_3 v_2}{v_2 v_2}\\\\
        ... \\\\
        v_k & = x_k - \\frac{x_k v_1}{v_1 v_1} - ... - \\frac{x_k v_{k-1}}{v_{k-1} v_{k-1}}
\\end{split}
Concatenating our vectors (but removing the sensitive ones) gives us
a new training matrix :math:`X_{fair} = [v_3, ..., v_k]`.
    :param columns: the columns to filter out; this can be a sequence of either int
(in the case of numpy) or string (in the case of pandas).
    :param alpha: parameter to control how much to filter: for alpha=1 we filter out
        all information, while for alpha=0 we don't apply any filtering.
"""
def __init__(self, columns, alpha=1):
self.columns = columns
self.alpha = alpha
def _check_coltype(self, X):
for col in as_list(self.columns):
if isinstance(col, str):
if isinstance(X, np.ndarray):
raise ValueError(
f"column {col} is a string but datatype receive is numpy."
)
if isinstance(X, pd.DataFrame):
if col not in X.columns:
raise ValueError(f"column {col} is not in {X.columns}")
if isinstance(col, int):
if col not in range(np.atleast_2d(np.array(X)).shape[1]):
raise ValueError(
f"column {col} is out of bounds for input shape {X.shape}"
)
def _col_idx(self, X, name):
if isinstance(name, str):
if isinstance(X, np.ndarray):
raise ValueError(
"You cannot have a column of type string on a numpy input matrix."
)
return {name: i for i, name in enumerate(X.columns)}[name]
return name
def _make_v_vectors(self, X, col_ids):
vs = np.zeros((X.shape[0], len(col_ids)))
for i, c in enumerate(col_ids):
vs[:, i] = X[:, col_ids[i]]
for j in range(0, i):
vs[:, i] = vs[:, i] - vector_projection(vs[:, i], vs[:, j])
return vs
def fit(self, X, y=None):
"""Learn the projection required to make the dataset orthogonal to sensitive columns."""
self._check_coltype(X)
self.col_ids_ = [
v if isinstance(v, int) else self._col_idx(X, v)
for v in as_list(self.columns)
]
X = check_array(X, estimator=self)
X_fair = X.copy()
v_vectors = self._make_v_vectors(X, self.col_ids_)
        # Gram-Schmidt process, but only against the sensitive attributes
for i, col in enumerate(X_fair.T):
for v in v_vectors.T:
X_fair[:, i] = X_fair[:, i] - vector_projection(X_fair[:, i], v)
# we want to learn matrix P: X P = X_fair
# this means we first need to create X_fair in order to learn P
self.projection_, resid, rank, s = np.linalg.lstsq(X, X_fair, rcond=None)
return self
def transform(self, X):
"""Transforms X by applying the information filter."""
check_is_fitted(self, ["projection_", "col_ids_"])
self._check_coltype(X)
X = check_array(X, estimator=self)
# apply the projection and remove the column we won't need
X_fair = X @ self.projection_
X_removed = np.delete(X_fair, self.col_ids_, axis=1)
X_orig = np.delete(X, self.col_ids_, axis=1)
return self.alpha * np.atleast_2d(X_removed) + (1 - self.alpha) * np.atleast_2d(
X_orig
        ) | scikit-fairness | /scikit-fairness-0.0.1.tar.gz/scikit-fairness-0.0.1/skfair/preprocessing/informationfilter.py | informationfilter.py | [parsed_code identical to code] | 0.883104 | 0.645888
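# Usage sketch (added for illustration; not part of the original skfair
# module). It applies the InformationFilter defined above to a small random
# matrix, removing the information carried by column 0. The data and the
# column choice are arbitrary.
def _information_filter_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 4))
    filt = InformationFilter(columns=[0], alpha=1)
    X_fair = filt.fit_transform(X)
    # The sensitive column is dropped and the remaining columns are
    # (approximately) orthogonal to it.
    return X_fair.shape, float(np.abs(X_fair.T @ X[:, 0]).max())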
.. image:: https://raw.githubusercontent.com/GAA-UAM/scikit-fda/develop/docs/logos/title_logo/title_logo.png
:alt: scikit-fda: Functional Data Analysis in Python
scikit-fda: Functional Data Analysis in Python
===================================================
|python|_ |build-status| |docs| |Codecov|_ |PyPIBadge|_ |license|_ |doi|
Functional Data Analysis, or FDA, is the field of Statistics that analyses
data that depend on a continuous parameter.
This package offers classes, methods and functions to give support to FDA
in Python. It includes a wide range of tools to work with functional data and its
representation, exploratory analysis, and preprocessing, among other tasks
such as inference, classification, regression, or clustering of functional data.
See documentation for further information on the features included in the
package.
Documentation
=============
The documentation is available at
`fda.readthedocs.io/en/stable/ <https://fda.readthedocs.io/en/stable/>`_, which
includes detailed information of the different modules, classes and methods of
the package, along with several examples showing different functionalities.
The documentation of the latest version, corresponding with the develop
version of the package, can be found at
`fda.readthedocs.io/en/latest/ <https://fda.readthedocs.io/en/latest/>`_.
Installation
============
Currently, *scikit-fda* is available in Python 3.6 and 3.7, regardless of the
platform.
The stable version can be installed via PyPI_:
.. code::
pip install scikit-fda
Installation from source
------------------------
It is possible to install the latest version of the package, available in the
develop branch, by cloning this repository and doing a manual installation.
.. code:: bash
git clone https://github.com/GAA-UAM/scikit-fda.git
pip install ./scikit-fda
Make sure that your default Python version is currently supported, or change
the python and pip commands by specifying a version, such as ``python3.6``:
.. code:: bash
git clone https://github.com/GAA-UAM/scikit-fda.git
python3.6 -m pip install ./scikit-fda
Requirements
------------
*scikit-fda* depends on the following packages:
* `cython <https://github.com/cython/cython>`_ - Python to C compiler
* `fdasrsf <https://github.com/jdtuck/fdasrsf_python>`_ - SRSF framework
* `findiff <https://github.com/maroba/findiff>`_ - Finite differences
* `matplotlib <https://github.com/matplotlib/matplotlib>`_ - Plotting with Python
* `multimethod <https://github.com/coady/multimethod>`_ - Multiple dispatch
* `numpy <https://github.com/numpy/numpy>`_ - The fundamental package for scientific computing with Python
* `pandas <https://github.com/pandas-dev/pandas>`_ - Powerful Python data analysis toolkit
* `rdata <https://github.com/vnmabus/rdata>`_ - Reader of R datasets in .rda format in Python
* `scikit-datasets <https://github.com/daviddiazvico/scikit-datasets>`_ - Scikit-learn compatible datasets
* `scikit-learn <https://github.com/scikit-learn/scikit-learn>`_ - Machine learning in Python
* `scipy <https://github.com/scipy/scipy>`_ - Scientific computation in Python
* `setuptools <https://github.com/pypa/setuptools>`_ - Python Packaging
The dependencies are automatically installed.
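As a quick, illustrative sketch (the sample values below are arbitrary),
functional data observed on a common grid can be represented with
``FDataGrid``:

.. code:: python

    import numpy as np
    import skfda

    grid_points = np.linspace(0, 1, 5)
    data_matrix = [np.sin(2 * np.pi * grid_points),
                   np.cos(2 * np.pi * grid_points)]
    fd = skfda.FDataGrid(data_matrix, grid_points)
    print(fd.n_samples)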
Contributions
=============
All contributions are welcome. You can help this project grow in multiple ways,
from creating an issue, reporting an improvement or a bug, to doing a
repository fork and creating a pull request to the development branch.
The people involved at some point in the development of the package can be
found in the `contributors
file <https://github.com/GAA-UAM/scikit-fda/blob/develop/THANKS.txt>`_.
.. Citation
========
If you find this project useful, please cite:
.. todo:: Include citation to scikit-fda paper.
License
=======
The package is licensed under the BSD 3-Clause License. A copy of the
license_ can be found along with the code.
.. _examples: https://fda.readthedocs.io/en/latest/auto_examples/index.html
.. _PyPI: https://pypi.org/project/scikit-fda/
.. |python| image:: https://img.shields.io/pypi/pyversions/scikit-fda.svg
.. _python: https://badge.fury.io/py/scikit-fda
.. |build-status| image:: https://travis-ci.org/GAA-UAM/scikit-fda.svg?branch=develop
:alt: build status
:scale: 100%
:target: https://travis-ci.com/GAA-UAM/scikit-fda
.. |docs| image:: https://readthedocs.org/projects/fda/badge/?version=latest
:alt: Documentation Status
:scale: 100%
:target: http://fda.readthedocs.io/en/latest/?badge=latest
.. |Codecov| image:: https://codecov.io/gh/GAA-UAM/scikit-fda/branch/develop/graph/badge.svg
.. _Codecov: https://codecov.io/github/GAA-UAM/scikit-fda?branch=develop
.. |PyPIBadge| image:: https://badge.fury.io/py/scikit-fda.svg
.. _PyPIBadge: https://badge.fury.io/py/scikit-fda
.. |license| image:: https://img.shields.io/badge/License-BSD%203--Clause-blue.svg
.. _license: https://github.com/GAA-UAM/scikit-fda/blob/master/LICENSE.txt
.. |doi| image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3468127.svg
:target: https://doi.org/10.5281/zenodo.3468127
 | scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/README.rst | README.rst | [parsed_code identical to code] | 0.904158 | 0.76947
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, TypeVar, overload
import sklearn.base
if TYPE_CHECKING:
from ..typing._numpy import NDArrayFloat, NDArrayInt
SelfType = TypeVar("SelfType")
TransformerNoTarget = TypeVar(
"TransformerNoTarget",
bound="TransformerMixin[Any, Any, None]",
)
Input = TypeVar("Input", contravariant=True)
Output = TypeVar("Output", covariant=True)
Target = TypeVar("Target", contravariant=True)
TargetPrediction = TypeVar("TargetPrediction")
class BaseEstimator( # noqa: D101
ABC,
sklearn.base.BaseEstimator, # type: ignore[misc]
):
pass # noqa: WPS604
class TransformerMixin( # noqa: D101
ABC,
Generic[Input, Output, Target],
sklearn.base.TransformerMixin, # type: ignore[misc]
):
@overload
def fit(
self: TransformerNoTarget,
X: Input,
) -> TransformerNoTarget:
pass
@overload
def fit(
self: SelfType,
X: Input,
y: Target,
) -> SelfType:
pass
def fit( # noqa: D102
self: SelfType,
X: Input,
y: Target | None = None,
) -> SelfType:
return self
@overload
def fit_transform(
self: TransformerNoTarget,
X: Input,
) -> Output:
pass
@overload
def fit_transform(
self,
X: Input,
y: Target,
) -> Output:
pass
def fit_transform( # noqa: D102
self,
X: Input,
y: Target | None = None,
**fit_params: Any,
) -> Output:
if y is None:
return self.fit( # type: ignore[no-any-return]
X,
**fit_params,
).transform(X)
return self.fit( # type: ignore[no-any-return]
X,
y,
**fit_params,
).transform(X)
class InductiveTransformerMixin( # noqa: D101
TransformerMixin[Input, Output, Target],
):
@abstractmethod
def transform( # noqa: D102
self: SelfType,
X: Input,
) -> Output:
pass
class OutlierMixin( # noqa: D101
ABC,
Generic[Input],
sklearn.base.OutlierMixin, # type: ignore[misc]
):
def fit_predict( # noqa: D102
self,
X: Input,
y: object = None,
) -> NDArrayInt:
return self.fit(X, y).predict(X) # type: ignore[no-any-return]
class ClassifierMixin( # noqa: D101
ABC,
Generic[Input, TargetPrediction],
sklearn.base.ClassifierMixin, # type: ignore[misc]
):
def fit( # noqa: D102
self: SelfType,
X: Input,
y: TargetPrediction,
) -> SelfType:
return self
@abstractmethod
def predict( # noqa: D102
self: SelfType,
X: Input,
) -> TargetPrediction:
pass
def score( # noqa: D102
self,
X: Input,
y: Target,
sample_weight: NDArrayFloat | None = None,
) -> float:
return super().score( # type: ignore[no-any-return]
X,
y,
sample_weight=sample_weight,
)
class ClusterMixin( # noqa: D101
ABC,
Generic[Input],
sklearn.base.ClusterMixin, # type: ignore[misc]
):
def fit_predict( # noqa: D102
self,
X: Input,
y: object = None,
) -> NDArrayInt:
return super().fit_predict(X, y) # type: ignore[no-any-return]
class RegressorMixin( # noqa: D101
ABC,
Generic[Input, TargetPrediction],
sklearn.base.RegressorMixin, # type: ignore[misc]
):
def fit( # noqa: D102
self: SelfType,
X: Input,
y: TargetPrediction,
) -> SelfType:
return self
@abstractmethod
def predict( # noqa: D102
self: SelfType,
X: Input,
) -> TargetPrediction:
pass
def score( # noqa: D102
self,
X: Input,
y: TargetPrediction,
sample_weight: NDArrayFloat | None = None,
) -> float:
from ..misc.scoring import r2_score
y_pred = self.predict(X)
return r2_score(
y,
y_pred,
sample_weight=sample_weight,
        ) | scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/_utils/_sklearn_adapter.py | _sklearn_adapter.py | [parsed_code identical to code] | 0.875814 | 0.271692
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
import numpy as np
from scipy.interpolate import PchipInterpolator
from ..typing._base import DomainRangeLike
from ..typing._numpy import ArrayLike, NDArrayFloat
if TYPE_CHECKING:
from ..representation import FDataGrid
def invert_warping(
warping: FDataGrid,
*,
output_points: Optional[ArrayLike] = None,
) -> FDataGrid:
r"""
Compute the inverse of a diffeomorphism.
    Let :math:`\gamma : [a,b] \rightarrow [a,b]` be a strictly
    increasing function; this computes the corresponding inverse
:math:`\gamma^{-1} : [a,b] \rightarrow [a,b]` such that
:math:`\gamma^{-1} \circ \gamma = \gamma \circ \gamma^{-1} = \gamma_{id}`.
Uses a PCHIP interpolator to compute approximately the inverse.
Args:
warping: Functions to be inverted.
output_points: Set of points where the
functions are interpolated to obtain the inverse, by default uses
the sample points of the fdatagrid.
Returns:
Inverse of the original functions.
Raises:
ValueError: If the functions are not strictly increasing or are
multidimensional.
Examples:
>>> import numpy as np
>>> from skfda import FDataGrid
We will construct the warping :math:`\gamma : [0,1] \rightarrow [0,1]`
    which maps t to t^3.
>>> t = np.linspace(0, 1)
>>> gamma = FDataGrid(t**3, t)
>>> gamma
FDataGrid(...)
We will compute the inverse.
>>> inverse = invert_warping(gamma)
>>> inverse
FDataGrid(...)
The result of the composition should be approximately the identity
    function.
>>> identity = gamma.compose(inverse)
>>> identity([0, 0.25, 0.5, 0.75, 1]).round(3)
array([[[ 0. ],
[ 0.25],
[ 0.5 ],
[ 0.75],
[ 1. ]]])
"""
from ..misc.validation import check_fdata_dimensions
check_fdata_dimensions(
warping,
dim_domain=1,
dim_codomain=1,
)
output_points = (
warping.grid_points[0]
if output_points is None
else np.asarray(output_points)
)
y = warping(output_points)[..., 0]
data_matrix = np.empty((warping.n_samples, len(output_points)))
for i in range(warping.n_samples):
data_matrix[i] = PchipInterpolator(y[i], output_points)(output_points)
return warping.copy(data_matrix=data_matrix, grid_points=output_points)
def normalize_scale(
t: NDArrayFloat,
a: float = 0,
b: float = 1,
) -> NDArrayFloat:
"""
    Performs an affine translation to normalize an interval.
Args:
t: Array of dim 1 or 2 with at least 2 values.
        a: Starting point of the new interval. Defaults to 0.
        b: Stopping point of the new interval. Defaults to 1.
Returns:
Array with the transformed interval.
"""
t = t.T # Broadcast to normalize multiple arrays
t1 = np.array(t, copy=True)
t1 -= t[0] # Translation to [0, t[-1] - t[0]]
t1 *= (b - a) / (t[-1] - t[0]) # Scale to [0, b-a]
t1 += a # Translation to [a, b]
t1[0] = a # Fix possible round errors
t1[-1] = b
return t1.T
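# Usage sketch (added for illustration; not part of the original module):
# normalize_scale maps an increasing set of points from its own interval
# onto [a, b]. The grid below is arbitrary; np is the module-level numpy.
def _normalize_scale_example():
    t = np.linspace(2.0, 6.0, 5)             # original interval [2, 6]
    t_scaled = normalize_scale(t, a=0, b=1)  # mapped onto [0, 1]
    return t_scaled                          # [0., 0.25, 0.5, 0.75, 1.]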
def normalize_warping(
warping: FDataGrid,
domain_range: Optional[DomainRangeLike] = None,
) -> FDataGrid:
r"""
Rescale a warping to normalize their :term:`domain`.
    Given a set of warpings :math:`\gamma_i:[a,b]\rightarrow [a,b]`, an
    affine translation is used to change the domain of the transformation to
    another domain, :math:`\tilde \gamma_i:[\tilde a,\tilde b] \rightarrow
[\tilde a, \tilde b]`.
Args:
warping: Set of warpings to rescale.
domain_range: New domain range of the warping. By
            default, the same domain range is used.
Returns:
Normalized warpings.
"""
from ..misc.validation import validate_domain_range
domain_range_tuple = (
warping.domain_range[0]
if domain_range is None
else validate_domain_range(domain_range)[0]
)
data_matrix = normalize_scale(
warping.data_matrix[..., 0],
*domain_range_tuple,
)
grid_points = normalize_scale(warping.grid_points[0], *domain_range_tuple)
return warping.copy(
data_matrix=data_matrix,
grid_points=grid_points,
domain_range=domain_range,
    ) | scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/_utils/_warping.py | _warping.py | [parsed_code identical to code] | 0.970099 | 0.665635
from __future__ import annotations
import functools
import numbers
from functools import singledispatch
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
List,
Optional,
Sequence,
Sized,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import numpy as np
import scipy.integrate
from pandas.api.indexers import check_array_indexer
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.multiclass import check_classification_targets
from typing_extensions import Literal, ParamSpec, Protocol
from ..typing._base import GridPoints, GridPointsLike
from ..typing._numpy import NDArrayAny, NDArrayFloat, NDArrayInt, NDArrayStr
from ._sklearn_adapter import BaseEstimator
ArrayDTypeT = TypeVar("ArrayDTypeT", bound="np.generic")
if TYPE_CHECKING:
from ..representation import FData, FDataGrid
from ..representation.basis import Basis
from ..representation.extrapolation import ExtrapolationLike
T = TypeVar("T", bound=FData)
Input = TypeVar("Input", bound=Union[FData, NDArrayFloat])
Output = TypeVar("Output", bound=Union[FData, NDArrayFloat])
Target = TypeVar("Target", bound=NDArrayInt)
_MapAcceptableSelf = TypeVar(
"_MapAcceptableSelf",
bound="_MapAcceptable",
)
class _MapAcceptable(Protocol, Sized):
def __getitem__(
self: _MapAcceptableSelf,
__key: Union[slice, NDArrayInt], # noqa: WPS112
) -> _MapAcceptableSelf:
pass
@property
def nbytes(self) -> int:
pass
_MapAcceptableT = TypeVar(
"_MapAcceptableT",
bound=_MapAcceptable,
contravariant=True,
)
MapFunctionT = TypeVar("MapFunctionT", covariant=True)
P = ParamSpec("P")
class _MapFunction(Protocol[_MapAcceptableT, P, MapFunctionT]):
"""Protocol for functions that can be mapped over several arrays."""
def __call__(
self,
*args: _MapAcceptableT,
**kwargs: P.kwargs,
) -> MapFunctionT:
pass
class _PairwiseFunction(Protocol[_MapAcceptableT, P, MapFunctionT]):
"""Protocol for pairwise array functions."""
def __call__(
self,
__arg1: _MapAcceptableT, # noqa: WPS112
__arg2: _MapAcceptableT, # noqa: WPS112
**kwargs: P.kwargs, # type: ignore[name-defined]
) -> MapFunctionT:
pass
def _to_grid(
X: FData,
y: FData,
eval_points: Optional[NDArrayFloat] = None,
) -> Tuple[FDataGrid, FDataGrid]:
"""Transform a pair of FDatas in grids to perform calculations."""
from .. import FDataGrid
x_is_grid = isinstance(X, FDataGrid)
y_is_grid = isinstance(y, FDataGrid)
if eval_points is not None:
X = X.to_grid(eval_points)
y = y.to_grid(eval_points)
elif x_is_grid and not y_is_grid:
y = y.to_grid(X.grid_points[0])
elif not x_is_grid and y_is_grid:
X = X.to_grid(y.grid_points[0])
elif not x_is_grid and not y_is_grid:
X = X.to_grid()
y = y.to_grid()
return X, y
def _to_grid_points(grid_points_like: GridPointsLike) -> GridPoints:
"""Convert to grid points.
    If the original list is one-dimensional (e.g. [1, 2, 3]), return it
    converted to an array (in this case [array([1, 2, 3])]).
    If the original list is two-dimensional (e.g. [[1, 2, 3], [4, 5]]), return
    a list containing other one-dimensional arrays (in this case
    [array([1, 2, 3]), array([4, 5])]).
    In any other case the behaviour is unspecified.
"""
unidimensional = False
if not isinstance(grid_points_like, Iterable):
grid_points_like = [grid_points_like]
if not isinstance(grid_points_like[0], Iterable):
unidimensional = True
if unidimensional:
return (_int_to_real(np.asarray(grid_points_like)),)
return tuple(_int_to_real(np.asarray(i)) for i in grid_points_like)
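# Usage sketch (added for illustration; not part of the original module):
# _to_grid_points wraps one-dimensional or per-dimension grid point
# specifications into a tuple of float arrays. The inputs are arbitrary.
def _to_grid_points_example():
    one_dim = _to_grid_points([1, 2, 3])            # (array([1., 2., 3.]),)
    two_dim = _to_grid_points([[1, 2, 3], [4, 5]])  # one array per dimension
    return one_dim, two_dim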
@overload
def _cartesian_product(
axes: Sequence[np.typing.NDArray[ArrayDTypeT]],
*,
flatten: bool = True,
return_shape: Literal[False] = False,
) -> np.typing.NDArray[ArrayDTypeT]:
pass
@overload
def _cartesian_product(
axes: Sequence[np.typing.NDArray[ArrayDTypeT]],
*,
flatten: bool = True,
return_shape: Literal[True],
) -> Tuple[np.typing.NDArray[ArrayDTypeT], Tuple[int, ...]]:
pass
def _cartesian_product( # noqa: WPS234
axes: Sequence[np.typing.NDArray[ArrayDTypeT]],
*,
flatten: bool = True,
return_shape: bool = False,
) -> (
np.typing.NDArray[ArrayDTypeT]
| Tuple[np.typing.NDArray[ArrayDTypeT], Tuple[int, ...]]
):
"""
Compute the cartesian product of the axes.
Computes the cartesian product of the axes and returns a numpy array of
1 dimension with all the possible combinations, for an arbitrary number of
dimensions.
Args:
axes: List with axes.
flatten: Whether to return the flatten array or keep one dimension per
axis.
return_shape: If ``True`` return the shape of the array before
flattening.
Returns:
Numpy 2-D array with all the possible combinations.
        The entry (i,j) represents the j-th coordinate of the i-th point.
If ``return_shape`` is ``True`` returns also the shape of the array
before flattening.
Examples:
>>> from skfda._utils import _cartesian_product
>>> axes = [[0,1],[2,3]]
>>> _cartesian_product(axes)
array([[0, 2],
[0, 3],
[1, 2],
[1, 3]])
>>> axes = [[0,1],[2,3],[4]]
>>> _cartesian_product(axes)
array([[0, 2, 4],
[0, 3, 4],
[1, 2, 4],
[1, 3, 4]])
>>> axes = [[0,1]]
>>> _cartesian_product(axes)
array([[0],
[1]])
"""
cartesian = np.stack(np.meshgrid(*axes, indexing='ij'), -1)
shape = cartesian.shape
if flatten:
cartesian = cartesian.reshape(-1, len(axes))
if return_shape:
return cartesian, shape
return cartesian # type: ignore[no-any-return]
def _same_domain(fd: Union[Basis, FData], fd2: Union[Basis, FData]) -> bool:
"""Check if the domain range of two objects is the same."""
return np.array_equal(fd.domain_range, fd2.domain_range)
def _one_grid_to_points(
axes: GridPointsLike,
*,
dim_domain: int,
) -> Tuple[NDArrayFloat, Tuple[int, ...]]:
"""
Convert a list of ndarrays, one per domain dimension, in the points.
Returns also the shape containing the information of how each point
is formed.
"""
axes = _to_grid_points(axes)
if len(axes) != dim_domain:
raise ValueError(
f"Length of axes should be {dim_domain}",
)
cartesian, shape = _cartesian_product(axes, return_shape=True)
# Drop domain size dimension, as it is not needed to reshape the output
shape = shape[:-1]
return cartesian, shape
class EvaluateMethod(Protocol):
"""Evaluation method."""
def __call__(
self,
__eval_points: NDArrayFloat, # noqa: WPS112
extrapolation: Optional[ExtrapolationLike],
aligned: bool,
) -> NDArrayFloat:
"""Evaluate a function."""
pass
@overload
def _evaluate_grid(
axes: GridPointsLike,
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: Literal[True] = True,
) -> NDArrayFloat:
pass
@overload
def _evaluate_grid(
axes: Iterable[GridPointsLike],
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: Literal[False],
) -> NDArrayFloat:
pass
@overload
def _evaluate_grid(
axes: Union[GridPointsLike, Iterable[GridPointsLike]],
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: bool,
) -> NDArrayFloat:
pass
def _evaluate_grid( # noqa: WPS234
axes: Union[GridPointsLike, Iterable[GridPointsLike]],
*,
evaluate_method: EvaluateMethod,
n_samples: int,
dim_domain: int,
dim_codomain: int,
extrapolation: Optional[ExtrapolationLike] = None,
aligned: bool = True,
) -> NDArrayFloat:
"""
Evaluate the functional object in the cartesian grid.
This method is called internally by :meth:`evaluate` when the argument
`grid` is True.
Evaluates the functional object in the grid generated by the cartesian
    product of the axes. The length of the list of axes should be equal
    to the domain dimension of the object.
    If the list of axes has lengths :math:`n_1, n_2, ..., n_m`, where
    :math:`m` is equal to the dimension of the domain, the result of the
    evaluation in the grid will be a matrix with :math:`m+1` dimensions and
    shape :math:`n_{samples} x n_1 x n_2 x ... x n_m`.
    If `aligned` is false each sample is evaluated in a
    different grid, and the list of axes should contain a list of axes for
    each sample.
    If the domain dimension is 1, the result of the evaluation will be the
    same as :meth:`evaluate` without the grid option, but with worse
    performance.
    Args:
        axes: List of axes to generate the grid where the
            object will be evaluated.
        evaluate_method: Function used to evaluate the functional object.
        n_samples: Number of samples.
        dim_domain: Domain dimension.
        dim_codomain: Codomain dimension.
        extrapolation: Controls the
            extrapolation mode for elements outside the domain range. By
            default, the mode defined during the instantiation of the
            object is used.
        aligned: If False evaluates each sample
            in a different grid.
Returns:
Numpy array with dim_domain + 1 dimensions with
the result of the evaluation.
Raises:
        ValueError: If the number of axes is different from the domain
            dimension.
"""
# Compute intersection points and resulting shapes
if aligned:
axes = cast(GridPointsLike, axes)
eval_points, shape = _one_grid_to_points(axes, dim_domain=dim_domain)
else:
axes_per_sample = cast(Iterable[GridPointsLike], axes)
axes_per_sample = list(axes_per_sample)
eval_points_tuple, shape_tuple = zip(
*[
_one_grid_to_points(a, dim_domain=dim_domain)
for a in axes_per_sample
],
)
if len(eval_points_tuple) != n_samples:
raise ValueError(
"Should be provided a list of axis per sample",
)
eval_points = np.asarray(eval_points_tuple)
# Evaluate the points
evaluated = evaluate_method(
eval_points,
extrapolation=extrapolation,
aligned=aligned,
)
# Reshape the result
if aligned:
res = evaluated.reshape(
[n_samples] + list(shape) + [dim_codomain],
)
else:
res = np.asarray([
r.reshape(list(s) + [dim_codomain])
for r, s in zip(evaluated, shape_tuple)
])
return res
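# Illustrative sketch (not part of the original module): evaluates a toy
# "functional object" (two samples, each a constant function) on a grid by
# passing a plain function in place of a bound ``evaluate`` method.  The
# helper name ``_example_evaluate_grid`` is hypothetical.
def _example_evaluate_grid() -> NDArrayFloat:
    constants = np.array([1.0, 2.0])

    def evaluate_method(eval_points, *, extrapolation=None, aligned=True):
        # Each sample is constant: shape (n_samples, n_points, dim_codomain).
        n_points = len(eval_points)
        return np.tile(constants[:, None, None], (1, n_points, 1))

    result = _evaluate_grid(
        [[0.0, 0.5, 1.0]],
        evaluate_method=evaluate_method,
        n_samples=2,
        dim_domain=1,
        dim_codomain=1,
    )
    # Resulting shape: (n_samples, n_1, dim_codomain) == (2, 3, 1).
    assert result.shape == (2, 3, 1)
    return result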
def nquad_vec(
func: Callable[[NDArrayFloat], NDArrayFloat],
ranges: Sequence[Tuple[float, float]],
) -> NDArrayFloat:
"""Perform multiple integration of vector valued functions."""
initial_depth = len(ranges) - 1
def integrate(*args: Any, depth: int) -> NDArrayFloat: # noqa: WPS430
if depth == 0:
f = functools.partial(func, *args)
else:
f = functools.partial(integrate, *args, depth=depth - 1)
return scipy.integrate.quad_vec( # type: ignore[no-any-return]
f,
*ranges[initial_depth - depth],
)[0]
return integrate(depth=initial_depth)
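# Illustrative sketch (not part of the original module): integrates a toy
# vector-valued function over [0, 1] x [0, 2] with ``nquad_vec``.  Note that
# the integrand receives one scalar argument per integration range.  The
# helper name ``_example_nquad_vec`` is hypothetical.
def _example_nquad_vec() -> NDArrayFloat:
    def integrand(x, y):
        return np.array([x * y, 1.0])

    result = nquad_vec(integrand, [(0.0, 1.0), (0.0, 2.0)])
    # Component-wise: the integral of x*y is 1/2 * 2 = 1; the integral of 1
    # is the area of the rectangle, 2.
    assert np.allclose(result, [1.0, 2.0])
    return result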
def _map_in_batches(
function: _MapFunction[_MapAcceptableT, P, np.typing.NDArray[ArrayDTypeT]],
arguments: Tuple[_MapAcceptableT, ...],
indexes: Tuple[NDArrayInt, ...],
memory_per_batch: Optional[int] = None,
*args: P.args, # Should be empty
**kwargs: P.kwargs,
) -> np.typing.NDArray[ArrayDTypeT]:
"""
Map a function over samples of FData or ndarray tuples efficiently.
    This function prevents a large set of indexes from using all available
    memory and hanging the machine.
"""
if memory_per_batch is None:
# 256MB is not too big
memory_per_batch = 256 * 1024 * 1024 # noqa: WPS432
memory_per_element = sum(a.nbytes // len(a) for a in arguments)
n_elements_per_batch_allowed = memory_per_batch // memory_per_element
if n_elements_per_batch_allowed < 1:
raise ValueError("Too few memory allowed for the operation")
n_indexes = len(indexes[0])
assert all(n_indexes == len(i) for i in indexes)
batches: List[np.typing.NDArray[ArrayDTypeT]] = []
for pos in range(0, n_indexes, n_elements_per_batch_allowed):
batch_args = tuple(
a[i[pos:pos + n_elements_per_batch_allowed]]
for a, i in zip(arguments, indexes)
)
batches.append(function(*batch_args, **kwargs))
return np.concatenate(batches, axis=0)
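# Illustrative sketch (not part of the original module): applies a NumPy
# function over selected rows in memory-bounded batches.  The helper name
# ``_example_map_in_batches`` is hypothetical.
def _example_map_in_batches() -> np.typing.NDArray[np.float64]:
    data = np.arange(12, dtype=np.float64).reshape(6, 2)
    indexes = (np.array([0, 2, 4]),)
    # Allow one row per batch, so three batches are computed and concatenated.
    result = _map_in_batches(
        lambda rows: rows.sum(axis=1),
        (data,),
        indexes,
        memory_per_batch=data.nbytes // len(data),
    )
    assert np.array_equal(result, [1.0, 9.0, 17.0])
    return result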
def _pairwise_symmetric(
function: _PairwiseFunction[
_MapAcceptableT,
P,
np.typing.NDArray[ArrayDTypeT],
],
arg1: _MapAcceptableT,
arg2: Optional[_MapAcceptableT] = None,
memory_per_batch: Optional[int] = None,
*args: P.args, # Should be empty
**kwargs: P.kwargs,
) -> np.typing.NDArray[ArrayDTypeT]:
"""Compute pairwise a commutative function."""
def map_function(
*args: _MapAcceptableT,
**kwargs: P.kwargs,
) -> np.typing.NDArray[ArrayDTypeT]:
"""Just to keep Mypy happy."""
return function(args[0], args[1], **kwargs)
dim1 = len(arg1)
if arg2 is None or arg2 is arg1:
triu_indices = np.triu_indices(dim1)
triang_vec = _map_in_batches(
map_function,
(arg1, arg1),
triu_indices,
memory_per_batch,
**kwargs, # type: ignore[arg-type]
)
matrix = np.empty((dim1, dim1), dtype=triang_vec.dtype)
# Set upper matrix
matrix[triu_indices] = triang_vec
# Set lower matrix
matrix[(triu_indices[1], triu_indices[0])] = triang_vec
return matrix
dim2 = len(arg2)
indices = np.indices((dim1, dim2))
vec = _map_in_batches(
map_function,
(arg1, arg2),
(indices[0].ravel(), indices[1].ravel()),
memory_per_batch=memory_per_batch,
**kwargs, # type: ignore[arg-type]
)
return np.reshape(vec, (dim1, dim2))
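# Illustrative sketch (not part of the original module): builds a symmetric
# pairwise distance matrix while computing only the upper triangle.  The
# helper name ``_example_pairwise_symmetric`` is hypothetical.
def _example_pairwise_symmetric() -> np.typing.NDArray[np.float64]:
    points = np.array([[0.0], [3.0], [7.0]])

    def distance(a, b):
        # Batched absolute difference between paired samples.
        return np.abs(a - b)[:, 0]

    matrix = _pairwise_symmetric(distance, points)
    assert np.array_equal(
        matrix,
        [[0.0, 3.0, 7.0], [3.0, 0.0, 4.0], [7.0, 4.0, 0.0]],
    )
    return matrix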
def _int_to_real(array: Union[NDArrayInt, NDArrayFloat]) -> NDArrayFloat:
"""Convert integer arrays to floating point."""
return array + 0.0
def _check_array_key(array: NDArrayAny, key: Any) -> Any:
"""Check a getitem key."""
key = check_array_indexer(array, key)
if isinstance(key, tuple):
non_ellipsis = [i for i in key if i is not Ellipsis]
if len(non_ellipsis) > 1:
raise KeyError(key)
key = non_ellipsis[0]
if isinstance(key, numbers.Integral): # To accept also numpy ints
key = int(key)
if key < 0:
key = len(array) + key
if not 0 <= key < len(array):
raise IndexError("index out of bounds")
return slice(key, key + 1)
return key
def _check_estimator(estimator: Type[BaseEstimator]) -> None:
from sklearn.utils.estimator_checks import (
check_get_params_invariance,
check_set_params,
)
name = estimator.__name__
instance = estimator()
check_get_params_invariance(name, instance)
check_set_params(name, instance)
def _classifier_get_classes(
y: NDArrayStr | NDArrayInt,
) -> Tuple[NDArrayStr | NDArrayInt, NDArrayInt]:
check_classification_targets(y)
le = LabelEncoder()
y_ind = le.fit_transform(y)
classes = le.classes_
if classes.size < 2:
raise ValueError(
            f'The number of classes has to be greater than '
            f'one; got {classes.size} class',
)
    return classes, y_ind

scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/_utils/_utils.py | _utils.py
from __future__ import annotations
import abc
import math
from typing import TypeVar
import numpy as np
import scipy.stats
import sklearn
from scipy.special import comb
from typing_extensions import Literal
from ..._utils._sklearn_adapter import BaseEstimator, InductiveTransformerMixin
from ...typing._numpy import NDArrayFloat, NDArrayInt
T = TypeVar("T", contravariant=True)
SelfType = TypeVar("SelfType")
_Side = Literal["left", "right"]
Input = TypeVar("Input", contravariant=True)
class _DepthOrOutlyingness(
BaseEstimator,
InductiveTransformerMixin[Input, NDArrayFloat, object],
):
"""Abstract class representing a depth or outlyingness function."""
def fit(self: SelfType, X: Input, y: object = None) -> SelfType:
"""
Learn the distribution from the observations.
Args:
X: Functional dataset from which the distribution of the data is
inferred.
y: Unused. Kept only for convention.
Returns:
Fitted estimator.
"""
return self
@abc.abstractmethod
def transform(self, X: Input) -> NDArrayFloat:
"""
Compute the depth or outlyingness inside the learned distribution.
Args:
X: Points whose depth is going to be evaluated.
Returns:
Depth of each observation.
"""
pass
def fit_transform(self, X: Input, y: object = None) -> NDArrayFloat:
"""
Compute the depth or outlyingness of each observation.
This computation is done with respect to the whole dataset.
Args:
X: Dataset.
y: Unused. Kept only for convention.
Returns:
Depth of each observation.
"""
return self.fit(X).transform(X)
def __call__(
self,
X: Input,
*,
distribution: Input | None = None,
) -> NDArrayFloat:
"""
Allow the depth or outlyingness to be used as a function.
Args:
X: Points whose depth is going to be evaluated.
distribution: Functional dataset from which the distribution of
the data is inferred. If ``None`` it is the same as ``X``.
Returns:
Depth of each observation.
"""
copy: _DepthOrOutlyingness[Input] = sklearn.base.clone(self)
if distribution is None:
return copy.fit_transform(X)
return copy.fit(distribution).transform(X)
@property
def max(self) -> float:
"""
        Maximum (or supremum if there is no maximum) of the values that can
        be predicted.
"""
return 1
@property
def min(self) -> float:
"""
Minimum (or infimum if there is no maximum) of the possibly predicted
values.
"""
return 0
class Depth(_DepthOrOutlyingness[T]):
"""Abstract class representing a depth function."""
class Outlyingness(_DepthOrOutlyingness[T]):
"""Abstract class representing an outlyingness function."""
def _searchsorted_one_dim(
array: NDArrayFloat,
values: NDArrayFloat,
*,
side: _Side = 'left',
) -> NDArrayInt:
return np.searchsorted(array, values, side=side)
_searchsorted_vectorized = np.vectorize(
_searchsorted_one_dim,
signature='(n),(m),()->(m)',
excluded='side',
)
def _searchsorted_ordered(
array: NDArrayFloat,
values: NDArrayFloat,
*,
side: _Side = 'left',
) -> NDArrayInt:
return _searchsorted_vectorized( # type: ignore[no-any-return]
array,
values,
side=side,
)
def _cumulative_distribution(column: NDArrayFloat) -> NDArrayFloat:
"""
Calculate the cumulative distribution function at each point.
Args:
column: Array containing the values over which the
distribution function is calculated.
Returns:
Array containing the evaluation at each point of the
distribution function.
Examples:
>>> _cumulative_distribution(np.array([1, 4, 5, 1, 2, 2, 4, 1, 1, 3]))
array([ 0.4, 0.9, 1. , 0.4, 0.6, 0.6, 0.9, 0.4, 0.4, 0.7])
"""
return _searchsorted_ordered(
np.sort(column),
column,
side='right',
) / len(column)
class _UnivariateFraimanMuniz(Depth[NDArrayFloat]):
r"""
    Univariate depth used to compute the Fraiman and Muniz depth.
    Each column is considered as the samples of a random variable.
    The univariate depth of each of the samples of each column is calculated
    as follows:
.. math::
D(x) = 1 - \left\lvert \frac{1}{2}- F(x)\right\rvert
Where :math:`F` stands for the marginal univariate distribution function of
each column.
"""
def fit(self: SelfType, X: NDArrayFloat, y: object = None) -> SelfType:
self._sorted_values = np.sort(X, axis=0)
return self
def transform(self, X: NDArrayFloat) -> NDArrayFloat:
cum_dist = _searchsorted_ordered(
np.moveaxis(self._sorted_values, 0, -1),
np.moveaxis(X, 0, -1),
side='right',
).astype(X.dtype) / len(self._sorted_values)
assert cum_dist.shape[-2] == 1
ret = 0.5 - np.moveaxis(cum_dist, -1, 0)[..., 0]
ret = - np.abs(ret)
ret += 1
return ret
@property
def min(self) -> float:
return 1 / 2
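# Illustrative sketch (not part of the original module): the univariate
# Fraiman-Muniz depth of four scalar observations evaluated pointwise.  The
# helper name ``_example_univariate_fraiman_muniz`` is hypothetical.
def _example_univariate_fraiman_muniz() -> NDArrayFloat:
    # Shape (n_samples, n_points, 1): four samples observed at a single point.
    values = np.array([1.0, 2.0, 3.0, 4.0]).reshape(4, 1, 1)
    depth = _UnivariateFraimanMuniz().fit(values).transform(values)
    # D(x) = 1 - |1/2 - F(x)|, with F the right-continuous empirical cdf.
    assert np.allclose(depth[:, 0], [0.75, 1.0, 0.75, 0.5])
    return depth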
class SimplicialDepth(Depth[NDArrayFloat]):
r"""
Simplicial depth.
The simplicial depth of a point :math:`x` in :math:`\mathbb{R}^p` given a
distribution :math:`F` is the probability that a random simplex with its
:math:`p + 1` points sampled from :math:`F` contains :math:`x`.
References:
Liu, R. Y. (1990). On a Notion of Data Depth Based on Random
Simplices. The Annals of Statistics, 18(1), 405–414.
"""
def fit( # noqa: D102
self,
X: NDArrayFloat,
y: object = None,
) -> SimplicialDepth:
self._dim = X.shape[-1]
if self._dim == 1:
self.sorted_values = np.sort(X, axis=0)
else:
raise NotImplementedError(
"SimplicialDepth is currently only "
"implemented for one-dimensional data.",
)
return self
def transform(self, X: NDArrayFloat) -> NDArrayFloat: # noqa: D102
assert self._dim == X.shape[-1]
if self._dim == 1:
positions_left = _searchsorted_ordered(
np.moveaxis(self.sorted_values, 0, -1),
np.moveaxis(X, 0, -1),
)
positions_left = np.moveaxis(positions_left, -1, 0)[..., 0]
positions_right = _searchsorted_ordered(
np.moveaxis(self.sorted_values, 0, -1),
np.moveaxis(X, 0, -1),
side='right',
)
positions_right = np.moveaxis(positions_right, -1, 0)[..., 0]
num_strictly_below = positions_left
num_strictly_above = len(self.sorted_values) - positions_right
total_pairs = comb(len(self.sorted_values), 2)
return ( # type: ignore[no-any-return]
total_pairs - comb(num_strictly_below, 2)
- comb(num_strictly_above, 2)
) / total_pairs
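# Illustrative sketch (not part of the original module): simplicial depth of
# four scalar observations; a random pair of sample points is more likely to
# bracket the central values.  The helper name ``_example_simplicial_depth``
# is hypothetical.
def _example_simplicial_depth() -> NDArrayFloat:
    values = np.array([1.0, 2.0, 3.0, 4.0]).reshape(4, 1, 1)
    depth = SimplicialDepth().fit(values).transform(values)
    assert np.allclose(depth[:, 0], [1 / 2, 5 / 6, 5 / 6, 1 / 2])
    return depth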
class OutlyingnessBasedDepth(Depth[T]):
r"""
Computes depth based on an outlyingness measure.
An outlyingness function :math:`O(x)` can be converted to a depth
function as
.. math::
D(x) = \frac{1}{1 + O(x)}
if :math:`O(x)` is unbounded or as
.. math::
D(x) = 1 - \frac{O(x)}{\sup O(x)}
    if :math:`O(x)` is bounded. If the infimum value of the
    outlyingness function is not zero, it is subtracted beforehand.
Args:
outlyingness (Outlyingness): Outlyingness object.
References:
Serfling, R. (2006). Depth functions in nonparametric
multivariate inference. DIMACS Series in Discrete Mathematics and
Theoretical Computer Science, 72, 1.
"""
def __init__(self, outlyingness: Outlyingness[T]):
self.outlyingness = outlyingness
def fit( # noqa: D102
self,
X: T,
y: object = None,
) -> OutlyingnessBasedDepth[T]:
self.outlyingness.fit(X)
return self
def transform(self, X: T) -> NDArrayFloat: # noqa: D102
outlyingness_values = self.outlyingness.transform(X)
min_val = self.outlyingness.min
max_val = self.outlyingness.max
if math.isinf(max_val):
return 1 / (1 + outlyingness_values - min_val)
return 1 - (outlyingness_values - min_val) / (max_val - min_val)
class StahelDonohoOutlyingness(Outlyingness[NDArrayFloat]):
r"""
Computes Stahel-Donoho outlyingness.
Stahel-Donoho outlyingness is defined as
.. math::
        \sup_{\|u\|=1} \frac{|u^T x - \text{Med}(u^T X)|}{\text{MAD}(u^T X)}
where :math:`\text{X}` is a sample with distribution :math:`F`,
:math:`\text{Med}` is the median and :math:`\text{MAD}` is the
median absolute deviation.
References:
Zuo, Y., Cui, H., & He, X. (2004). On the Stahel-Donoho
estimator and depth-weighted means of multivariate data. Annals of
Statistics, 32(1), 167–188. https://doi.org/10.1214/aos/1079120132
"""
def fit( # noqa: D102
self,
X: NDArrayFloat,
y: object = None,
) -> StahelDonohoOutlyingness:
dim = X.shape[-1]
if dim == 1:
self._location = np.median(X, axis=0)
self._scale = scipy.stats.median_abs_deviation(X, axis=0)
else:
raise NotImplementedError("Only implemented for one dimension")
return self
def transform(self, X: NDArrayFloat) -> NDArrayFloat: # noqa: D102
dim = X.shape[-1]
if dim == 1:
# Special case, can be computed exactly
diff: NDArrayFloat = np.abs(X - self._location) / self._scale
return diff[..., 0]
raise NotImplementedError("Only implemented for one dimension")
@property
def max(self) -> float:
return math.inf
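# Illustrative sketch (not part of the original module): in one dimension the
# Stahel-Donoho outlyingness reduces to |x - median| / MAD, so the value 100
# gets a very large score.  The helper name ``_example_stahel_donoho`` is
# hypothetical.
def _example_stahel_donoho() -> NDArrayFloat:
    values = np.array([1.0, 2.0, 3.0, 4.0, 100.0]).reshape(-1, 1)
    scores = StahelDonohoOutlyingness().fit(values).transform(values)
    # median = 3, MAD = 1 (unscaled), hence |x - 3| for each observation.
    assert np.allclose(scores, [2.0, 1.0, 0.0, 1.0, 97.0])
    return scores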
class ProjectionDepth(OutlyingnessBasedDepth[NDArrayFloat]):
"""
Computes Projection depth.
It is defined as the depth induced by the
:class:`Stahel-Donoho outlyingness <StahelDonohoOutlyingness>`.
See also:
:class:`StahelDonohoOutlyingness`: Stahel-Donoho outlyingness.
References:
Zuo, Y., Cui, H., & He, X. (2004). On the Stahel-Donoho
estimator and depth-weighted means of multivariate data. Annals of
Statistics, 32(1), 167–188. https://doi.org/10.1214/aos/1079120132
"""
def __init__(self) -> None:
        super().__init__(outlyingness=StahelDonohoOutlyingness())

scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/depth/multivariate.py | multivariate.py
from __future__ import annotations
import itertools
from typing import TypeVar
import numpy as np
import scipy.integrate
from ..._utils._sklearn_adapter import BaseEstimator
from ...misc.metrics import l2_distance
from ...misc.metrics._utils import _fit_metric
from ...representation import FData, FDataGrid
from ...typing._metric import Metric
from ...typing._numpy import NDArrayFloat
from .multivariate import Depth, SimplicialDepth, _UnivariateFraimanMuniz
T = TypeVar("T", bound=FData)
class IntegratedDepth(Depth[FDataGrid]):
r"""
Functional depth as the integral of a multivariate depth.
Args:
multivariate_depth (Depth): Multivariate depth to integrate.
By default it is the one used by Fraiman and Muniz, that is,
.. math::
D(x) = 1 - \left\lvert \frac{1}{2}- F(x)\right\rvert
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.IntegratedDepth()
>>> depth(fd)
array([ 0.5 , 0.75 , 0.925, 0.875])
References:
Fraiman, R., & Muniz, G. (2001). Trimmed means for functional
data. Test, 10(2), 419–440. https://doi.org/10.1007/BF02595706
"""
def __init__(
self,
*,
multivariate_depth: Depth[NDArrayFloat] | None = None,
) -> None:
self.multivariate_depth = multivariate_depth
def fit( # noqa: D102
self,
X: FDataGrid,
y: object = None,
) -> IntegratedDepth:
self.multivariate_depth_: Depth[NDArrayFloat]
if self.multivariate_depth is None:
self.multivariate_depth_ = _UnivariateFraimanMuniz()
else:
self.multivariate_depth_ = self.multivariate_depth
self._domain_range = X.domain_range
self._grid_points = X.grid_points
self.multivariate_depth_.fit(X.data_matrix)
return self
def transform(self, X: FDataGrid) -> NDArrayFloat: # noqa: D102
pointwise_depth = self.multivariate_depth_.transform(X.data_matrix)
interval_len = (
self._domain_range[0][1]
- self._domain_range[0][0]
)
integrand = pointwise_depth
for d, s in zip(X.domain_range, X.grid_points):
integrand = scipy.integrate.simps(
integrand,
x=s,
axis=1,
)
interval_len = d[1] - d[0]
integrand /= interval_len
return integrand
@property
def max(self) -> float:
if self.multivariate_depth is None:
return 1
return self.multivariate_depth.max
@property
def min(self) -> float:
if self.multivariate_depth is None:
return 1 / 2
return self.multivariate_depth.min
class ModifiedBandDepth(IntegratedDepth):
"""
Implementation of Modified Band Depth for functional data.
The band depth of each sample is obtained by computing the fraction of time
its graph is contained in the bands determined by two sample curves.
    In the case that the fdatagrid :term:`domain` dimension is 2, surfaces
    determine the bands instead of curves. In larger dimensions, hyperplanes
    determine the bands.
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.ModifiedBandDepth()
>>> values = depth(fd)
>>> values.round(2)
array([ 0.5 , 0.83, 0.73, 0.67])
References:
López-Pintado, S., & Romo, J. (2009). On the Concept of
Depth for Functional Data. Journal of the American Statistical
Association, 104(486), 718–734.
https://doi.org/10.1198/jasa.2009.0108
"""
def __init__(self) -> None:
super().__init__(multivariate_depth=SimplicialDepth())
class BandDepth(Depth[FDataGrid]):
"""
Implementation of Band Depth for functional data.
The band depth of each sample is obtained by computing the fraction of the
bands determined by two sample curves containing the whole graph of the
    first one. In the case that the fdatagrid :term:`domain` dimension is 2,
    surfaces determine the bands instead of curves. In larger dimensions,
    hyperplanes determine the bands.
Examples:
>>> import skfda
>>>
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = skfda.exploratory.depth.BandDepth()
>>> depth(fd)
array([ 0.5 , 0.83333333, 0.5 , 0.5 ])
References:
López-Pintado, S., & Romo, J. (2009). On the Concept of
Depth for Functional Data. Journal of the American Statistical
Association, 104(486), 718–734.
https://doi.org/10.1198/jasa.2009.0108
"""
def fit(self, X: FDataGrid, y: object = None) -> BandDepth: # noqa: D102
if X.dim_codomain != 1:
raise NotImplementedError(
"Band depth not implemented for vector valued functions",
)
self._distribution = X
return self
def transform(self, X: FDataGrid) -> NDArrayFloat: # noqa: D102
num_in = np.zeros(shape=len(X), dtype=X.data_matrix.dtype)
n_total = 0
for f1, f2 in itertools.combinations(self._distribution, 2):
between_range_1 = (
(f1.data_matrix <= X.data_matrix)
& (X.data_matrix <= f2.data_matrix)
)
between_range_2 = (
(f2.data_matrix <= X.data_matrix)
& (X.data_matrix <= f1.data_matrix)
)
between_range = between_range_1 | between_range_2
num_in += np.all(
between_range,
axis=tuple(range(1, X.data_matrix.ndim)),
)
n_total += 1
return num_in / n_total
class DistanceBasedDepth(Depth[FDataGrid], BaseEstimator):
r"""
Functional depth based on a metric.
Parameters:
metric:
The metric to use as M in the following depth calculation
.. math::
D(x) = [1 + M(x, \mu)]^{-1}.
as explained in :footcite:`serfling+zuo_2000_depth_function`.
Examples:
>>> import skfda
>>> from skfda.exploratory.depth import DistanceBasedDepth
>>> from skfda.misc.metrics import MahalanobisDistance
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [0, 2, 4, 6, 8, 10]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> depth = DistanceBasedDepth(MahalanobisDistance(2))
>>> depth(fd)
array([ 0.41897777, 0.8058132 , 0.31097392, 0.31723619])
References:
.. footbibliography::
"""
def __init__(
self,
metric: Metric[T] = l2_distance,
) -> None:
self.metric = metric
def fit( # noqa: D102
self,
X: T,
y: object = None,
) -> DistanceBasedDepth:
"""Fit the model using X as training data.
Args:
X: FDataGrid with the training data or array matrix with shape
(n_samples, n_samples) if metric='precomputed'.
y: Ignored.
Returns:
self
"""
_fit_metric(self.metric, X)
self.mean_ = X.mean()
return self
def transform(self, X: T) -> NDArrayFloat: # noqa: D102
"""Compute the depth of given observations.
Args:
X: FDataGrid with the observations to use in the calculation.
Returns:
Array with the depths.
"""
        return 1 / (1 + self.metric(X, self.mean_))

scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/depth/_depth.py | _depth.py
from __future__ import annotations
from builtins import isinstance
from typing import TypeVar, Union
import numpy as np
from scipy import integrate
from scipy.stats import rankdata
from ...misc.metrics._lp_distances import l2_distance
from ...representation import FData, FDataGrid
from ...typing._metric import Metric
from ...typing._numpy import NDArrayFloat
from ..depth import Depth, ModifiedBandDepth
F = TypeVar('F', bound=FData)
T = TypeVar('T', bound=Union[NDArrayFloat, FData])
def mean(
X: F,
weights: NDArrayFloat | None = None,
) -> F:
"""
Compute the mean of all the samples in a FData object.
Args:
X: Object containing all the samples whose mean is wanted.
weights: Sample weight. By default, uniform weight are used.
Returns:
Mean of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
if weights is None:
return X.mean()
weight = (1 / np.sum(weights)) * weights
return (X * weight).sum()
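# Illustrative sketch (not part of the original module): a weighted functional
# mean; with weights (1, 3) the second curve contributes three times as much.
# The helper name ``_example_weighted_mean`` is hypothetical.
def _example_weighted_mean() -> FDataGrid:
    X = FDataGrid([[1.0, 2.0], [3.0, 4.0]], grid_points=[0.0, 1.0])
    weighted = mean(X, weights=np.array([1.0, 3.0]))
    assert np.allclose(weighted.data_matrix[0, ..., 0], [2.5, 3.5])
    return weighted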
def var(X: FData) -> FDataGrid:
"""
Compute the variance of a set of samples in a FData object.
Args:
X: Object containing all the set of samples whose variance is desired.
Returns:
Variance of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.var() # type: ignore[no-any-return]
def gmean(X: FDataGrid) -> FDataGrid:
"""
Compute the geometric mean of all the samples in a FDataGrid object.
Args:
X: Object containing all the samples whose geometric mean is wanted.
Returns:
Geometric mean of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.gmean()
def cov(X: FData) -> FDataGrid:
"""
Compute the covariance.
Calculates the covariance matrix representing the covariance of the
functional samples at the observation points.
Args:
X: Object containing different samples of a functional variable.
Returns:
Covariance of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.cov() # type: ignore[no-any-return]
def modified_epigraph_index(X: FDataGrid) -> NDArrayFloat:
"""
Calculate the Modified Epigraph Index of a FDataGrid.
    The MEI represents the mean time a curve stays below the other curves.
    In this case we calculate the MEI for each curve in relation
    to all the other curves of our dataset.
"""
interval_len = (
X.domain_range[0][1]
- X.domain_range[0][0]
)
    # Array containing, at each point, the number of curves
    # that are above it.
num_functions_above: NDArrayFloat = rankdata(
-X.data_matrix,
method='max',
axis=0,
) - 1
integrand = num_functions_above
for d, s in zip(X.domain_range, X.grid_points):
integrand = integrate.simps(
integrand,
x=s,
axis=1,
)
interval_len = d[1] - d[0]
integrand /= interval_len
integrand /= X.n_samples
return integrand.flatten()
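# Illustrative sketch (not part of the original module): for three constant,
# non-crossing curves the MEI is just the fraction of curves lying above each
# one.  The helper name ``_example_modified_epigraph_index`` is hypothetical.
def _example_modified_epigraph_index() -> NDArrayFloat:
    X = FDataGrid(
        [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]],
        grid_points=[0.0, 0.5, 1.0],
    )
    mei = modified_epigraph_index(X)
    assert np.allclose(mei, [2 / 3, 1 / 3, 0.0])
    return mei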
def depth_based_median(
X: T,
depth_method: Depth[T] | None = None,
) -> T:
"""
Compute the median based on a depth measure.
The depth based median is the deepest curve given a certain
depth measure.
Args:
X: Object containing different samples of a
functional variable.
depth_method: Depth method used to order the data. Defaults to
:func:`modified band
depth <skfda.exploratory.depth.ModifiedBandDepth>`.
Returns:
Object containing the computed depth_based median.
See also:
:func:`geometric_median`
"""
depth_method_used: Depth[T]
if depth_method is None:
assert isinstance(X, FDataGrid)
depth_method_used = ModifiedBandDepth()
else:
depth_method_used = depth_method
depth = depth_method_used(X)
indices_descending_depth = (-depth).argsort(axis=0)
# The median is the deepest curve
return X[indices_descending_depth[0]]
def _weighted_average(X: T, weights: NDArrayFloat) -> T:
if isinstance(X, FData):
return (X * weights).sum()
return (X.T * weights).T.sum(axis=0) # type: ignore[no-any-return]
def geometric_median(
X: T,
*,
tol: float = 1.e-8,
metric: Metric[T] = l2_distance,
) -> T:
r"""
Compute the geometric median.
The sample geometric median is the point that minimizes the :math:`L_1`
norm of the vector of distances to all observations:
.. math::
\underset{y \in L(\mathcal{T})}{\arg \min}
\sum_{i=1}^N \left \| x_i-y \right \|
The geometric median in the functional case is also described in
:footcite:`gervini_2008_estimation`.
Instead of the proposed algorithm, however, the current implementation
uses the corrected Weiszfeld algorithm to compute the median.
Args:
X: Object containing different samples of a
functional variable.
tol: tolerance used to check convergence.
        metric: metric used to compute the vector of distances. By
            default it is the :math:`L_2` distance.
Returns:
Object containing the computed geometric median.
Example:
>>> from skfda import FDataGrid
>>> data_matrix = [[0.5, 1, 2, .5], [1.5, 1, 4, .5]]
>>> X = FDataGrid(data_matrix)
>>> median = geometric_median(X)
>>> median.data_matrix[0, ..., 0]
array([ 1. , 1. , 3. , 0.5])
See also:
:func:`depth_based_median`
References:
.. footbibliography::
"""
weights = np.full(len(X), 1 / len(X))
median = _weighted_average(X, weights)
distances = metric(X, median)
while True:
zero_distances = (distances == 0)
n_zeros = np.sum(zero_distances)
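        # Weiszfeld update: each observation is weighted by the inverse of
        # its distance to the current estimate. If the estimate coincides
        # with one or more observations (zero distances), the update falls
        # back to a uniform average of those observations, avoiding a
        # division by zero.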
weights_new = (
(1 / distances) / np.sum(1 / distances) if n_zeros == 0
else (1 / n_zeros) * zero_distances
)
median_new = _weighted_average(X, weights_new)
if l2_distance(median_new, median) < tol:
return median_new
distances = metric(X, median_new)
weights, median = (weights_new, median_new)
def trim_mean(
X: F,
proportiontocut: float,
*,
depth_method: Depth[F] | None = None,
) -> FDataGrid:
"""Compute the trimmed means based on a depth measure.
The trimmed means consists in computing the mean function without a
percentage of least deep curves. That is, we first remove the least deep
curves and then we compute the mean as usual.
Note that in scipy the leftmost and rightmost proportiontocut data are
removed. In this case, as we order the data by the depth, we only remove
those that have the least depth values.
Args:
X: Object containing different samples of a
functional variable.
        proportiontocut: Proportion (between 0 and 1) of the least deep
            functions to remove. There is no universally good value, as it
            varies from dataset to dataset.
depth_method: Method used to order the data. Defaults to
:func:`modified band depth
<skfda.exploratory.depth.ModifiedBandDepth>`.
Returns:
Object containing the computed trimmed mean.
"""
if depth_method is None:
depth_method = ModifiedBandDepth()
n_samples_to_keep = (len(X) - int(len(X) * proportiontocut))
# compute the depth of each curve and store the indexes in descending order
depth = depth_method(X)
indices_descending_depth = (-depth).argsort(axis=0)
trimmed_curves = X[indices_descending_depth[:n_samples_to_keep]]
return trimmed_curves.mean() | scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/stats/_stats.py | _stats.py | from __future__ import annotations
from builtins import isinstance
from typing import TypeVar, Union
import numpy as np
from scipy import integrate
from scipy.stats import rankdata
from ...misc.metrics._lp_distances import l2_distance
from ...representation import FData, FDataGrid
from ...typing._metric import Metric
from ...typing._numpy import NDArrayFloat
from ..depth import Depth, ModifiedBandDepth
F = TypeVar('F', bound=FData)
T = TypeVar('T', bound=Union[NDArrayFloat, FData])
def mean(
X: F,
weights: NDArrayFloat | None = None,
) -> F:
"""
Compute the mean of all the samples in a FData object.
Args:
X: Object containing all the samples whose mean is wanted.
        weights: Sample weights. By default, uniform weights are used.
Returns:
Mean of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
if weights is None:
return X.mean()
weight = (1 / np.sum(weights)) * weights
return (X * weight).sum()
def var(X: FData) -> FDataGrid:
"""
Compute the variance of a set of samples in a FData object.
Args:
        X: Object containing the set of samples whose variance is desired.
Returns:
Variance of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.var() # type: ignore[no-any-return]
def gmean(X: FDataGrid) -> FDataGrid:
"""
Compute the geometric mean of all the samples in a FDataGrid object.
Args:
X: Object containing all the samples whose geometric mean is wanted.
Returns:
Geometric mean of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.gmean()
def cov(X: FData) -> FDataGrid:
"""
Compute the covariance.
Calculates the covariance matrix representing the covariance of the
functional samples at the observation points.
Args:
X: Object containing different samples of a functional variable.
Returns:
Covariance of all the samples in the original object, as a
:term:`functional data object` with just one sample.
"""
return X.cov() # type: ignore[no-any-return]
def modified_epigraph_index(X: FDataGrid) -> NDArrayFloat:
"""
Calculate the Modified Epigraph Index of a FDataGrid.
    The MEI of a curve represents the mean time that it stays below the
    other curves. Here the MEI is calculated for each curve with respect
    to all the other curves in the dataset.
"""
interval_len = (
X.domain_range[0][1]
- X.domain_range[0][0]
)
    # Array containing, at each point, the number of curves
    # that are above it.
num_functions_above: NDArrayFloat = rankdata(
-X.data_matrix,
method='max',
axis=0,
) - 1
integrand = num_functions_above
for d, s in zip(X.domain_range, X.grid_points):
integrand = integrate.simps(
integrand,
x=s,
axis=1,
)
interval_len = d[1] - d[0]
integrand /= interval_len
integrand /= X.n_samples
return integrand.flatten()
def depth_based_median(
X: T,
depth_method: Depth[T] | None = None,
) -> T:
"""
Compute the median based on a depth measure.
The depth based median is the deepest curve given a certain
depth measure.
Args:
X: Object containing different samples of a
functional variable.
depth_method: Depth method used to order the data. Defaults to
:func:`modified band
depth <skfda.exploratory.depth.ModifiedBandDepth>`.
Returns:
Object containing the computed depth_based median.
See also:
:func:`geometric_median`
"""
depth_method_used: Depth[T]
if depth_method is None:
assert isinstance(X, FDataGrid)
depth_method_used = ModifiedBandDepth()
else:
depth_method_used = depth_method
depth = depth_method_used(X)
indices_descending_depth = (-depth).argsort(axis=0)
# The median is the deepest curve
return X[indices_descending_depth[0]]
def _weighted_average(X: T, weights: NDArrayFloat) -> T:
if isinstance(X, FData):
return (X * weights).sum()
return (X.T * weights).T.sum(axis=0) # type: ignore[no-any-return]
def geometric_median(
X: T,
*,
tol: float = 1.e-8,
metric: Metric[T] = l2_distance,
) -> T:
r"""
Compute the geometric median.
The sample geometric median is the point that minimizes the :math:`L_1`
norm of the vector of distances to all observations:
.. math::
\underset{y \in L(\mathcal{T})}{\arg \min}
\sum_{i=1}^N \left \| x_i-y \right \|
The geometric median in the functional case is also described in
:footcite:`gervini_2008_estimation`.
Instead of the proposed algorithm, however, the current implementation
uses the corrected Weiszfeld algorithm to compute the median.
Args:
X: Object containing different samples of a
functional variable.
tol: tolerance used to check convergence.
        metric: metric used to compute the vector of distances. By
            default it is the :math:`L_2` distance.
Returns:
Object containing the computed geometric median.
Example:
>>> from skfda import FDataGrid
>>> data_matrix = [[0.5, 1, 2, .5], [1.5, 1, 4, .5]]
>>> X = FDataGrid(data_matrix)
>>> median = geometric_median(X)
>>> median.data_matrix[0, ..., 0]
array([ 1. , 1. , 3. , 0.5])
See also:
:func:`depth_based_median`
References:
.. footbibliography::
"""
weights = np.full(len(X), 1 / len(X))
median = _weighted_average(X, weights)
distances = metric(X, median)
while True:
zero_distances = (distances == 0)
n_zeros = np.sum(zero_distances)
weights_new = (
(1 / distances) / np.sum(1 / distances) if n_zeros == 0
else (1 / n_zeros) * zero_distances
)
median_new = _weighted_average(X, weights_new)
if l2_distance(median_new, median) < tol:
return median_new
distances = metric(X, median_new)
weights, median = (weights_new, median_new)
def trim_mean(
X: F,
proportiontocut: float,
*,
depth_method: Depth[F] | None = None,
) -> FDataGrid:
"""Compute the trimmed means based on a depth measure.
The trimmed means consists in computing the mean function without a
percentage of least deep curves. That is, we first remove the least deep
curves and then we compute the mean as usual.
Note that in scipy the leftmost and rightmost proportiontocut data are
removed. In this case, as we order the data by the depth, we only remove
those that have the least depth values.
Args:
X: Object containing different samples of a
functional variable.
        proportiontocut: Proportion (between 0 and 1) of the least deep
            functions to remove. There is no universally good value, as it
            varies from dataset to dataset.
depth_method: Method used to order the data. Defaults to
:func:`modified band depth
<skfda.exploratory.depth.ModifiedBandDepth>`.
Returns:
Object containing the computed trimmed mean.
"""
if depth_method is None:
depth_method = ModifiedBandDepth()
n_samples_to_keep = (len(X) - int(len(X) * proportiontocut))
# compute the depth of each curve and store the indexes in descending order
depth = depth_method(X)
indices_descending_depth = (-depth).argsort(axis=0)
trimmed_curves = X[indices_descending_depth[:n_samples_to_keep]]
return trimmed_curves.mean() | 0.980186 | 0.679205 |
from __future__ import annotations
import copy
import itertools
from functools import partial
from typing import Generator, List, Sequence, Tuple, Type, cast
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.backend_bases import Event
from matplotlib.figure import Figure
from matplotlib.widgets import Slider, Widget
from ._baseplot import BasePlot
from ._utils import _get_axes_shape, _get_figure_and_axes, _set_figure_layout
def _set_val_noevents(widget: Widget, val: float) -> None:
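    # Temporarily silence the widget's callbacks so that setting its value
    # programmatically does not re-trigger the update machinery.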
e = widget.eventson
widget.eventson = False
widget.set_val(val)
widget.eventson = e
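# Illustrative sketch of how MultipleDisplay (defined below) is meant to be
# used: two linked views of the same functional data set plus a slider
# ordered by a depth criterion. GraphPlot, make_gaussian_process and the
# import paths used here are assumptions based on the skfda layout; adapt
# them to your installation.
def _example_multiple_display() -> Figure:
    from skfda.datasets import make_gaussian_process  # assumed helper
    from ..depth import ModifiedBandDepth
    from ._magnitude_shape_plot import MagnitudeShapePlot
    from .representation import GraphPlot  # assumed module layout
    fd = make_gaussian_process(n_samples=20, random_state=1)
    depth = ModifiedBandDepth()(fd)
    display = MultipleDisplay(
        [GraphPlot(fd), MagnitudeShapePlot(fd)],
        criteria=depth,
        sliders=Slider,
        label_sliders="MBD",
    )
    return display.plot()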
class MultipleDisplay:
"""
MultipleDisplay class used to combine and interact with plots.
This module is used to combine different BasePlot objects that
represent the same curves or surfaces, and represent them
together in the same figure. Besides this, it includes
the functionality necessary to interact with the graphics
    by clicking the points, hovering over them, and so on. Picking a point
    allows us to see the selected function standing out among the others in all
the axes. It is also possible to add widgets to interact with the
plots.
Args:
displays: Baseplot objects that will be plotted in the fig.
criteria: Sequence of criteria used to order the points in the
slider widget. The size should be equal to sliders, as each
criterion is for one slider.
sliders: Sequence of widgets that will be plotted.
label_sliders: Label of each of the sliders.
        chart: Figure over which the graphs are plotted or axis over
where the graphs are plotted. If None and ax is also
None, the figure is initialized.
        fig: Figure over which the graphs are plotted in case ax is not
specified. If None and ax is also None, the figure is
initialized.
axes: Axis where the graphs are plotted. If None, see param fig.
Attributes:
length_data: Number of instances or curves of the different displays.
        clicked: Boolean indicating whether a point has been clicked.
selected_sample: Index of the function selected with the interactive
module or widgets.
"""
def __init__(
self,
displays: BasePlot | Sequence[BasePlot],
criteria: Sequence[float] | Sequence[Sequence[float]] = (),
sliders: Type[Widget] | Sequence[Type[Widget]] = (),
label_sliders: str | Sequence[str] | None = None,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
):
if isinstance(displays, BasePlot):
displays = (displays,)
self.displays = [copy.copy(d) for d in displays]
self._n_graphs = sum(d.n_subplots for d in self.displays)
self.length_data = next(
d.n_samples
for d in self.displays
if d.n_samples is not None
)
self.sliders: List[Widget] = []
self.selected_sample: int | None = None
if len(criteria) != 0 and not isinstance(criteria[0], Sequence):
criteria = cast(Sequence[float], criteria)
criteria = (criteria,)
criteria = cast(Sequence[Sequence[float]], criteria)
self.criteria = criteria
if not isinstance(sliders, Sequence):
sliders = (sliders,)
if isinstance(label_sliders, str):
label_sliders = (label_sliders,)
if len(criteria) != len(sliders):
raise ValueError(
f"Size of criteria, and sliders should be equal "
f"(have {len(criteria)} and {len(sliders)}).",
)
self._init_axes(
chart,
fig=fig,
axes=axes,
extra=len(criteria),
)
self._create_sliders(
criteria=criteria,
sliders=sliders,
label_sliders=label_sliders,
)
def _init_axes(
self,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
extra: int = 0,
) -> None:
"""
Initialize the axes and figure.
Args:
            chart: Figure over which the graphs are plotted or axis over
where the graphs are plotted. If None and ax is also
None, the figure is initialized.
            fig: Figure over which the graphs are plotted in case ax is not
specified. If None and ax is also None, the figure is
initialized.
axes: Axis where the graphs are plotted. If None, see param fig.
            extra: integer indicating the number of additional axes needed
                in order to plot the sliders.
"""
widget_aspect = 1 / 8
fig, axes = _get_figure_and_axes(chart, fig, axes)
if len(axes) not in {0, self._n_graphs + extra}:
raise ValueError("Invalid number of axes.")
n_rows, n_cols = _get_axes_shape(self._n_graphs + extra)
dim = list(
itertools.chain.from_iterable(
[d.dim] * d.n_subplots
for d in self.displays
),
) + [2] * extra
number_axes = n_rows * n_cols
fig, axes = _set_figure_layout(
fig=fig,
axes=axes,
n_axes=self._n_graphs + extra,
dim=dim,
)
for i in range(self._n_graphs, number_axes):
if i >= self._n_graphs + extra:
axes[i].set_visible(False)
else:
axes[i].set_box_aspect(widget_aspect)
self.fig = fig
self.axes = axes
def _create_sliders(
self,
*,
criteria: Sequence[Sequence[float]],
sliders: Sequence[Type[Widget]],
label_sliders: Sequence[str] | None = None,
) -> None:
"""
Create the sliders with the criteria selected.
Args:
criteria: Different criterion for each of the sliders.
sliders: Widget types.
label_sliders: Sequence of the names of each slider.
"""
for c in criteria:
if len(c) != self.length_data:
raise ValueError(
"Slider criteria should be of the same size as data",
)
for k, criterion in enumerate(criteria):
label = label_sliders[k] if label_sliders else None
self.add_slider(
axes=self.axes[self._n_graphs + k],
criterion=criterion,
widget_class=sliders[k],
label=label,
)
def plot(self) -> Figure:
"""
Plot Multiple Display method.
Plot the different BasePlot objects and widgets selected.
Activates the interactivity functionality of clicking and
hovering points. When clicking a point, the rest will be
made partially transparent in all the corresponding graphs.
Returns:
fig: figure object in which the displays and
widgets will be plotted.
"""
if self._n_graphs > 1:
for d in self.displays[1:]:
if (
d.n_samples is not None
and d.n_samples != self.length_data
):
raise ValueError(
"Length of some data sets are not equal ",
)
for ax in self.axes[:self._n_graphs]:
ax.clear()
int_index = 0
for disp in self.displays:
axes_needed = disp.n_subplots
end_index = axes_needed + int_index
disp._set_figure_and_axes(axes=self.axes[int_index:end_index])
disp.plot()
int_index = end_index
self.fig.canvas.mpl_connect('pick_event', self.pick)
self.fig.suptitle("Multiple display")
self.fig.tight_layout()
return self.fig
def pick(self, event: Event) -> None:
"""
Activate interactive functionality when picking a point.
Callback method that is activated when a point is picked.
If no point was clicked previously, all the points but the
one selected will be more transparent in all the graphs.
If a point was clicked already, this new point will be the
one highlighted among the rest. If the same point is clicked,
the initial state of the graphics is restored.
Args:
event: event object containing the artist of the point
picked.
"""
selected_sample = self._sample_from_artist(event.artist)
if selected_sample is not None:
if self.selected_sample == selected_sample:
self._deselect_samples()
else:
self._select_sample(selected_sample)
def _sample_from_artist(self, artist: Artist) -> int | None:
"""Return the sample corresponding to an artist."""
for d in self.displays:
if d.artists is None:
continue
for i, a in enumerate(d.axes_):
if a == artist.axes:
if len(d.axes_) == 1:
return np.where( # type: ignore[no-any-return]
d.artists == artist,
)[0][0]
else:
return np.where( # type: ignore[no-any-return]
d.artists[:, i] == artist,
)[0][0]
return None
def _visit_artists(self) -> Generator[Tuple[int, Artist], None, None]:
for i in range(self.length_data):
for d in self.displays:
if d.artists is None:
continue
yield from ((i, artist) for artist in np.ravel(d.artists[i]))
def _select_sample(self, selected_sample: int) -> None:
"""Reduce the transparency of all the points but the selected one."""
for i, artist in self._visit_artists():
artist.set_alpha(1.0 if i == selected_sample else 0.1)
for criterion, slider in zip(self.criteria, self.sliders):
val_widget = criterion[selected_sample]
_set_val_noevents(slider, val_widget)
self.selected_sample = selected_sample
self.fig.canvas.draw_idle()
def _deselect_samples(self) -> None:
"""Restore the original transparency of all the points."""
for _, artist in self._visit_artists():
artist.set_alpha(1)
self.selected_sample = None
self.fig.canvas.draw_idle()
def add_slider(
self,
axes: Axes,
criterion: Sequence[float],
widget_class: Type[Widget] = Slider,
label: str | None = None,
) -> None:
"""
Add the slider to the MultipleDisplay object.
Args:
axes: Axes for the widget.
criterion: Criterion used for the slider.
widget_class: Widget type.
label: Name of the slider.
"""
full_desc = "" if label is None else label
ordered_criterion_values, ordered_criterion_indexes = zip(
*sorted(zip(criterion, range(self.length_data))),
)
widget = widget_class(
ax=axes,
label=full_desc,
valmin=ordered_criterion_values[0],
valmax=ordered_criterion_values[-1],
valinit=ordered_criterion_values[0],
valstep=ordered_criterion_values,
valfmt="%.3g",
)
self.sliders.append(widget)
axes.annotate(
f"{ordered_criterion_values[0]:.3g}",
xy=(0, -0.5),
xycoords='axes fraction',
annotation_clip=False,
)
axes.annotate(
f"{ordered_criterion_values[-1]:.3g}",
xy=(0.95, -0.5),
xycoords='axes fraction',
annotation_clip=False,
)
on_changed_function = partial(
self._value_updated,
ordered_criterion_values=ordered_criterion_values,
ordered_criterion_indexes=ordered_criterion_indexes,
)
widget.on_changed(on_changed_function)
def _value_updated(
self,
value: float,
ordered_criterion_values: Sequence[float],
ordered_criterion_indexes: Sequence[int],
) -> None:
"""
Update the graphs when a widget is clicked.
Args:
value: Current value of the widget.
ordered_criterion_values: Ordered values of the criterion.
ordered_criterion_indexes: Sample numbers ordered using the
criterion.
"""
value_index = int(np.searchsorted(ordered_criterion_values, value))
self.selected_sample = ordered_criterion_indexes[value_index]
self._select_sample(self.selected_sample) | scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/visualization/_multiple_display.py | _multiple_display.py | from __future__ import annotations
import copy
import itertools
from functools import partial
from typing import Generator, List, Sequence, Tuple, Type, cast
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.backend_bases import Event
from matplotlib.figure import Figure
from matplotlib.widgets import Slider, Widget
from ._baseplot import BasePlot
from ._utils import _get_axes_shape, _get_figure_and_axes, _set_figure_layout
def _set_val_noevents(widget: Widget, val: float) -> None:
e = widget.eventson
widget.eventson = False
widget.set_val(val)
widget.eventson = e
class MultipleDisplay:
"""
MultipleDisplay class used to combine and interact with plots.
This module is used to combine different BasePlot objects that
represent the same curves or surfaces, and represent them
together in the same figure. Besides this, it includes
the functionality necessary to interact with the graphics
    by clicking the points, hovering over them, and so on. Picking a point
    allows us to see the selected function standing out among the others in all
the axes. It is also possible to add widgets to interact with the
plots.
Args:
displays: Baseplot objects that will be plotted in the fig.
criteria: Sequence of criteria used to order the points in the
slider widget. The size should be equal to sliders, as each
criterion is for one slider.
sliders: Sequence of widgets that will be plotted.
label_sliders: Label of each of the sliders.
        chart: Figure over which the graphs are plotted or axis over
where the graphs are plotted. If None and ax is also
None, the figure is initialized.
        fig: Figure over which the graphs are plotted in case ax is not
specified. If None and ax is also None, the figure is
initialized.
axes: Axis where the graphs are plotted. If None, see param fig.
Attributes:
length_data: Number of instances or curves of the different displays.
        clicked: Boolean indicating whether a point has been clicked.
selected_sample: Index of the function selected with the interactive
module or widgets.
"""
def __init__(
self,
displays: BasePlot | Sequence[BasePlot],
criteria: Sequence[float] | Sequence[Sequence[float]] = (),
sliders: Type[Widget] | Sequence[Type[Widget]] = (),
label_sliders: str | Sequence[str] | None = None,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
):
if isinstance(displays, BasePlot):
displays = (displays,)
self.displays = [copy.copy(d) for d in displays]
self._n_graphs = sum(d.n_subplots for d in self.displays)
self.length_data = next(
d.n_samples
for d in self.displays
if d.n_samples is not None
)
self.sliders: List[Widget] = []
self.selected_sample: int | None = None
if len(criteria) != 0 and not isinstance(criteria[0], Sequence):
criteria = cast(Sequence[float], criteria)
criteria = (criteria,)
criteria = cast(Sequence[Sequence[float]], criteria)
self.criteria = criteria
if not isinstance(sliders, Sequence):
sliders = (sliders,)
if isinstance(label_sliders, str):
label_sliders = (label_sliders,)
if len(criteria) != len(sliders):
raise ValueError(
f"Size of criteria, and sliders should be equal "
f"(have {len(criteria)} and {len(sliders)}).",
)
self._init_axes(
chart,
fig=fig,
axes=axes,
extra=len(criteria),
)
self._create_sliders(
criteria=criteria,
sliders=sliders,
label_sliders=label_sliders,
)
def _init_axes(
self,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
extra: int = 0,
) -> None:
"""
Initialize the axes and figure.
Args:
            chart: Figure over which the graphs are plotted or axis over
where the graphs are plotted. If None and ax is also
None, the figure is initialized.
            fig: Figure over which the graphs are plotted in case ax is not
specified. If None and ax is also None, the figure is
initialized.
axes: Axis where the graphs are plotted. If None, see param fig.
            extra: integer indicating the number of additional axes needed
                in order to plot the sliders.
"""
widget_aspect = 1 / 8
fig, axes = _get_figure_and_axes(chart, fig, axes)
if len(axes) not in {0, self._n_graphs + extra}:
raise ValueError("Invalid number of axes.")
n_rows, n_cols = _get_axes_shape(self._n_graphs + extra)
dim = list(
itertools.chain.from_iterable(
[d.dim] * d.n_subplots
for d in self.displays
),
) + [2] * extra
number_axes = n_rows * n_cols
fig, axes = _set_figure_layout(
fig=fig,
axes=axes,
n_axes=self._n_graphs + extra,
dim=dim,
)
for i in range(self._n_graphs, number_axes):
if i >= self._n_graphs + extra:
axes[i].set_visible(False)
else:
axes[i].set_box_aspect(widget_aspect)
self.fig = fig
self.axes = axes
def _create_sliders(
self,
*,
criteria: Sequence[Sequence[float]],
sliders: Sequence[Type[Widget]],
label_sliders: Sequence[str] | None = None,
) -> None:
"""
Create the sliders with the criteria selected.
Args:
criteria: Different criterion for each of the sliders.
sliders: Widget types.
label_sliders: Sequence of the names of each slider.
"""
for c in criteria:
if len(c) != self.length_data:
raise ValueError(
"Slider criteria should be of the same size as data",
)
for k, criterion in enumerate(criteria):
label = label_sliders[k] if label_sliders else None
self.add_slider(
axes=self.axes[self._n_graphs + k],
criterion=criterion,
widget_class=sliders[k],
label=label,
)
def plot(self) -> Figure:
"""
Plot Multiple Display method.
Plot the different BasePlot objects and widgets selected.
Activates the interactivity functionality of clicking and
hovering points. When clicking a point, the rest will be
made partially transparent in all the corresponding graphs.
Returns:
fig: figure object in which the displays and
widgets will be plotted.
"""
if self._n_graphs > 1:
for d in self.displays[1:]:
if (
d.n_samples is not None
and d.n_samples != self.length_data
):
raise ValueError(
"Length of some data sets are not equal ",
)
for ax in self.axes[:self._n_graphs]:
ax.clear()
int_index = 0
for disp in self.displays:
axes_needed = disp.n_subplots
end_index = axes_needed + int_index
disp._set_figure_and_axes(axes=self.axes[int_index:end_index])
disp.plot()
int_index = end_index
self.fig.canvas.mpl_connect('pick_event', self.pick)
self.fig.suptitle("Multiple display")
self.fig.tight_layout()
return self.fig
def pick(self, event: Event) -> None:
"""
Activate interactive functionality when picking a point.
Callback method that is activated when a point is picked.
If no point was clicked previously, all the points but the
one selected will be more transparent in all the graphs.
If a point was clicked already, this new point will be the
one highlighted among the rest. If the same point is clicked,
the initial state of the graphics is restored.
Args:
event: event object containing the artist of the point
picked.
"""
selected_sample = self._sample_from_artist(event.artist)
if selected_sample is not None:
if self.selected_sample == selected_sample:
self._deselect_samples()
else:
self._select_sample(selected_sample)
def _sample_from_artist(self, artist: Artist) -> int | None:
"""Return the sample corresponding to an artist."""
for d in self.displays:
if d.artists is None:
continue
for i, a in enumerate(d.axes_):
if a == artist.axes:
if len(d.axes_) == 1:
return np.where( # type: ignore[no-any-return]
d.artists == artist,
)[0][0]
else:
return np.where( # type: ignore[no-any-return]
d.artists[:, i] == artist,
)[0][0]
return None
def _visit_artists(self) -> Generator[Tuple[int, Artist], None, None]:
for i in range(self.length_data):
for d in self.displays:
if d.artists is None:
continue
yield from ((i, artist) for artist in np.ravel(d.artists[i]))
def _select_sample(self, selected_sample: int) -> None:
"""Reduce the transparency of all the points but the selected one."""
for i, artist in self._visit_artists():
artist.set_alpha(1.0 if i == selected_sample else 0.1)
for criterion, slider in zip(self.criteria, self.sliders):
val_widget = criterion[selected_sample]
_set_val_noevents(slider, val_widget)
self.selected_sample = selected_sample
self.fig.canvas.draw_idle()
def _deselect_samples(self) -> None:
"""Restore the original transparency of all the points."""
for _, artist in self._visit_artists():
artist.set_alpha(1)
self.selected_sample = None
self.fig.canvas.draw_idle()
def add_slider(
self,
axes: Axes,
criterion: Sequence[float],
widget_class: Type[Widget] = Slider,
label: str | None = None,
) -> None:
"""
Add the slider to the MultipleDisplay object.
Args:
axes: Axes for the widget.
criterion: Criterion used for the slider.
widget_class: Widget type.
label: Name of the slider.
"""
full_desc = "" if label is None else label
ordered_criterion_values, ordered_criterion_indexes = zip(
*sorted(zip(criterion, range(self.length_data))),
)
widget = widget_class(
ax=axes,
label=full_desc,
valmin=ordered_criterion_values[0],
valmax=ordered_criterion_values[-1],
valinit=ordered_criterion_values[0],
valstep=ordered_criterion_values,
valfmt="%.3g",
)
self.sliders.append(widget)
axes.annotate(
f"{ordered_criterion_values[0]:.3g}",
xy=(0, -0.5),
xycoords='axes fraction',
annotation_clip=False,
)
axes.annotate(
f"{ordered_criterion_values[-1]:.3g}",
xy=(0.95, -0.5),
xycoords='axes fraction',
annotation_clip=False,
)
on_changed_function = partial(
self._value_updated,
ordered_criterion_values=ordered_criterion_values,
ordered_criterion_indexes=ordered_criterion_indexes,
)
widget.on_changed(on_changed_function)
def _value_updated(
self,
value: float,
ordered_criterion_values: Sequence[float],
ordered_criterion_indexes: Sequence[int],
) -> None:
"""
Update the graphs when a widget is clicked.
Args:
value: Current value of the widget.
ordered_criterion_values: Ordered values of the criterion.
ordered_criterion_indexes: Sample numbers ordered using the
criterion.
"""
value_index = int(np.searchsorted(ordered_criterion_values, value))
self.selected_sample = ordered_criterion_indexes[value_index]
self._select_sample(self.selected_sample) | 0.948858 | 0.437042 |
from __future__ import annotations
from typing import Any, Sequence
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.colors import Colormap
from matplotlib.figure import Figure
from matplotlib.patches import Ellipse
from ...representation import FDataGrid
from ...typing._numpy import NDArrayFloat, NDArrayInt
from ..depth import Depth
from ..outliers import MSPlotOutlierDetector
from ._baseplot import BasePlot
class MagnitudeShapePlot(BasePlot):
r"""
Implementation of the magnitude-shape plot.
This plot, which is based on the calculation of the :func:`directional
outlyingness <fda.magnitude_shape_plot.directional_outlyingness>`
of each of the samples, serves as a visualization tool for the centrality
of curves. Furthermore, an outlier detection procedure is included.
The norm of the mean of the directional outlyingness (:math:`\lVert
\mathbf{MO}\rVert`) is plotted in the x-axis, and the variation of the
directional outlyingness (:math:`VO`) in the y-axis.
The outliers are detected using an instance of
:class:`MSPlotOutlierDetector`.
For more information see :footcite:ts:`dai+genton_2018_visualization`.
Args:
fdata: Object containing the data.
multivariate_depth:
Method used to order the data. Defaults to :class:`projection
depth <fda.depth_measures.multivariate.ProjectionDepth>`.
pointwise_weights: an array containing the
            weights of each point of discretisation where values have
been recorded.
cutoff_factor: Factor that multiplies the cutoff value, in order to
consider more or less curves as outliers.
assume_centered: If True, the support of the
robust location and the covariance estimates is computed, and a
covariance estimate is recomputed from it, without centering
the data. Useful to work with data whose mean is significantly
equal to zero but is not exactly zero. If False, default value,
the robust location and covariance are directly computed with
the FastMCD algorithm without additional treatment.
support_fraction: The
proportion of points to be included in the support of the
raw MCD estimate.
Default is None, which implies that the minimum value of
support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state: If int,
random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number
generator; If None, the random number generator is the
RandomState instance used by np.random. By default, it is 0.
ellipsoid: Whether to draw the non outlying ellipsoid.
Attributes:
points(numpy.ndarray): 2-dimensional matrix where each row
contains the points plotted in the graph.
outliers (1-D array, (fdata.n_samples,)): Contains 1 or 0 to denote
            if a sample is an outlier or not, respectively.
colormap(matplotlib.pyplot.LinearSegmentedColormap, optional): Colormap
from which the colors of the plot are extracted. Defaults to
'seismic'.
color (float, optional): Tone of the colormap in which the nonoutlier
points are plotted. Defaults to 0.2.
outliercol (float, optional): Tone of the colormap in which the
outliers are plotted. Defaults to 0.8.
xlabel (string, optional): Label of the x-axis. Defaults to 'MO',
mean of the directional outlyingness.
ylabel (string, optional): Label of the y-axis. Defaults to 'VO',
variation of the directional outlyingness.
        title (string, optional): Title of the plot. Defaults to the
            dataset name, or to an empty string if it is not set.
Representation in a Jupyter notebook:
.. jupyter-execute::
from skfda.datasets import make_gaussian_process
from skfda.misc.covariances import Exponential
from skfda.exploratory.visualization import MagnitudeShapePlot
fd = make_gaussian_process(
n_samples=20, cov=Exponential(), random_state=1)
MagnitudeShapePlot(fd)
Example:
>>> import skfda
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [ 0., 2., 4., 6., 8., 10.]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> MagnitudeShapePlot(fd)
MagnitudeShapePlot(
fdata=FDataGrid(
array([[[ 1. ],
[ 1. ],
[ 2. ],
[ 3. ],
[ 2.5],
[ 2. ]],
[[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]],
[[-1. ],
[-1. ],
[-0.5],
[ 1. ],
[ 1. ],
[ 0.5]],
[[-0.5],
[-0.5],
[-0.5],
[-1. ],
[-1. ],
[-1. ]]]),
grid_points=(array([ 0., 2., 4., 6., 8., 10.]),),
domain_range=((0.0, 10.0),),
...),
multivariate_depth=None,
pointwise_weights=None,
cutoff_factor=1,
points=array([[ 1.66666667, 0.12777778],
[ 0. , 0. ],
[-0.8 , 0.17666667],
[-1.74444444, 0.94395062]]),
outliers=array([False, False, False, False]),
colormap=seismic,
color=0.2,
outliercol=0.8,
xlabel='MO',
ylabel='VO',
title='')
References:
.. footbibliography::
"""
def __init__(
self,
fdata: FDataGrid,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
ellipsoid: bool = True,
**kwargs: Any,
) -> None:
BasePlot.__init__(
self,
chart,
fig=fig,
axes=axes,
)
if fdata.dim_codomain > 1:
raise NotImplementedError(
"Only support 1 dimension on the codomain.")
self.outlier_detector = MSPlotOutlierDetector(**kwargs)
y = self.outlier_detector.fit_predict(fdata)
outliers = (y == -1)
self.ellipsoid = ellipsoid
self._fdata = fdata
self._outliers = outliers
self._colormap = plt.cm.get_cmap('seismic')
self._color = 0.2
self._outliercol = 0.8
self.xlabel = 'MO'
self.ylabel = 'VO'
self.title = (
"" if self.fdata.dataset_name is None else self.fdata.dataset_name
)
@property
def fdata(self) -> FDataGrid:
return self._fdata
@property
def multivariate_depth(self) -> Depth[NDArrayFloat] | None:
return self.outlier_detector.multivariate_depth
@property
def pointwise_weights(self) -> NDArrayFloat | None:
return self.outlier_detector.pointwise_weights
@property
def cutoff_factor(self) -> float:
return self.outlier_detector.cutoff_factor
@property
def points(self) -> NDArrayFloat:
return self.outlier_detector.points_
@property
def outliers(self) -> NDArrayInt:
return self._outliers # type: ignore[no-any-return]
@property
def colormap(self) -> Colormap:
return self._colormap
@colormap.setter
def colormap(self, value: Colormap) -> None:
if not isinstance(value, matplotlib.colors.Colormap):
raise ValueError(
"colormap must be of type "
"matplotlib.colors.Colormap",
)
self._colormap = value
@property
def color(self) -> float:
return self._color
@color.setter
def color(self, value: float) -> None:
if value < 0 or value > 1:
raise ValueError(
"color must be a number between 0 and 1.")
self._color = value
@property
def outliercol(self) -> float:
return self._outliercol
@outliercol.setter
def outliercol(self, value: float) -> None:
if value < 0 or value > 1:
raise ValueError(
"outcol must be a number between 0 and 1.")
self._outliercol = value
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Axes,
) -> None:
self.artists = np.zeros(
(self.n_samples, 1),
dtype=Artist,
)
colors = np.zeros((self.fdata.n_samples, 4))
colors[np.where(self.outliers == 1)] = self.colormap(self.outliercol)
colors[np.where(self.outliers == 0)] = self.colormap(self.color)
colors_rgba = [tuple(i) for i in colors]
if self.ellipsoid:
center = self.outlier_detector.cov_.location_
prec = self.outlier_detector.cov_.get_precision()
K = (
self.outlier_detector.cutoff_value_
/ self.outlier_detector.scaling_
)
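            # The non-outlying region is the ellipse
            # (x - center)^T prec (x - center) <= K, whose semi-axes have
            # length sqrt(K / eigval) along the eigenvectors of the
            # precision matrix; that is what is computed below.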
eigvals, eigvecs = np.linalg.eigh(prec)
a, b = np.sqrt(K / eigvals)
if eigvecs[0, 1] * eigvecs[1, 0] > 0:
eigvecs[:, 0] *= -1
angle = np.rad2deg(np.arctan2(eigvecs[1, 0], eigvecs[0, 0]))
ellipse = Ellipse(
xy=center,
width=2 * a,
height=2 * b,
angle=angle,
facecolor='C0',
alpha=0.1,
)
axes[0].add_patch(ellipse)
for i, _ in enumerate(self.points[:, 0].ravel()):
self.artists[i, 0] = axes[0].scatter(
self.points[:, 0].ravel()[i],
self.points[:, 1].ravel()[i],
color=colors_rgba[i],
picker=True,
pickradius=2,
)
axes[0].set_xlabel(self.xlabel)
axes[0].set_ylabel(self.ylabel)
axes[0].set_title(self.title)
def __repr__(self) -> str:
"""Return repr(self)."""
return (
f"MagnitudeShapePlot("
f"\nfdata={repr(self.fdata)},"
f"\nmultivariate_depth={self.multivariate_depth},"
f"\npointwise_weights={repr(self.pointwise_weights)},"
f"\ncutoff_factor={repr(self.cutoff_factor)},"
f"\npoints={repr(self.points)},"
f"\noutliers={repr(self.outliers)},"
f"\ncolormap={self.colormap.name},"
f"\ncolor={repr(self.color)},"
f"\noutliercol={repr(self.outliercol)},"
f"\nxlabel={repr(self.xlabel)},"
f"\nylabel={repr(self.ylabel)},"
f"\ntitle={repr(self.title)})"
).replace('\n', '\n ') | scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/visualization/_magnitude_shape_plot.py | _magnitude_shape_plot.py | from __future__ import annotations
from typing import Any, Sequence
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.colors import Colormap
from matplotlib.figure import Figure
from matplotlib.patches import Ellipse
from ...representation import FDataGrid
from ...typing._numpy import NDArrayFloat, NDArrayInt
from ..depth import Depth
from ..outliers import MSPlotOutlierDetector
from ._baseplot import BasePlot
class MagnitudeShapePlot(BasePlot):
r"""
Implementation of the magnitude-shape plot.
This plot, which is based on the calculation of the :func:`directional
outlyingness <fda.magnitude_shape_plot.directional_outlyingness>`
of each of the samples, serves as a visualization tool for the centrality
of curves. Furthermore, an outlier detection procedure is included.
The norm of the mean of the directional outlyingness (:math:`\lVert
\mathbf{MO}\rVert`) is plotted in the x-axis, and the variation of the
directional outlyingness (:math:`VO`) in the y-axis.
The outliers are detected using an instance of
:class:`MSPlotOutlierDetector`.
For more information see :footcite:ts:`dai+genton_2018_visualization`.
Args:
fdata: Object containing the data.
multivariate_depth:
Method used to order the data. Defaults to :class:`projection
depth <fda.depth_measures.multivariate.ProjectionDepth>`.
pointwise_weights: an array containing the
            weights of each point of discretisation where values have
been recorded.
cutoff_factor: Factor that multiplies the cutoff value, in order to
consider more or less curves as outliers.
assume_centered: If True, the support of the
robust location and the covariance estimates is computed, and a
covariance estimate is recomputed from it, without centering
the data. Useful to work with data whose mean is significantly
equal to zero but is not exactly zero. If False, default value,
the robust location and covariance are directly computed with
the FastMCD algorithm without additional treatment.
support_fraction: The
proportion of points to be included in the support of the
raw MCD estimate.
Default is None, which implies that the minimum value of
support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state: If int,
random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number
generator; If None, the random number generator is the
RandomState instance used by np.random. By default, it is 0.
ellipsoid: Whether to draw the non outlying ellipsoid.
Attributes:
points(numpy.ndarray): 2-dimensional matrix where each row
contains the points plotted in the graph.
outliers (1-D array, (fdata.n_samples,)): Contains 1 or 0 to denote
            if a sample is an outlier or not, respectively.
colormap(matplotlib.pyplot.LinearSegmentedColormap, optional): Colormap
from which the colors of the plot are extracted. Defaults to
'seismic'.
color (float, optional): Tone of the colormap in which the nonoutlier
points are plotted. Defaults to 0.2.
outliercol (float, optional): Tone of the colormap in which the
outliers are plotted. Defaults to 0.8.
xlabel (string, optional): Label of the x-axis. Defaults to 'MO',
mean of the directional outlyingness.
ylabel (string, optional): Label of the y-axis. Defaults to 'VO',
variation of the directional outlyingness.
        title (string, optional): Title of the plot. Defaults to the
            dataset name, or to an empty string if it is not set.
Representation in a Jupyter notebook:
.. jupyter-execute::
from skfda.datasets import make_gaussian_process
from skfda.misc.covariances import Exponential
from skfda.exploratory.visualization import MagnitudeShapePlot
fd = make_gaussian_process(
n_samples=20, cov=Exponential(), random_state=1)
MagnitudeShapePlot(fd)
Example:
>>> import skfda
>>> data_matrix = [[1, 1, 2, 3, 2.5, 2],
... [0.5, 0.5, 1, 2, 1.5, 1],
... [-1, -1, -0.5, 1, 1, 0.5],
... [-0.5, -0.5, -0.5, -1, -1, -1]]
>>> grid_points = [ 0., 2., 4., 6., 8., 10.]
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> MagnitudeShapePlot(fd)
MagnitudeShapePlot(
fdata=FDataGrid(
array([[[ 1. ],
[ 1. ],
[ 2. ],
[ 3. ],
[ 2.5],
[ 2. ]],
[[ 0.5],
[ 0.5],
[ 1. ],
[ 2. ],
[ 1.5],
[ 1. ]],
[[-1. ],
[-1. ],
[-0.5],
[ 1. ],
[ 1. ],
[ 0.5]],
[[-0.5],
[-0.5],
[-0.5],
[-1. ],
[-1. ],
[-1. ]]]),
grid_points=(array([ 0., 2., 4., 6., 8., 10.]),),
domain_range=((0.0, 10.0),),
...),
multivariate_depth=None,
pointwise_weights=None,
cutoff_factor=1,
points=array([[ 1.66666667, 0.12777778],
[ 0. , 0. ],
[-0.8 , 0.17666667],
[-1.74444444, 0.94395062]]),
outliers=array([False, False, False, False]),
colormap=seismic,
color=0.2,
outliercol=0.8,
xlabel='MO',
ylabel='VO',
title='')
References:
.. footbibliography::
"""
def __init__(
self,
fdata: FDataGrid,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Sequence[Axes] | None = None,
ellipsoid: bool = True,
**kwargs: Any,
) -> None:
BasePlot.__init__(
self,
chart,
fig=fig,
axes=axes,
)
if fdata.dim_codomain > 1:
raise NotImplementedError(
"Only support 1 dimension on the codomain.")
self.outlier_detector = MSPlotOutlierDetector(**kwargs)
y = self.outlier_detector.fit_predict(fdata)
outliers = (y == -1)
self.ellipsoid = ellipsoid
self._fdata = fdata
self._outliers = outliers
self._colormap = plt.cm.get_cmap('seismic')
self._color = 0.2
self._outliercol = 0.8
self.xlabel = 'MO'
self.ylabel = 'VO'
self.title = (
"" if self.fdata.dataset_name is None else self.fdata.dataset_name
)
@property
def fdata(self) -> FDataGrid:
return self._fdata
@property
def multivariate_depth(self) -> Depth[NDArrayFloat] | None:
return self.outlier_detector.multivariate_depth
@property
def pointwise_weights(self) -> NDArrayFloat | None:
return self.outlier_detector.pointwise_weights
@property
def cutoff_factor(self) -> float:
return self.outlier_detector.cutoff_factor
@property
def points(self) -> NDArrayFloat:
return self.outlier_detector.points_
@property
def outliers(self) -> NDArrayInt:
return self._outliers # type: ignore[no-any-return]
@property
def colormap(self) -> Colormap:
return self._colormap
@colormap.setter
def colormap(self, value: Colormap) -> None:
if not isinstance(value, matplotlib.colors.Colormap):
raise ValueError(
"colormap must be of type "
"matplotlib.colors.Colormap",
)
self._colormap = value
@property
def color(self) -> float:
return self._color
@color.setter
def color(self, value: float) -> None:
if value < 0 or value > 1:
raise ValueError(
"color must be a number between 0 and 1.")
self._color = value
@property
def outliercol(self) -> float:
return self._outliercol
@outliercol.setter
def outliercol(self, value: float) -> None:
if value < 0 or value > 1:
raise ValueError(
"outcol must be a number between 0 and 1.")
self._outliercol = value
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Axes,
) -> None:
self.artists = np.zeros(
(self.n_samples, 1),
dtype=Artist,
)
colors = np.zeros((self.fdata.n_samples, 4))
colors[np.where(self.outliers == 1)] = self.colormap(self.outliercol)
colors[np.where(self.outliers == 0)] = self.colormap(self.color)
colors_rgba = [tuple(i) for i in colors]
if self.ellipsoid:
center = self.outlier_detector.cov_.location_
prec = self.outlier_detector.cov_.get_precision()
K = (
self.outlier_detector.cutoff_value_
/ self.outlier_detector.scaling_
)
eigvals, eigvecs = np.linalg.eigh(prec)
a, b = np.sqrt(K / eigvals)
if eigvecs[0, 1] * eigvecs[1, 0] > 0:
eigvecs[:, 0] *= -1
angle = np.rad2deg(np.arctan2(eigvecs[1, 0], eigvecs[0, 0]))
ellipse = Ellipse(
xy=center,
width=2 * a,
height=2 * b,
angle=angle,
facecolor='C0',
alpha=0.1,
)
axes[0].add_patch(ellipse)
for i, _ in enumerate(self.points[:, 0].ravel()):
self.artists[i, 0] = axes[0].scatter(
self.points[:, 0].ravel()[i],
self.points[:, 1].ravel()[i],
color=colors_rgba[i],
picker=True,
pickradius=2,
)
axes[0].set_xlabel(self.xlabel)
axes[0].set_ylabel(self.ylabel)
axes[0].set_title(self.title)
def __repr__(self) -> str:
"""Return repr(self)."""
return (
f"MagnitudeShapePlot("
f"\nfdata={repr(self.fdata)},"
f"\nmultivariate_depth={self.multivariate_depth},"
f"\npointwise_weights={repr(self.pointwise_weights)},"
f"\ncutoff_factor={repr(self.cutoff_factor)},"
f"\npoints={repr(self.points)},"
f"\noutliers={repr(self.outliers)},"
f"\ncolormap={self.colormap.name},"
f"\ncolor={repr(self.color)},"
f"\noutliercol={repr(self.outliercol)},"
f"\nxlabel={repr(self.xlabel)},"
f"\nylabel={repr(self.ylabel)},"
f"\ntitle={repr(self.title)})"
).replace('\n', '\n ') | 0.959317 | 0.741545 |
from __future__ import annotations
from typing import Sequence, Tuple
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.collections import PatchCollection
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from matplotlib.ticker import MaxNLocator
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted
from typing_extensions import Protocol
from ...misc.validation import check_fdata_same_dimensions
from ...representation import FData, FDataGrid
from ...typing._numpy import NDArrayFloat, NDArrayInt
from ._baseplot import BasePlot
from ._utils import ColorLike, _darken, _set_labels
class ClusteringEstimator(Protocol):
@property
def n_clusters(self) -> int:
pass
@property
def cluster_centers_(self) -> FDataGrid:
pass
@property
def labels_(self) -> NDArrayInt:
pass
def fit(self, X: FDataGrid) -> ClusteringEstimator:
pass
def predict(self, X: FDataGrid) -> NDArrayInt:
pass
class FuzzyClusteringEstimator(ClusteringEstimator, Protocol):
def predict_proba(self, X: FDataGrid) -> NDArrayFloat:
pass
def _plot_clustering_checks(
estimator: ClusteringEstimator,
fdata: FData,
sample_colors: Sequence[ColorLike] | None,
sample_labels: Sequence[str] | None,
cluster_colors: Sequence[ColorLike] | None,
cluster_labels: Sequence[str] | None,
center_colors: Sequence[ColorLike] | None,
center_labels: Sequence[str] | None,
) -> None:
"""Check the arguments."""
if (
sample_colors is not None
and len(sample_colors) != fdata.n_samples
):
raise ValueError(
"sample_colors must contain a color for each sample.",
)
if (
sample_labels is not None
and len(sample_labels) != fdata.n_samples
):
raise ValueError(
"sample_labels must contain a label for each sample.",
)
if (
cluster_colors is not None
and len(cluster_colors) != estimator.n_clusters
):
raise ValueError(
"cluster_colors must contain a color for each cluster.",
)
if (
cluster_labels is not None
and len(cluster_labels) != estimator.n_clusters
):
raise ValueError(
"cluster_labels must contain a label for each cluster.",
)
if (
center_colors is not None
and len(center_colors) != estimator.n_clusters
):
raise ValueError(
"center_colors must contain a color for each center.",
)
if (
center_labels is not None
and len(center_labels) != estimator.n_clusters
):
raise ValueError(
"centers_labels must contain a label for each center.",
)
def _get_labels(
x_label: str | None,
y_label: str | None,
title: str | None,
xlabel_str: str,
) -> Tuple[str, str, str]:
"""
Get the axes labels.
Set the arguments *xlabel*, *ylabel*, *title* passed to the plot
functions :func:`plot_cluster_lines
<skfda.exploratory.visualization.clustering_plots.plot_cluster_lines>` and
:func:`plot_cluster_bars
<skfda.exploratory.visualization.clustering_plots.plot_cluster_bars>`,
in case they are not set yet.
Args:
x_label: Label for the x-axes.
y_label: Label for the y-axes.
title: Title for the figure where the clustering results are
            plotted.
xlabel_str: In case xlabel is None, string to use for the labels
in the x-axes.
Returns:
xlabel: Labels for the x-axes.
ylabel: Labels for the y-axes.
title: Title for the figure where the clustering results are
plotted.
"""
if x_label is None:
x_label = xlabel_str
if y_label is None:
y_label = "Degree of membership"
if title is None:
title = "Degrees of membership of the samples to each cluster"
return x_label, y_label, title
class ClusterPlot(BasePlot):
"""
ClusterPlot class.
Args:
estimator: estimator used to calculate the
clusters.
        fdata: contains the samples which are grouped
into different clusters.
fig: figure over which the graphs are plotted in
case ax is not specified. If None and ax is also None, the figure
is initialized.
axes: axis over where the graphs are plotted.
If None, see param fig.
n_rows: designates the number of rows of the figure to plot the
different dimensions of the image. Only specified if fig and
ax are None.
n_cols: designates the number of columns of the figure to plot
the different dimensions of the image. Only specified if fig
and ax are None.
sample_labels: contains in order the labels of each
sample of the fdatagrid.
cluster_colors: contains in order the colors of each
cluster the samples of the fdatagrid are classified into.
cluster_labels: contains in order the names of each
cluster the samples of the fdatagrid are classified into.
center_colors: contains in order the colors of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_labels: contains in order the labels of each
centroid of the clusters the samples of the fdatagrid are
classified into.
center_width: width of the centroid curves.
colormap: colormap from which the colors of the plot are
taken. Defaults to `rainbow`.
"""
def __init__(
self,
estimator: ClusteringEstimator,
fdata: FDataGrid,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
n_rows: int | None = None,
n_cols: int | None = None,
sample_labels: Sequence[str] | None = None,
cluster_colors: Sequence[ColorLike] | None = None,
cluster_labels: Sequence[str] | None = None,
center_colors: Sequence[ColorLike] | None = None,
center_labels: Sequence[str] | None = None,
center_width: int = 3,
colormap: matplotlib.colors.Colormap = None,
) -> None:
if colormap is None:
colormap = plt.cm.get_cmap('rainbow')
super().__init__(
chart,
fig=fig,
axes=axes,
n_rows=n_rows,
n_cols=n_cols,
)
self.fdata = fdata
self.estimator = estimator
self.sample_labels = sample_labels
self.cluster_colors = cluster_colors
self.cluster_labels = cluster_labels
self.center_colors = center_colors
self.center_labels = center_labels
self.center_width = center_width
self.colormap = colormap
@property
def n_subplots(self) -> int:
return self.fdata.dim_codomain
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot_clusters(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
"""Implement the plot of the FDataGrid samples by clusters."""
_plot_clustering_checks(
estimator=self.estimator,
fdata=self.fdata,
sample_colors=None,
sample_labels=self.sample_labels,
cluster_colors=self.cluster_colors,
cluster_labels=self.cluster_labels,
center_colors=self.center_colors,
center_labels=self.center_labels,
)
if self.sample_labels is None:
self.sample_labels = [
f'$SAMPLE: {i}$' for i in range(self.fdata.n_samples)
]
if self.cluster_colors is None:
self.cluster_colors = self.colormap(
np.arange(self.estimator.n_clusters)
/ (self.estimator.n_clusters - 1),
)
if self.cluster_labels is None:
self.cluster_labels = [
f'$CLUSTER: {i}$' for i in range(self.estimator.n_clusters)
]
if self.center_colors is None:
self.center_colors = [_darken(c, 0.5) for c in self.cluster_colors]
if self.center_labels is None:
self.center_labels = [
f'$CENTER: {i}$' for i in range(self.estimator.n_clusters)
]
colors_by_cluster = np.asarray(self.cluster_colors)[self.labels]
patches = [
mpatches.Patch(
color=self.cluster_colors[i],
label=self.cluster_labels[i],
)
for i in range(self.estimator.n_clusters)
]
artists = [
axes[j].plot(
self.fdata.grid_points[0],
self.fdata.data_matrix[i, :, j],
c=colors_by_cluster[i],
label=self.sample_labels[i],
)
for j in range(self.fdata.dim_codomain)
for i in range(self.fdata.n_samples)
]
self.artists = np.array(artists).reshape(
(self.n_subplots, self.n_samples),
).T
for j in range(self.fdata.dim_codomain):
for i in range(self.estimator.n_clusters):
axes[j].plot(
self.fdata.grid_points[0],
self.estimator.cluster_centers_.data_matrix[i, :, j],
c=self.center_colors[i],
label=self.center_labels[i],
linewidth=self.center_width,
)
axes[j].legend(handles=patches)
_set_labels(self.fdata, fig, axes)
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
try:
check_is_fitted(self.estimator)
check_fdata_same_dimensions(
self.estimator.cluster_centers_,
self.fdata,
)
except NotFittedError:
self.estimator.fit(self.fdata)
self.labels = self.estimator.labels_
self._plot_clusters(fig=fig, axes=axes)
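# Illustrative sketch (estimator and data helper assumed to be available as
# in skfda): a fitted hard-clustering estimator and its data combined in a
# ClusterPlot. If the estimator were not fitted, ClusterPlot would fit it on
# the given data before plotting.
def _example_cluster_plot() -> Figure:
    from skfda.datasets import make_gaussian_process  # assumed helper
    from skfda.ml.clustering import KMeans  # assumed estimator location
    fd = make_gaussian_process(n_samples=30, random_state=0)
    estimator = KMeans(n_clusters=3, random_state=0)
    estimator.fit(fd)
    return ClusterPlot(estimator, fd).plot()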
class ClusterMembershipLinesPlot(BasePlot):
"""
Class ClusterMembershipLinesPlot.
Args:
estimator: estimator used to calculate the
clusters.
        fdata: contains the samples which are grouped
into different clusters.
fig: figure over which the graph is
plotted in case ax is not specified. If None and ax is also None,
the figure is initialized.
axes: axis over where the graph is plotted.
If None, see param fig.
sample_colors: contains in order the colors
of each sample of the fdatagrid.
sample_labels: contains in order the labels
of each sample of the fdatagrid.
cluster_labels: contains in order the names of
each cluster the samples of the fdatagrid are classified into.
colormap: colormap from which the colors of the
plot are taken.
x_label: Label for the x-axis. Defaults to "Cluster".
y_label: Label for the y-axis. Defaults to
"Degree of membership".
title: Title for the figure where the clustering
            results are plotted.
Defaults to "Degrees of membership of the samples to each cluster".
"""
def __init__(
self,
estimator: FuzzyClusteringEstimator,
fdata: FDataGrid,
*,
chart: Figure | Axes | None = None,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
sample_colors: Sequence[ColorLike] | None = None,
sample_labels: Sequence[str] | None = None,
cluster_labels: Sequence[str] | None = None,
colormap: matplotlib.colors.Colormap = None,
x_label: str | None = None,
y_label: str | None = None,
title: str | None = None,
) -> None:
if colormap is None:
colormap = plt.cm.get_cmap('rainbow')
super().__init__(
chart,
fig=fig,
axes=axes,
)
self.fdata = fdata
self.estimator = estimator
self.sample_labels = sample_labels
self.sample_colors = sample_colors
self.cluster_labels = cluster_labels
self.x_label = x_label
self.y_label = y_label
self.title = title
self.colormap = colormap
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
try:
check_is_fitted(self.estimator)
check_fdata_same_dimensions(
self.estimator.cluster_centers_,
self.fdata,
)
except NotFittedError:
self.estimator.fit(self.fdata)
membership = self.estimator.predict_proba(self.fdata)
_plot_clustering_checks(
estimator=self.estimator,
fdata=self.fdata,
sample_colors=self.sample_colors,
sample_labels=self.sample_labels,
cluster_colors=None,
cluster_labels=self.cluster_labels,
center_colors=None,
center_labels=None,
)
x_label, y_label, title = _get_labels(
self.x_label,
self.y_label,
self.title,
"Cluster",
)
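        # When no explicit sample colors are given, each sample inherits the
        # color of the cluster it is assigned to by the fitted estimator.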
if self.sample_colors is None:
self.cluster_colors = self.colormap(
np.arange(self.estimator.n_clusters)
/ (self.estimator.n_clusters - 1),
)
labels_by_cluster = self.estimator.labels_
self.sample_colors = self.cluster_colors[labels_by_cluster]
if self.sample_labels is None:
self.sample_labels = [
f'$SAMPLE: {i}$'
for i in range(self.fdata.n_samples)
]
if self.cluster_labels is None:
self.cluster_labels = [
f'${i}$'
for i in range(self.estimator.n_clusters)
]
axes[0].get_xaxis().set_major_locator(MaxNLocator(integer=True))
self.artists = np.array([
axes[0].plot(
np.arange(self.estimator.n_clusters),
membership[i],
label=self.sample_labels[i],
color=self.sample_colors[i],
)
for i in range(self.fdata.n_samples)
])
axes[0].set_xticks(np.arange(self.estimator.n_clusters))
axes[0].set_xticklabels(self.cluster_labels)
axes[0].set_xlabel(x_label)
axes[0].set_ylabel(y_label)
fig.suptitle(title)
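# Illustrative usage sketch, not part of the library API. It assumes that
# ``skfda.ml.clustering.FuzzyCMeans`` is available under that import path
# (any estimator exposing ``predict_proba`` would do) and that ``BasePlot``
# exposes a ``plot()`` method; the helper name below is hypothetical.
def _example_membership_lines_plot():  # pragma: no cover
    import numpy as np
    from skfda import FDataGrid
    from skfda.ml.clustering import FuzzyCMeans  # assumed import path
    grid_points = np.linspace(0, 1, 20)
    rng = np.random.default_rng(0)
    data = rng.standard_normal((15, 20)).cumsum(axis=1)  # random-walk curves
    fdata = FDataGrid(data_matrix=data, grid_points=grid_points)
    # One line per sample, connecting its degree of membership to each of
    # the three clusters.
    plot = ClusterMembershipLinesPlot(FuzzyCMeans(n_clusters=3), fdata)
    return plot.plot()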
class ClusterMembershipPlot(BasePlot):
"""
Class ClusterMembershipPlot.
Args:
estimator: estimator used to calculate the
clusters.
X: contains the samples which are grouped
into different clusters.
fig: figure over which the graph is
plotted in case ax is not specified. If None and ax is also None,
the figure is initialized.
axes: axis over where the graph is plotted.
If None, see param fig.
sample_colors: contains in order the colors
of each sample of the fdatagrid.
sample_labels: contains in order the labels
of each sample of the fdatagrid.
cluster_labels: contains in order the names of
each cluster the samples of the fdatagrid are classified into.
colormap: colormap from which the colors of the
plot are taken.
x_label: Label for the x-axis. Defaults to "Cluster".
y_label: Label for the y-axis. Defaults to
"Degree of membership".
title: Title for the figure where the clustering
results are ploted.
Defaults to "Degrees of membership of the samples to each cluster".
"""
def __init__(
self,
estimator: FuzzyClusteringEstimator,
fdata: FData,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | Sequence[Axes] | None = None,
sort: int = -1,
sample_labels: Sequence[str] | None = None,
cluster_colors: Sequence[ColorLike] | None = None,
cluster_labels: Sequence[str] | None = None,
        colormap: matplotlib.colors.Colormap | None = None,
x_label: str | None = None,
y_label: str | None = None,
title: str | None = None,
) -> None:
if colormap is None:
colormap = plt.cm.get_cmap('rainbow')
super().__init__(
chart,
fig=fig,
axes=axes,
)
self.fdata = fdata
self.estimator = estimator
self.sample_labels = sample_labels
self.cluster_colors = (
None
if cluster_colors is None
else list(cluster_colors)
)
self.cluster_labels = cluster_labels
self.x_label = x_label
self.y_label = y_label
self.title = title
self.colormap = colormap
self.sort = sort
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Sequence[Axes],
) -> None:
self.artists = np.full(
(self.n_samples, self.n_subplots),
None,
dtype=Artist,
)
try:
check_is_fitted(self.estimator)
check_fdata_same_dimensions(
self.estimator.cluster_centers_,
self.fdata,
)
except NotFittedError:
self.estimator.fit(self.fdata)
membership = self.estimator.predict_proba(self.fdata)
if self.sort < -1 or self.sort >= self.estimator.n_clusters:
raise ValueError(
"The sorting number must belong to "
"the interval [-1, n_clusters)",
)
_plot_clustering_checks(
estimator=self.estimator,
fdata=self.fdata,
sample_colors=None,
sample_labels=self.sample_labels,
cluster_colors=self.cluster_colors,
cluster_labels=self.cluster_labels,
center_colors=None,
center_labels=None,
)
x_label, y_label, title = _get_labels(
self.x_label,
self.y_label,
self.title,
"Sample",
)
if self.sample_labels is None:
self.sample_labels = list(
np.arange(
self.fdata.n_samples,
).astype(np.str_),
)
if self.cluster_colors is None:
self.cluster_colors = list(
self.colormap(
np.arange(self.estimator.n_clusters)
/ (self.estimator.n_clusters - 1),
),
)
if self.cluster_labels is None:
self.cluster_labels = [
f'$CLUSTER: {i}$'
for i in range(self.estimator.n_clusters)
]
patches = [
mpatches.Patch(
color=self.cluster_colors[i],
label=self.cluster_labels[i],
)
for i in range(self.estimator.n_clusters)
]
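        # If a sorting cluster was requested, samples are reordered by
        # decreasing membership to that cluster, and its column (and color)
        # is moved to position 0 so it is drawn at the bottom of each bar.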
if self.sort == -1:
labels_dim = membership
else:
sample_indices = np.argsort(-membership[:, self.sort])
self.sample_labels = list(
np.array(self.sample_labels)[sample_indices],
)
labels_dim = np.copy(membership[sample_indices])
temp_labels = np.copy(labels_dim[:, 0])
labels_dim[:, 0] = labels_dim[:, self.sort]
labels_dim[:, self.sort] = temp_labels
# Swap
self.cluster_colors[0], self.cluster_colors[self.sort] = (
self.cluster_colors[self.sort],
self.cluster_colors[0],
)
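        # Prepend a column of zeros so that the bottom of the bar for cluster
        # ``i`` is simply the cumulative sum of the previous memberships.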
conc = np.zeros((self.fdata.n_samples, 1))
labels_dim = np.concatenate((conc, labels_dim), axis=-1)
bars = [
axes[0].bar(
np.arange(self.fdata.n_samples),
labels_dim[:, i + 1],
bottom=np.sum(labels_dim[:, :(i + 1)], axis=1),
color=self.cluster_colors[i],
)
for i in range(self.estimator.n_clusters)
]
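        # The stacked bars are drawn once to obtain the patch geometry and
        # then regrouped into one PatchCollection per sample, so that
        # ``self.artists`` can be indexed by sample.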
for b in bars:
b.remove()
b.figure = None
for i in range(self.n_samples):
collection = PatchCollection(
[
Rectangle(
bar.patches[i].get_xy(),
bar.patches[i].get_width(),
bar.patches[i].get_height(),
color=bar.patches[i].get_facecolor(),
) for bar in bars
],
match_original=True,
)
axes[0].add_collection(collection)
self.artists[i, 0] = collection
fig.canvas.draw()
axes[0].set_xticks(np.arange(self.fdata.n_samples))
axes[0].set_xticklabels(self.sample_labels)
axes[0].set_xlabel(x_label)
axes[0].set_ylabel(y_label)
axes[0].legend(handles=patches)
        fig.suptitle(title)
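# Illustrative usage sketch, not part of the library API. As above, it assumes
# that ``skfda.ml.clustering.FuzzyCMeans`` is available and that ``BasePlot``
# exposes a ``plot()`` method; the helper name below is hypothetical.
def _example_membership_bars_plot():  # pragma: no cover
    import numpy as np
    from skfda import FDataGrid
    from skfda.ml.clustering import FuzzyCMeans  # assumed import path
    grid_points = np.linspace(0, 1, 20)
    rng = np.random.default_rng(0)
    data = rng.standard_normal((15, 20)).cumsum(axis=1)
    fdata = FDataGrid(data_matrix=data, grid_points=grid_points)
    # One stacked bar per sample; sort=0 reorders the samples by decreasing
    # membership to cluster 0 and draws that cluster at the bottom.
    plot = ClusterMembershipPlot(FuzzyCMeans(n_clusters=3), fdata, sort=0)
    return plot.plot()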
scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/visualization/clustering.py | clustering.py | 0.967302 | 0.519765
from __future__ import annotations
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from ...representation import FDataGrid
from ..outliers import OutliergramOutlierDetector
from ._baseplot import BasePlot
class Outliergram(BasePlot):
"""
Outliergram method of visualization.
Plots the :class:`Modified Band Depth
(MBD)<skfda.exploratory.depth.ModifiedBandDepth>` on the Y axis and the
:func:`Modified Epigraph Index
(MEI)<skfda.exploratory.stats.modified_epigraph_index>` on the X axis.
    When plotted, these points approximately follow a parabola, and shape
    outliers are the points that appear far from this curve.
Args:
        fdata: functional data set that we want to examine.
        chart: figure or axes over which the graph is plotted. If None
            and fig and axes are also None, the figure is initialized.
        fig: figure over which the graph is plotted in case axes is not
            specified. If None and axes is also None, the figure is
            initialized.
        axes: axes where the graph is plotted. If None, see param fig.
        factor: constant that controls how far below the parabola a curve
            may fall before being flagged as a shape outlier. Defaults
            to 1.5.
Attributes:
mbd: result of the calculation of the Modified Band Depth on our
dataset. Represents the mean time a curve stays between other pair
of curves, being a good measure of centrality.
mei: result of the calculation of the Modified Epigraph Index on our
dataset. Represents the mean time a curve stays below other curve.
    References:
        López-Pintado, S. & Romo, J. (2011). A half-region depth for
            functional data. Computational Statistics & Data Analysis,
            55, 1679-1695.
        Arribas-Gil, A. & Romo, J. Shape outlier detection and
            visualization for functional data: the outliergram.
            https://academic.oup.com/biostatistics/article/15/4/603/266279
"""
def __init__(
self,
fdata: FDataGrid,
chart: Figure | Axes | None = None,
*,
fig: Figure | None = None,
axes: Axes | None = None,
factor: float = 1.5,
) -> None:
BasePlot.__init__(
self,
chart,
fig=fig,
axes=axes,
)
self.fdata = fdata
self.factor = factor
self.outlier_detector = OutliergramOutlierDetector(factor=factor)
self.outlier_detector.fit(fdata)
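        # Sort the fitted MEI values so that the reference parabola can be
        # drawn as a single smooth curve from left to right.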
indices = np.argsort(self.outlier_detector.mei_)
self._parabola_ordered = self.outlier_detector.parabola_[indices]
self._mei_ordered = self.outlier_detector.mei_[indices]
@property
def n_samples(self) -> int:
return self.fdata.n_samples
def _plot(
self,
fig: Figure,
axes: Axes,
) -> None:
self.artists = np.zeros(
(self.n_samples, 1),
dtype=Artist,
)
for i, (mei, mbd) in enumerate(
zip(self.outlier_detector.mei_, self.outlier_detector.mbd_),
):
self.artists[i, 0] = axes[0].scatter(
mei,
mbd,
picker=2,
)
axes[0].plot(
self._mei_ordered,
self._parabola_ordered,
)
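        # The parabola shifted down by the maximum inlier distance marks the
        # outlier cutoff; points below the dashed line correspond to curves
        # flagged as shape outliers by the detector.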
shifted_parabola = (
self._parabola_ordered
- self.outlier_detector.max_inlier_distance_
)
axes[0].plot(
self._mei_ordered,
shifted_parabola,
linestyle='dashed',
)
# Set labels of graph
if self.fdata.dataset_name is not None:
axes[0].set_title(self.fdata.dataset_name)
axes[0].set_xlabel("MEI")
axes[0].set_ylabel("MBD")
axes[0].set_xlim([0, 1])
axes[0].set_ylim([
0, # Minimum MBD
1, # Maximum MBD
        ])
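# Illustrative usage sketch, not part of the library API. It assumes that
# ``BasePlot`` exposes a ``plot()`` method which builds the figure and calls
# ``_plot`` internally; the helper name below is hypothetical. ``np`` and
# ``FDataGrid`` are already imported at the top of this module.
def _example_outliergram():  # pragma: no cover
    rng = np.random.default_rng(0)
    grid_points = np.linspace(0, 1, 30)
    # Sine-like curves plus one curve with a clearly different shape.
    noise = 0.1 * rng.standard_normal((20, 30))
    data = np.sin(2 * np.pi * grid_points) + noise
    data[-1] = np.cos(2 * np.pi * grid_points)
    fdata = FDataGrid(data_matrix=data, grid_points=grid_points)
    # MEI is plotted on the x-axis and MBD on the y-axis; the dashed curve
    # marks the outlier cutoff controlled by ``factor``.
    return Outliergram(fdata, factor=1.5).plot()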
scikit-fda-sim | /scikit-fda-sim-0.7.1.tar.gz/scikit-fda-sim-0.7.1/skfda/exploratory/visualization/_outliergram.py | _outliergram.py | 0.952364 | 0.799403