code
stringlengths 501
4.91M
| package
stringlengths 2
88
| path
stringlengths 11
291
| filename
stringlengths 4
197
| parsed_code
stringlengths 0
4.91M
| quality_prob
float64 0
0.99
| learning_prob
float64 0.02
1
|
---|---|---|---|---|---|---|
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
SAMPLING_WKO = pd.TimedeltaIndex(
['00:00:01', '00:00:05', '00:00:30', '00:01:00', '00:03:00',
'00:03:30', '00:04:00', '00:04:30', '00:05:00', '00:05:30',
'00:06:00', '00:06:30', '00:07:00', '00:10:00', '00:20:00',
'00:30:00', '00:45:00', '01:00:00', '02:00:00', '03:00:00',
'04:00:00'])
def std_dev_squared_error(y_true, y_pred):
"""Compute the standard deviation of the squared error.
Parameters
----------
y_true : ndarray, shape (n_samples,)
Ground truth (correct) target values.
y_pred : ndarray, shape (n_samples,)
Estimated target values.
Returns
-------
std_dev : float
Standard deviation of the squared error.
"""
return np.sqrt(np.sum((y_true - y_pred) ** 2 / (y_true.size - 2)))
def aerobic_meta_model(record_power_profile, time_samples=None):
"""Compute the aerobic metabolism model from the record power-profile.
Read more in the :ref:`User Guide <mpa_estimate>`.
Parameters
----------
record_power_profile : Series
The record power profile from which to extract the aerobic model.
time_samples : TimedeltaIndex or None, optional
The time samples of the record power-profile to take into account. If
None, the sampling of the method of Pinot et al. is applied, which is
equivalent to the sampling from WKO+.
Returns
-------
mpa : float
Maximum Aerobic Power.
t_mpa : Timedelta
Time of the Maximum Aerobic Power.
aei : float
Aerobic Endurance Index.
fit_info_mpa_fitting : dict
This is a dictionary with the information collected about the fitting
related to the MAP. The attributes will be the following:
- `slope`: slope of the linear fitting,
- `intercept`: intercept of the linear fitting,
- `std_err`: standard error of the fitting,
- `coeff_det`: coefficient of determination.
fit_info_aei_fitting : dict
This is a dictionary with the information collected about the fitting
related to the AEI. The attributes will be the following:
- `slope`: slope of the linear fitting,
- `intercept`: intercept of the linear fitting,
- `std_err`: standard error of the fitting,
- `coeff_det`: coefficient of determination.
Notes
-----
The method implemented here follow the work presented in [1]_.
References
----------
.. [1] Pinot et al., "Determination of Maximal Aerobic Power
on the Field in Cycling", Jounal of Science and Cycling, vol. 3(1),
pp. 26-31, 2014.
"""
if time_samples is None:
time_samples = SAMPLING_WKO.copy()
# keep only the time samples available in the record power-profile
mask_time_samples = time_samples < record_power_profile.index.max()
time_samples = time_samples[mask_time_samples]
# to avoid losing data, we will first interpolate the time samples
# using all the data available in the record power-profile before
# to select only the samples required.
ts_union = record_power_profile.index.union(time_samples)
record_power_profile = (record_power_profile.reindex(ts_union)
.interpolate('linear')
.reindex(time_samples))
# only samples between 10 minutes and 4 hours are considered for the
# regression
mask_samples_map = np.bitwise_and(time_samples >= '00:10:00',
time_samples <= '04:00:00')
extracted_profile = record_power_profile.loc[mask_samples_map].values
extracted_time = record_power_profile.loc[mask_samples_map].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
ols = LinearRegression()
ols.fit(extracted_time, extracted_profile)
std_fit = std_dev_squared_error(extracted_profile,
ols.predict(extracted_time))
fit_info_mpa_fitting = {
'slope': ols.coef_[0],
'intercept': ols.intercept_,
'std_err': std_fit,
'coeff_det': ols.score(extracted_time, extracted_profile)}
# mpa will be find between 3 minutes and 7 minutes
mask_samples_map = np.bitwise_and(time_samples >= '00:03:00',
time_samples <= '00:10:00')
extracted_profile = record_power_profile.loc[mask_samples_map].values
extracted_time = record_power_profile.loc[mask_samples_map].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
aerobic_model = ols.predict(extracted_time)
# find the first value in the 2 * std confidence interval
samples_within = np.abs(extracted_profile - aerobic_model) < 2 * std_fit
if np.count_nonzero(samples_within):
index_mpa = np.flatnonzero(samples_within)[0]
time_mpa = record_power_profile.loc[mask_samples_map].index[index_mpa]
mpa = record_power_profile.loc[mask_samples_map].iloc[index_mpa]
else:
raise ValueError('There is no value entering in the confidence'
' level between 3 and 7 minutes.')
# find aerobic endurance index
mask_samples_aei = np.bitwise_and(time_samples >= time_mpa,
time_samples <= '04:00:00')
extracted_profile = record_power_profile.loc[mask_samples_aei].values
extracted_profile = extracted_profile / mpa * 100
extracted_time = record_power_profile.loc[mask_samples_aei].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
ols.fit(extracted_time, extracted_profile)
fit_info_aei_fitting = {
'slope': ols.coef_[0],
'intercept': ols.intercept_,
'std_err': std_fit,
'coeff_det': ols.score(extracted_time, extracted_profile)}
return (mpa, time_mpa, ols.coef_[0],
fit_info_mpa_fitting, fit_info_aei_fitting) | scikit-cycling | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/metrics/power_profile.py | power_profile.py |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
SAMPLING_WKO = pd.TimedeltaIndex(
['00:00:01', '00:00:05', '00:00:30', '00:01:00', '00:03:00',
'00:03:30', '00:04:00', '00:04:30', '00:05:00', '00:05:30',
'00:06:00', '00:06:30', '00:07:00', '00:10:00', '00:20:00',
'00:30:00', '00:45:00', '01:00:00', '02:00:00', '03:00:00',
'04:00:00'])
def std_dev_squared_error(y_true, y_pred):
"""Compute the standard deviation of the squared error.
Parameters
----------
y_true : ndarray, shape (n_samples,)
Ground truth (correct) target values.
y_pred : ndarray, shape (n_samples,)
Estimated target values.
Returns
-------
std_dev : float
Standard deviation of the squared error.
"""
return np.sqrt(np.sum((y_true - y_pred) ** 2 / (y_true.size - 2)))
def aerobic_meta_model(record_power_profile, time_samples=None):
"""Compute the aerobic metabolism model from the record power-profile.
Read more in the :ref:`User Guide <mpa_estimate>`.
Parameters
----------
record_power_profile : Series
The record power profile from which to extract the aerobic model.
time_samples : TimedeltaIndex or None, optional
The time samples of the record power-profile to take into account. If
None, the sampling of the method of Pinot et al. is applied, which is
equivalent to the sampling from WKO+.
Returns
-------
mpa : float
Maximum Aerobic Power.
t_mpa : Timedelta
Time of the Maximum Aerobic Power.
aei : float
Aerobic Endurance Index.
fit_info_mpa_fitting : dict
This is a dictionary with the information collected about the fitting
related to the MAP. The attributes will be the following:
- `slope`: slope of the linear fitting,
- `intercept`: intercept of the linear fitting,
- `std_err`: standard error of the fitting,
- `coeff_det`: coefficient of determination.
fit_info_aei_fitting : dict
This is a dictionary with the information collected about the fitting
related to the AEI. The attributes will be the following:
- `slope`: slope of the linear fitting,
- `intercept`: intercept of the linear fitting,
- `std_err`: standard error of the fitting,
- `coeff_det`: coefficient of determination.
Notes
-----
The method implemented here follow the work presented in [1]_.
References
----------
.. [1] Pinot et al., "Determination of Maximal Aerobic Power
on the Field in Cycling", Jounal of Science and Cycling, vol. 3(1),
pp. 26-31, 2014.
"""
if time_samples is None:
time_samples = SAMPLING_WKO.copy()
# keep only the time samples available in the record power-profile
mask_time_samples = time_samples < record_power_profile.index.max()
time_samples = time_samples[mask_time_samples]
# to avoid losing data, we will first interpolate the time samples
# using all the data available in the record power-profile before
# to select only the samples required.
ts_union = record_power_profile.index.union(time_samples)
record_power_profile = (record_power_profile.reindex(ts_union)
.interpolate('linear')
.reindex(time_samples))
# only samples between 10 minutes and 4 hours are considered for the
# regression
mask_samples_map = np.bitwise_and(time_samples >= '00:10:00',
time_samples <= '04:00:00')
extracted_profile = record_power_profile.loc[mask_samples_map].values
extracted_time = record_power_profile.loc[mask_samples_map].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
ols = LinearRegression()
ols.fit(extracted_time, extracted_profile)
std_fit = std_dev_squared_error(extracted_profile,
ols.predict(extracted_time))
fit_info_mpa_fitting = {
'slope': ols.coef_[0],
'intercept': ols.intercept_,
'std_err': std_fit,
'coeff_det': ols.score(extracted_time, extracted_profile)}
# mpa will be find between 3 minutes and 7 minutes
mask_samples_map = np.bitwise_and(time_samples >= '00:03:00',
time_samples <= '00:10:00')
extracted_profile = record_power_profile.loc[mask_samples_map].values
extracted_time = record_power_profile.loc[mask_samples_map].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
aerobic_model = ols.predict(extracted_time)
# find the first value in the 2 * std confidence interval
samples_within = np.abs(extracted_profile - aerobic_model) < 2 * std_fit
if np.count_nonzero(samples_within):
index_mpa = np.flatnonzero(samples_within)[0]
time_mpa = record_power_profile.loc[mask_samples_map].index[index_mpa]
mpa = record_power_profile.loc[mask_samples_map].iloc[index_mpa]
else:
raise ValueError('There is no value entering in the confidence'
' level between 3 and 7 minutes.')
# find aerobic endurance index
mask_samples_aei = np.bitwise_and(time_samples >= time_mpa,
time_samples <= '04:00:00')
extracted_profile = record_power_profile.loc[mask_samples_aei].values
extracted_profile = extracted_profile / mpa * 100
extracted_time = record_power_profile.loc[mask_samples_aei].index.values
extracted_time = np.log(extracted_time /
np.timedelta64(1, 's')).reshape(-1, 1)
ols.fit(extracted_time, extracted_profile)
fit_info_aei_fitting = {
'slope': ols.coef_[0],
'intercept': ols.intercept_,
'std_err': std_fit,
'coeff_det': ols.score(extracted_time, extracted_profile)}
return (mpa, time_mpa, ols.coef_[0],
fit_info_mpa_fitting, fit_info_aei_fitting) | 0.93638 | 0.803791 |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
import numpy as np
from scipy import constants
from ..extraction import gradient_elevation
from ..extraction import acceleration
def strava_power_model(activity, cyclist_weight, bike_weight=6.8,
coef_roll_res=0.0045, pressure=101325.0,
temperature=15.0, coef_drag=1, surface_rider=0.32,
use_acceleration=False):
"""Strava model used to estimate power.
It corresponds the mathematical formulation which add all forces applied to
a cyclist in movement.
Read more in the :ref:`User Guide <strava>`.
Parameters
----------
activity : DataFrame
The activity containing the ride information.
cyclist_weight : float
The cyclist weight in kg.
bike_weight : float, default=6.8
The bike weight in kg.
coef_roll_res : float, default=0.0045
Rolling resistance coefficient.
pressure : float, default=101325.0
Pressure in Pascal.
temperature : float, default=15.0
Temperature in Celsius.
coef_drag : float, default=1
The drag coefficient also known as Cx.
surface_rider : float, default=0.32
Surface area of the rider facing wind also known as S. The unit is m^2.
use_acceleration : bool, default=False
Either to add the power required to accelerate. This estimation can
become unstable if the acceleration varies for reason which are not
linked to power changes (i.e., braking, bends, etc.)
Returns
-------
power : Series
The power estimated.
References
----------
.. [1] How Strava Calculates Power
https://support.strava.com/hc/en-us/articles/216917107-How-Strava-Calculates-Power
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.model import strava_power_model
>>> ride = bikeread(load_fit()[0])
>>> power = strava_power_model(ride, cyclist_weight=72)
>>> print(power['2014-05-07 12:26:28':
... '2014-05-07 12:26:38']) # Show 10 sec of estimated power
2014-05-07 12:26:28 196.567898
2014-05-07 12:26:29 198.638094
2014-05-07 12:26:30 191.444894
2014-05-07 12:26:31 26.365864
2014-05-07 12:26:32 89.826104
2014-05-07 12:26:33 150.842325
2014-05-07 12:26:34 210.083958
2014-05-07 12:26:35 331.573965
2014-05-07 12:26:36 425.013711
2014-05-07 12:26:37 428.806914
2014-05-07 12:26:38 425.410451
Freq: S, dtype: float64
"""
if 'gradient-elevation' not in activity.columns:
activity = gradient_elevation(activity)
if use_acceleration and 'acceleration' not in activity.columns:
activity = acceleration(activity)
temperature_kelvin = constants.convert_temperature(
temperature, 'Celsius', 'Kelvin')
total_weight = cyclist_weight + bike_weight # kg
speed = activity['speed'] # m.s^-1
power_roll_res = coef_roll_res * constants.g * total_weight * speed
# air density at 0 degree Celsius and a standard atmosphere
molar_mass_dry_air = 28.97 / 1000 # kg.mol^-1
standard_atmosphere = constants.physical_constants[
'standard atmosphere'][0] # Pa
zero_celsius_kelvin = constants.convert_temperature(
0, 'Celsius', 'Kelvin') # 273.15 K
air_density_ref = (
(standard_atmosphere * molar_mass_dry_air) /
(constants.gas_constant * zero_celsius_kelvin)) # kg.m^-3
air_density = air_density_ref * (
(pressure * zero_celsius_kelvin) /
(standard_atmosphere * temperature_kelvin)) # kg.m^-3
power_wind = 0.5 * air_density * surface_rider * coef_drag * speed**3
slope = activity['gradient-elevation'] # grade
power_gravity = (total_weight * constants.g *
np.sin(np.arctan(slope)) * speed)
power_total = power_roll_res + power_wind + power_gravity
if use_acceleration:
acc = activity['acceleration'] # m.s^-1
power_acceleration = total_weight * acc * speed
power_total = power_total + power_acceleration
return power_total.clip(0) | scikit-cycling | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/skcycling/model/power.py | power.py |
# Authors: Guillaume Lemaitre <[email protected]>
# Cedric Lemaitre
# License: BSD 3 clause
from __future__ import division
import numpy as np
from scipy import constants
from ..extraction import gradient_elevation
from ..extraction import acceleration
def strava_power_model(activity, cyclist_weight, bike_weight=6.8,
coef_roll_res=0.0045, pressure=101325.0,
temperature=15.0, coef_drag=1, surface_rider=0.32,
use_acceleration=False):
"""Strava model used to estimate power.
It corresponds the mathematical formulation which add all forces applied to
a cyclist in movement.
Read more in the :ref:`User Guide <strava>`.
Parameters
----------
activity : DataFrame
The activity containing the ride information.
cyclist_weight : float
The cyclist weight in kg.
bike_weight : float, default=6.8
The bike weight in kg.
coef_roll_res : float, default=0.0045
Rolling resistance coefficient.
pressure : float, default=101325.0
Pressure in Pascal.
temperature : float, default=15.0
Temperature in Celsius.
coef_drag : float, default=1
The drag coefficient also known as Cx.
surface_rider : float, default=0.32
Surface area of the rider facing wind also known as S. The unit is m^2.
use_acceleration : bool, default=False
Either to add the power required to accelerate. This estimation can
become unstable if the acceleration varies for reason which are not
linked to power changes (i.e., braking, bends, etc.)
Returns
-------
power : Series
The power estimated.
References
----------
.. [1] How Strava Calculates Power
https://support.strava.com/hc/en-us/articles/216917107-How-Strava-Calculates-Power
Examples
--------
>>> from skcycling.datasets import load_fit
>>> from skcycling.io import bikeread
>>> from skcycling.model import strava_power_model
>>> ride = bikeread(load_fit()[0])
>>> power = strava_power_model(ride, cyclist_weight=72)
>>> print(power['2014-05-07 12:26:28':
... '2014-05-07 12:26:38']) # Show 10 sec of estimated power
2014-05-07 12:26:28 196.567898
2014-05-07 12:26:29 198.638094
2014-05-07 12:26:30 191.444894
2014-05-07 12:26:31 26.365864
2014-05-07 12:26:32 89.826104
2014-05-07 12:26:33 150.842325
2014-05-07 12:26:34 210.083958
2014-05-07 12:26:35 331.573965
2014-05-07 12:26:36 425.013711
2014-05-07 12:26:37 428.806914
2014-05-07 12:26:38 425.410451
Freq: S, dtype: float64
"""
if 'gradient-elevation' not in activity.columns:
activity = gradient_elevation(activity)
if use_acceleration and 'acceleration' not in activity.columns:
activity = acceleration(activity)
temperature_kelvin = constants.convert_temperature(
temperature, 'Celsius', 'Kelvin')
total_weight = cyclist_weight + bike_weight # kg
speed = activity['speed'] # m.s^-1
power_roll_res = coef_roll_res * constants.g * total_weight * speed
# air density at 0 degree Celsius and a standard atmosphere
molar_mass_dry_air = 28.97 / 1000 # kg.mol^-1
standard_atmosphere = constants.physical_constants[
'standard atmosphere'][0] # Pa
zero_celsius_kelvin = constants.convert_temperature(
0, 'Celsius', 'Kelvin') # 273.15 K
air_density_ref = (
(standard_atmosphere * molar_mass_dry_air) /
(constants.gas_constant * zero_celsius_kelvin)) # kg.m^-3
air_density = air_density_ref * (
(pressure * zero_celsius_kelvin) /
(standard_atmosphere * temperature_kelvin)) # kg.m^-3
power_wind = 0.5 * air_density * surface_rider * coef_drag * speed**3
slope = activity['gradient-elevation'] # grade
power_gravity = (total_weight * constants.g *
np.sin(np.arctan(slope)) * speed)
power_total = power_roll_res + power_wind + power_gravity
if use_acceleration:
acc = activity['acceleration'] # m.s^-1
power_acceleration = total_weight * acc * speed
power_total = power_total + power_acceleration
return power_total.clip(0) | 0.940939 | 0.582491 |
Scikit-cycling
==============
.. image:: https://travis-ci.org/scikit-cycling/scikit-cycling.svg?branch=master
:target: https://travis-ci.org/scikit-cycling/scikit-cycling
.. image:: https://ci.appveyor.com/api/projects/status/f2mvtb9y1mcy99vg?svg=true
:target: https://ci.appveyor.com/project/glemaitre/scikit-cycling
.. image:: https://readthedocs.org/projects/scikit-cycling/badge/?version=latest
:target: http://scikit-cycling.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://codecov.io/gh/scikit-cycling/scikit-cycling/branch/master/graph/badge.svg
:target: https://codecov.io/gh/scikit-cycling/scikit-cycling
.. image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/scikit-cycling/Lobby?utm_source=share-link&utm_medium=link&utm_cam
Installation
------------
Dependencies
~~~~~~~~~~~~
Scikit-cycling requires:
* scipy
* numpy
* pandas
* six
* fit-parse
* joblib
* scikit-learn
Installation
~~~~~~~~~~~~
``scikit-cycling`` is currently available on the PyPi’s reporitories and you can
install it via pip::
pip install -U scikit-cycling
The package is release also in conda-forge::
conda install -c conda-forge scikit-cycling
If you prefer, you can clone it and run the ``setup.py`` file. Use the
following commands to get a copy from Github and install all dependencies::
git clone https://github.com/scikit-cycling/scikit-cycling.git
cd scikit-cycling
pip install .
Or install using ``pip`` and GitHub::
pip install -U git+https://github.com/scikit-cycling/scikit-cycling.git
| scikit-cycling | /scikit_cycling-0.1.3-cp35-cp35m-win32.whl/scikit_cycling-0.1.3.dist-info/DESCRIPTION.rst | DESCRIPTION.rst | Scikit-cycling
==============
.. image:: https://travis-ci.org/scikit-cycling/scikit-cycling.svg?branch=master
:target: https://travis-ci.org/scikit-cycling/scikit-cycling
.. image:: https://ci.appveyor.com/api/projects/status/f2mvtb9y1mcy99vg?svg=true
:target: https://ci.appveyor.com/project/glemaitre/scikit-cycling
.. image:: https://readthedocs.org/projects/scikit-cycling/badge/?version=latest
:target: http://scikit-cycling.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://codecov.io/gh/scikit-cycling/scikit-cycling/branch/master/graph/badge.svg
:target: https://codecov.io/gh/scikit-cycling/scikit-cycling
.. image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/scikit-cycling/Lobby?utm_source=share-link&utm_medium=link&utm_cam
Installation
------------
Dependencies
~~~~~~~~~~~~
Scikit-cycling requires:
* scipy
* numpy
* pandas
* six
* fit-parse
* joblib
* scikit-learn
Installation
~~~~~~~~~~~~
``scikit-cycling`` is currently available on the PyPi’s reporitories and you can
install it via pip::
pip install -U scikit-cycling
The package is release also in conda-forge::
conda install -c conda-forge scikit-cycling
If you prefer, you can clone it and run the ``setup.py`` file. Use the
following commands to get a copy from Github and install all dependencies::
git clone https://github.com/scikit-cycling/scikit-cycling.git
cd scikit-cycling
pip install .
Or install using ``pip`` and GitHub::
pip install -U git+https://github.com/scikit-cycling/scikit-cycling.git
| 0.820577 | 0.63477 |
===============================
SciKit Data
===============================
.. image:: https://img.shields.io/pypi/v/scikit-data.svg
:target: https://pypi.python.org/pypi/scikit-data
.. image:: https://img.shields.io/travis/OpenDataScienceLab/skdata.svg
:target: https://travis-ci.org/OpenDataScienceLab/skdata
.. image:: https://readthedocs.org/projects/skdata/badge/?version=latest
:target: https://skdata.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Conda package current release info
==================================
.. image:: https://anaconda.org/conda-forge/scikit-data/badges/version.svg
:target: https://anaconda.org/conda-forge/scikit-data
:alt: Anaconda-Server Badge
.. image:: https://anaconda.org/conda-forge/scikit-data/badges/downloads.svg
:target: https://anaconda.org/conda-forge/scikit-data
:alt: Anaconda-Server Badge
About SciKit Data
=================
The propose of this library is to allow the data analysis process more easy and automatic.
The data analysis process is composed of following steps:
* The statement of problem
* Collecting your data
* Cleaning the data
* Normalizing the data
* Transforming the data
* Exploratory statistics
* Exploratory visualization
* Predictive modeling
* Validating your model
* Visualizing and interpreting your results
* Deploying your solution
(Cuesta, Hector and Kumar, Sampath; 2016)
This project contemplates the follow features:
* Data Preparation
* Data Exploration
* Prepare data to Predictive modeling
* Visualizing results
* Reproducible data analysis
Data Preparation
----------------
Data preparation is about how to obtain, clean, normalize, and transform the data into an
optimal dataset, trying to avoid any possible data quality issues such as invalid, ambiguous,
out-of-range, or missing values.
(...)
Scrubbing data, also called data cleansing, is the process of correcting or
removing data in a dataset that is incorrect, inaccurate, incomplete,
improperly formatted, or duplicated.
(...)
In order to avoid dirty data, our dataset should possess the following characteristics:
* Correct
* Completeness
* Accuracy
* Consistency
* Uniformity
(...)
**Data transformation**
Data transformation is usually related to databases and data warehouses where values from
a source format are extract, transform, and load in a destination format.
Extract, Transform, and Load (ETL) obtains data from various data sources, performs some
transformation functions depending on our data model, and loads the resulting data into
the destination.
(...)
Some important transformations:
* Text facet and Clustering
* Numeric fact
* Replace
**Data reduction methods**
Data reduction is the transformation of numerical or alphabetical digital information
derived empirically or experimentally into a corrected, ordered, and simplified form.
Reduced data size is very small in volume and comparatively original, hence, the storage
efficiency will increase and at the same time we can minimize the data handling costs and
will minimize the analysis time also.
We can use several types of data reduction methods, which are listed as follows:
* Filtering and sampling
* Binned algorithm
* Dimensionality reduction
(Cuesta, Hector and Kumar, Sampath; 2016)
Data exploration
----------------
Data exploration is essentially looking at the processed data in a graphical or statistical form
and trying to find patterns, connections, and relations in the data. Visualization is used to
provide overviews in which meaningful patterns may be found.
(...)
The goals of exploratory data analysis (EDA) are as follows:
* Detection of data errors
* Checking of assumptions
* Finding hidden patters (like tendency)
* Preliminary selection of appropriate models
* Determining relationships between the variables
(...)
The four types of EDA are univariate nongraphical, multivariate nongraphical, univariate
graphical, and multivariate graphical. The nongraphical methods refer to the calculation of
summary statistics or the outlier detection. In this book, we will focus on the univariate and
(Cuesta, Hector and Kumar, Sampath; 2016)
**Outlier Detection**
Two outlier detection method should be used, initially, for SkData are:
* IQR;
* Chauvenet.
Another methods should be implemented soon [1].
Prepare data to Predictive modeling
-----------------------------------
From the galaxy of information we have to extract usable hidden patterns and trends using
relevant algorithms. To extract the future behavior of these hidden patterns, we can use
predictive modeling. Predictive modeling is a statistical technique to predict future
behavior by analyzing existing information, that is, historical data. We have to use proper
statistical models that best forecast the hidden patterns of the data or
information (Cuesta, Hector and Kumar, Sampath; 2016).
SkData, should allow you to format your data to send it to some predictive library
as scikit-learn.
Visualizing results
-------------------
In an explanatory data analysis process, simple visualization techniques are very useful for
discovering patterns, since the human eye plays an important role. Sometimes, we have to
generate a three-dimensional plot for finding the visual pattern. But, for getting better
visual patterns, we can also use a scatter plot matrix, instead of a three-dimensional plot. In
practice, the hypothesis of the study, dimensionality of the feature space, and data all play
important roles in ensuring a good visualization technique (Cuesta, Hector and Kumar, Sampath; 2016).
Quantitative and Qualitative data analysis
------------------------------------------
Quantitative data are numerical measurements expressed in terms of numbers.
Qualitative data are categorical measurements expressed in terms of natural language
descriptions.
Quantitative analytics involves analysis of numerical data. The type of the analysis will
depend on the level of measurement. There are four kinds of measurements:
* Nominal data has no logical order and is used as classification data.
* Ordinal data has a logical order and differences between values are not constant.
* Interval data is continuous and depends on logical order. The data has standardized differences between values, but do not include zero.
* Ratio data is continuous with logical order as well as regular intervals differences between values and may include zero.
Qualitative analysis can explore the complexity and meaning of social phenomena. Data for
qualitative study may include written texts (for example, documents or e-mail) and/or
audible and visual data (digital images or sounds).
(Cuesta, Hector and Kumar, Sampath; 2016)
Reproducibility for Data Analysis
---------------------------------
A good way to promote reproducibility for data analysis is store the
operation history. This history can be used to prepare another dataset
with the same steps (operations).
Books used as reference to guide this project:
----------------------------------------------
- https://www.packtpub.com/big-data-and-business-intelligence/clean-data
- https://www.packtpub.com/big-data-and-business-intelligence/python-data-analysis
- https://www.packtpub.com/big-data-and-business-intelligence/mastering-machine-learning-scikit-learn
- https://www.packtpub.com/big-data-and-business-intelligence/practical-data-analysis-second-edition
Some other materials used as reference:
---------------------------------------
- https://github.com/rsouza/MMD/blob/master/notebooks/3.1_Kaggle_Titanic.ipynb
- https://github.com/agconti/kaggle-titanic/blob/master/Titanic.ipynb
- https://github.com/donnemartin/data-science-ipython-notebooks/blob/master/kaggle/titanic.ipynb
Installing scikit-data
======================
Using conda
-----------
Installing `scikit-data` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
.. code-block:: console
$ conda config --add channels conda-forge
Once the `conda-forge` channel has been enabled, `scikit-data` can be installed with:
.. code-block:: console
$ conda install scikit-data
It is possible to list all of the versions of `scikit-data` available on your platform with:
.. code-block:: console
$ conda search scikit-data --channel conda-forge
Using pip
---------
To install scikit-data, run this command in your terminal:
.. code-block:: console
$ pip install skdata
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
More Information
----------------
* License: MIT
* Documentation: https://skdata.readthedocs.io
References
----------
* CUESTA, Hector; KUMAR, Sampath. Practical Data Analysis. Packt Publishing Ltd, 2016.
**Electronic materials**
* [1] http://www.datasciencecentral.com/profiles/blogs/introduction-to-outlier-detection-methods
| scikit-data | /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/README.rst | README.rst | ===============================
SciKit Data
===============================
.. image:: https://img.shields.io/pypi/v/scikit-data.svg
:target: https://pypi.python.org/pypi/scikit-data
.. image:: https://img.shields.io/travis/OpenDataScienceLab/skdata.svg
:target: https://travis-ci.org/OpenDataScienceLab/skdata
.. image:: https://readthedocs.org/projects/skdata/badge/?version=latest
:target: https://skdata.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Conda package current release info
==================================
.. image:: https://anaconda.org/conda-forge/scikit-data/badges/version.svg
:target: https://anaconda.org/conda-forge/scikit-data
:alt: Anaconda-Server Badge
.. image:: https://anaconda.org/conda-forge/scikit-data/badges/downloads.svg
:target: https://anaconda.org/conda-forge/scikit-data
:alt: Anaconda-Server Badge
About SciKit Data
=================
The propose of this library is to allow the data analysis process more easy and automatic.
The data analysis process is composed of following steps:
* The statement of problem
* Collecting your data
* Cleaning the data
* Normalizing the data
* Transforming the data
* Exploratory statistics
* Exploratory visualization
* Predictive modeling
* Validating your model
* Visualizing and interpreting your results
* Deploying your solution
(Cuesta, Hector and Kumar, Sampath; 2016)
This project contemplates the follow features:
* Data Preparation
* Data Exploration
* Prepare data to Predictive modeling
* Visualizing results
* Reproducible data analysis
Data Preparation
----------------
Data preparation is about how to obtain, clean, normalize, and transform the data into an
optimal dataset, trying to avoid any possible data quality issues such as invalid, ambiguous,
out-of-range, or missing values.
(...)
Scrubbing data, also called data cleansing, is the process of correcting or
removing data in a dataset that is incorrect, inaccurate, incomplete,
improperly formatted, or duplicated.
(...)
In order to avoid dirty data, our dataset should possess the following characteristics:
* Correct
* Completeness
* Accuracy
* Consistency
* Uniformity
(...)
**Data transformation**
Data transformation is usually related to databases and data warehouses where values from
a source format are extract, transform, and load in a destination format.
Extract, Transform, and Load (ETL) obtains data from various data sources, performs some
transformation functions depending on our data model, and loads the resulting data into
the destination.
(...)
Some important transformations:
* Text facet and Clustering
* Numeric fact
* Replace
**Data reduction methods**
Data reduction is the transformation of numerical or alphabetical digital information
derived empirically or experimentally into a corrected, ordered, and simplified form.
Reduced data size is very small in volume and comparatively original, hence, the storage
efficiency will increase and at the same time we can minimize the data handling costs and
will minimize the analysis time also.
We can use several types of data reduction methods, which are listed as follows:
* Filtering and sampling
* Binned algorithm
* Dimensionality reduction
(Cuesta, Hector and Kumar, Sampath; 2016)
Data exploration
----------------
Data exploration is essentially looking at the processed data in a graphical or statistical form
and trying to find patterns, connections, and relations in the data. Visualization is used to
provide overviews in which meaningful patterns may be found.
(...)
The goals of exploratory data analysis (EDA) are as follows:
* Detection of data errors
* Checking of assumptions
* Finding hidden patters (like tendency)
* Preliminary selection of appropriate models
* Determining relationships between the variables
(...)
The four types of EDA are univariate nongraphical, multivariate nongraphical, univariate
graphical, and multivariate graphical. The nongraphical methods refer to the calculation of
summary statistics or the outlier detection. In this book, we will focus on the univariate and
(Cuesta, Hector and Kumar, Sampath; 2016)
**Outlier Detection**
Two outlier detection method should be used, initially, for SkData are:
* IQR;
* Chauvenet.
Another methods should be implemented soon [1].
Prepare data to Predictive modeling
-----------------------------------
From the galaxy of information we have to extract usable hidden patterns and trends using
relevant algorithms. To extract the future behavior of these hidden patterns, we can use
predictive modeling. Predictive modeling is a statistical technique to predict future
behavior by analyzing existing information, that is, historical data. We have to use proper
statistical models that best forecast the hidden patterns of the data or
information (Cuesta, Hector and Kumar, Sampath; 2016).
SkData, should allow you to format your data to send it to some predictive library
as scikit-learn.
Visualizing results
-------------------
In an explanatory data analysis process, simple visualization techniques are very useful for
discovering patterns, since the human eye plays an important role. Sometimes, we have to
generate a three-dimensional plot for finding the visual pattern. But, for getting better
visual patterns, we can also use a scatter plot matrix, instead of a three-dimensional plot. In
practice, the hypothesis of the study, dimensionality of the feature space, and data all play
important roles in ensuring a good visualization technique (Cuesta, Hector and Kumar, Sampath; 2016).
Quantitative and Qualitative data analysis
------------------------------------------
Quantitative data are numerical measurements expressed in terms of numbers.
Qualitative data are categorical measurements expressed in terms of natural language
descriptions.
Quantitative analytics involves analysis of numerical data. The type of the analysis will
depend on the level of measurement. There are four kinds of measurements:
* Nominal data has no logical order and is used as classification data.
* Ordinal data has a logical order and differences between values are not constant.
* Interval data is continuous and depends on logical order. The data has standardized differences between values, but do not include zero.
* Ratio data is continuous with logical order as well as regular intervals differences between values and may include zero.
Qualitative analysis can explore the complexity and meaning of social phenomena. Data for
qualitative study may include written texts (for example, documents or e-mail) and/or
audible and visual data (digital images or sounds).
(Cuesta, Hector and Kumar, Sampath; 2016)
Reproducibility for Data Analysis
---------------------------------
A good way to promote reproducibility for data analysis is store the
operation history. This history can be used to prepare another dataset
with the same steps (operations).
Books used as reference to guide this project:
----------------------------------------------
- https://www.packtpub.com/big-data-and-business-intelligence/clean-data
- https://www.packtpub.com/big-data-and-business-intelligence/python-data-analysis
- https://www.packtpub.com/big-data-and-business-intelligence/mastering-machine-learning-scikit-learn
- https://www.packtpub.com/big-data-and-business-intelligence/practical-data-analysis-second-edition
Some other materials used as reference:
---------------------------------------
- https://github.com/rsouza/MMD/blob/master/notebooks/3.1_Kaggle_Titanic.ipynb
- https://github.com/agconti/kaggle-titanic/blob/master/Titanic.ipynb
- https://github.com/donnemartin/data-science-ipython-notebooks/blob/master/kaggle/titanic.ipynb
Installing scikit-data
======================
Using conda
-----------
Installing `scikit-data` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
.. code-block:: console
$ conda config --add channels conda-forge
Once the `conda-forge` channel has been enabled, `scikit-data` can be installed with:
.. code-block:: console
$ conda install scikit-data
It is possible to list all of the versions of `scikit-data` available on your platform with:
.. code-block:: console
$ conda search scikit-data --channel conda-forge
Using pip
---------
To install scikit-data, run this command in your terminal:
.. code-block:: console
$ pip install skdata
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
More Information
----------------
* License: MIT
* Documentation: https://skdata.readthedocs.io
References
----------
* CUESTA, Hector; KUMAR, Sampath. Practical Data Analysis. Packt Publishing Ltd, 2016.
**Electronic materials**
* [1] http://www.datasciencecentral.com/profiles/blogs/introduction-to-outlier-detection-methods
| 0.946609 | 0.878991 |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/OpenDataScienceLab/skdata/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Jupyter Python Data Analisys could always use more documentation, whether as part of the
official Jupyter Python Data Analisys docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/xmnlab/skdata/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `skdata` for local development.
1. Fork the `skdata` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/skdata.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv skdata
$ cd skdata/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 skdata tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.4 and 3.5. Check
https://travis-ci.org/xmnlab/skdata/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_skdata
| scikit-data | /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/CONTRIBUTING.rst | CONTRIBUTING.rst | .. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/OpenDataScienceLab/skdata/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Jupyter Python Data Analisys could always use more documentation, whether as part of the
official Jupyter Python Data Analisys docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/xmnlab/skdata/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `skdata` for local development.
1. Fork the `skdata` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/skdata.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv skdata
$ cd skdata/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 skdata tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.4 and 3.5. Check
https://travis-ci.org/xmnlab/skdata/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_skdata
| 0.546133 | 0.477189 |
.. skdata documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to SciKit Data Analisys's documentation!
======================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
contributing
authorshistory
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| scikit-data | /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/docs/index.rst | index.rst | .. skdata documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to SciKit Data Analisys's documentation!
======================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
contributing
authorshistory
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 0.540196 | 0.169681 |
.. highlight:: shell
============
Installation
============
Using conda
-----------
Installing `scikit-data` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
.. code-block:: console
$ conda config --add channels conda-forge
Once the `conda-forge` channel has been enabled, `scikit-data` can be installed with:
.. code-block:: console
$ conda install scikit-data
It is possible to list all of the versions of `scikit-data` available on your platform with:
.. code-block:: console
$ conda search scikit-data --channel conda-forge
Using pip
---------
To install scikit-data, run this command in your terminal:
.. code-block:: console
$ pip install skdata
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for scikit-data can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/OpenDataScienceLab/skdata
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/OpenDataScienceLab/skdata/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/OpenDataScienceLab/skdata
.. _tarball: https://github.com/OpenDataScienceLab/skdata/tarball/master
| scikit-data | /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/docs/installation.rst | installation.rst | .. highlight:: shell
============
Installation
============
Using conda
-----------
Installing `scikit-data` from the `conda-forge` channel can be achieved by adding `conda-forge` to your channels with:
.. code-block:: console
$ conda config --add channels conda-forge
Once the `conda-forge` channel has been enabled, `scikit-data` can be installed with:
.. code-block:: console
$ conda install scikit-data
It is possible to list all of the versions of `scikit-data` available on your platform with:
.. code-block:: console
$ conda search scikit-data --channel conda-forge
Using pip
---------
To install scikit-data, run this command in your terminal:
.. code-block:: console
$ pip install skdata
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for scikit-data can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/OpenDataScienceLab/skdata
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/OpenDataScienceLab/skdata/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/OpenDataScienceLab/skdata
.. _tarball: https://github.com/OpenDataScienceLab/skdata/tarball/master
| 0.723602 | 0.360461 |
from functools import reduce
# local
from .cleaning import *
import json
import numpy as np
import pandas as pd
class StepSkData:
parent = None
def __init__(self, parent: 'SkDataSet'):
"""
:param parent:
"""
self.parent = parent
def compute(
self, start: int = None, end: int = None,
steps_id: list = None
) -> pd.DataFrame:
"""
:param start:
:param end:
:param steps_id:
:return:
"""
dset = self.parent.parent.data[self.parent.iid]
try:
index_col = dset.attrs['index']
except:
index_col = None
keys = tuple(
k for k in dset.dtype.names[:]
if k not in [index_col]
)
params = {}
if index_col is not None:
params['index'] = dset[index_col]
df = pd.DataFrame(dset[keys], **params)
for k in df.keys():
if df[k].dtype == pd.api.types.pandas_dtype('O'):
df[k] = df[k].str.decode("utf-8")
df[k].replace(
dset.attrs['null_string'], np.nan, inplace=True
)
steps = self.parent.attr_load(attr='steps', default=[])
if steps_id is not None:
_steps = [s for i, s in enumerate(steps) if i in steps_id]
else:
_steps = steps[start:end]
for step in _steps:
df = self.expr(df, step)
return df
def export_steps(self, file_path: str, mode: str = 'a'):
"""
:param file_path:
:param mode: [a]ppend|[w]rite
:return:
"""
pass
@staticmethod
def expr(data: pd.DataFrame, step: str):
# aliases
op = step['operation']
k = step['column'] if 'column' in step else None
k_new = k if 'new-column' not in step else step['new-column']
c_expr = step['expression']
if op == 'text-transform':
f_expr = eval('lambda value: %s' % c_expr)
data[k_new] = data[k].apply(f_expr)
elif op == 'categorize':
params = dict(data=data, col_name=k, categories=eval(c_expr))
params.update(
{'new_col_name': k_new} if 'new-column' in step else {}
)
categorize(**params)
elif op == 'fill-na':
fill = c_expr
if c_expr in ['mean', 'max', 'min', 'median']:
fill = data.eval('%s.%s()' % (k, c_expr))
data[k].fillna(fill, inplace=True)
elif op == 'drop-na':
params = eval(c_expr)
dropna(data, **params)
elif op == 'drop-unique':
params = eval(c_expr)
drop_columns_with_unique_values(data, **params)
return data
def import_steps(self, file_path: str, mode: str='a'):
"""
:param file_path:
:param mode: [a]ppend|[w]rite
:return:
"""
steps_json = json.load(file_path)
def replace(value: str, replace_dict: dict):
"""
"""
if not isinstance(value, str):
return value
return reduce(
lambda x, y: x.replace(y, replace_dict[y]), replace_dict, value
) | scikit-data | /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/skdata/steps.py | steps.py | from functools import reduce
# local
from .cleaning import *
import json
import numpy as np
import pandas as pd
class StepSkData:
parent = None
def __init__(self, parent: 'SkDataSet'):
"""
:param parent:
"""
self.parent = parent
def compute(
self, start: int = None, end: int = None,
steps_id: list = None
) -> pd.DataFrame:
"""
:param start:
:param end:
:param steps_id:
:return:
"""
dset = self.parent.parent.data[self.parent.iid]
try:
index_col = dset.attrs['index']
except:
index_col = None
keys = tuple(
k for k in dset.dtype.names[:]
if k not in [index_col]
)
params = {}
if index_col is not None:
params['index'] = dset[index_col]
df = pd.DataFrame(dset[keys], **params)
for k in df.keys():
if df[k].dtype == pd.api.types.pandas_dtype('O'):
df[k] = df[k].str.decode("utf-8")
df[k].replace(
dset.attrs['null_string'], np.nan, inplace=True
)
steps = self.parent.attr_load(attr='steps', default=[])
if steps_id is not None:
_steps = [s for i, s in enumerate(steps) if i in steps_id]
else:
_steps = steps[start:end]
for step in _steps:
df = self.expr(df, step)
return df
def export_steps(self, file_path: str, mode: str = 'a'):
"""
:param file_path:
:param mode: [a]ppend|[w]rite
:return:
"""
pass
@staticmethod
def expr(data: pd.DataFrame, step: str):
# aliases
op = step['operation']
k = step['column'] if 'column' in step else None
k_new = k if 'new-column' not in step else step['new-column']
c_expr = step['expression']
if op == 'text-transform':
f_expr = eval('lambda value: %s' % c_expr)
data[k_new] = data[k].apply(f_expr)
elif op == 'categorize':
params = dict(data=data, col_name=k, categories=eval(c_expr))
params.update(
{'new_col_name': k_new} if 'new-column' in step else {}
)
categorize(**params)
elif op == 'fill-na':
fill = c_expr
if c_expr in ['mean', 'max', 'min', 'median']:
fill = data.eval('%s.%s()' % (k, c_expr))
data[k].fillna(fill, inplace=True)
elif op == 'drop-na':
params = eval(c_expr)
dropna(data, **params)
elif op == 'drop-unique':
params = eval(c_expr)
drop_columns_with_unique_values(data, **params)
return data
def import_steps(self, file_path: str, mode: str='a'):
"""
:param file_path:
:param mode: [a]ppend|[w]rite
:return:
"""
steps_json = json.load(file_path)
def replace(value: str, replace_dict: dict):
"""
"""
if not isinstance(value, str):
return value
return reduce(
lambda x, y: x.replace(y, replace_dict[y]), replace_dict, value
) | 0.547464 | 0.368406 |
from abc import ABCMeta, abstractmethod
from IPython.display import display, update_display
from ipywidgets import widgets, IntSlider
# locals from import
from .utils import plot2html
from .data import cross_fields
from .data import SkData
import numpy as np
import pandas as pd
class SkDataWidget:
"""
"""
layout = {}
controllers = {}
def __call__(self, *args, **kwargs):
# show dashboard
return self.display(*args, **kwargs)
def __init__(
self, skd: SkData, settings: dict={}
):
"""
:param skd:
:param settings: dictionary
"""
self.settings = settings
self.skd = skd
# settings
if 'title' not in self.settings:
self.settings['title'] = 'Data Analysis'
chart_settings = self.settings.pop('chart', {})
table_settings = self.settings.pop('table', {})
self.register_controller(
chart=SkDataChartController(self, chart_settings)
)
self.register_controller(
table=SkDataTableController(self, table_settings)
)
def _(self, name: str):
"""
Return layout object
:param name:
:return:
"""
return self.layout[name]
def _display_result(self, **kwargs):
"""
:param kwargs: kwargs could receive these parameters:
y, xs, bins, chart_type
:return:
"""
# get controller
chart = self.controllers['chart']
table = self.controllers['table']
# widget value is the default value
y = kwargs.pop('y', self._('y').value)
xs = kwargs.pop('xs', self._('xs').value)
bins = kwargs.pop('bins', self._('bins').value)
chart_type = kwargs.pop('chart_type', self._('chart_type').value)
dset_id = kwargs.pop('dset_id')
table.display(
y=y,
xs=xs,
bins=bins,
dset_id=dset_id
)
chart.display(
y=y,
xs=xs,
bins=bins,
chart_type=chart_type,
dset_id=dset_id
)
# disable slider bins if no fields are numerical
fields = [y] + list(xs)
dtypes = self.get_data(dset_id=dset_id)[fields].dtypes.values
visibility = {True: 'visible', False: 'hidden'}
self._('bins').layout.visibility = visibility[
float in dtypes or int in dtypes
]
def get_data(self, dset_id: str) -> pd.DataFrame:
"""
:return:
"""
return self.skd[dset_id].result
def build_layout(self, dset_id: str):
"""
:param dset_id:
:return:
"""
all_fields = list(self.get_data(dset_id=dset_id).keys())
try:
field_reference = self.skd[dset_id].attrs('target')
except:
field_reference = all_fields[0]
fields_comparison = [all_fields[1]]
# chart type widget
self.register_widget(
chart_type=widgets.RadioButtons(
options=['individual', 'grouped'],
value='individual',
description='Chart Type:'
)
)
# bins widget
self.register_widget(
bins=IntSlider(
description='Bins:',
min=2, max=10, value=2,
continuous_update=False
)
)
# fields comparison widget
self.register_widget(
xs=widgets.SelectMultiple(
description='Xs:',
options=[f for f in all_fields if not f == field_reference],
value=fields_comparison
)
)
# field reference widget
self.register_widget(
y=widgets.Dropdown(
description='Y:',
options=all_fields,
value=field_reference
)
)
# used to internal flow control
y_changed = [False]
self.register_widget(
box_filter_panel=widgets.VBox([
self._('y'), self._('xs'), self._('bins')
])
)
# layout widgets
self.register_widget(
table=widgets.HTML(),
chart=widgets.HTML()
)
self.register_widget(vbox_chart=widgets.VBox([
self._('chart_type'), self._('chart')
]))
self.register_widget(
tab=widgets.Tab(
children=[
self._('box_filter_panel'),
self._('table'),
self._('vbox_chart')
]
)
)
self.register_widget(dashboard=widgets.HBox([self._('tab')]))
# observe hooks
def w_y_change(change: dict):
"""
When y field was changed xs field should be updated and data table
and chart should be displayed/updated.
:param change:
:return:
"""
# remove reference field from the comparison field list
_xs = [
f for f in all_fields
if not f == change['new']
]
y_changed[0] = True # flow control variable
_xs_value = list(self._('xs').value)
if change['new'] in self._('xs').value:
_xs_value.pop(_xs_value.index(change['new']))
if not _xs_value:
_xs_value = [_xs[0]]
self._('xs').options = _xs
self._('xs').value = _xs_value
self._display_result(y=change['new'], dset_id=dset_id)
y_changed[0] = False # flow control variable
# widgets registration
# change tab settings
self._('tab').set_title(0, 'Filter')
self._('tab').set_title(1, 'Data')
self._('tab').set_title(2, 'Chart')
# data panel
self._('table').value = '...'
# chart panel
self._('chart').value = '...'
# create observe callbacks
self._('bins').observe(
lambda change: (
self._display_result(bins=change['new'], dset_id=dset_id)
), 'value'
)
self._('y').observe(w_y_change, 'value')
# execute display result if 'y' was not changing.
self._('xs').observe(
lambda change: (
self._display_result(xs=change['new'], dset_id=dset_id)
if not y_changed[0] else None
), 'value'
)
self._('chart_type').observe(
lambda change: (
self._display_result(chart_type=change['new'], dset_id=dset_id)
), 'value'
)
def display(self, dset_id: str):
"""
:param dset_id:
:return:
"""
# update result
self.skd[dset_id].compute()
# build layout
self.build_layout(dset_id=dset_id)
# display widgets
display(self._('dashboard'))
# display data table and chart
self._display_result(dset_id=dset_id)
def register_controller(self, **kwargs):
"""
This method should receive objects as SkDataController instance.
:return:
"""
self.controllers.update(kwargs)
def register_widget(self, **kwargs):
"""
This method should receive objects as ipywidgets.Widgets instance
:return:
"""
self.layout.update(kwargs)
def __repr__(self):
return ''
class SkDataController:
__metaclass__ = ABCMeta
def __init__(self, parent, settings: dict={}):
self.parent = parent
self.settings = settings
@abstractmethod
def display(self):
"""
This method should be overwritten.
:return:
"""
pass
class SkDataChartController(SkDataController):
def __init__(self, parent, settings: dict={}):
super(self.__class__, self).__init__(parent, settings)
# default settings
if 'sharey' not in self.settings:
self.settings.update({'sharey': True})
def display(
self,
y: str, # field_reference
xs: list, # fields_comparison
bins: int,
chart_type: str,
dset_id: str
):
"""
:param y:
:param xs:
:param bins:
:param chart_type:
:param dset_id:
:return:
"""
chart_param = self.settings
w_chart = self.parent.layout['chart']
if chart_type == 'grouped':
# create a cross tab
d = cross_fields(
data=self.parent.get_data(dset_id=dset_id),
y=y, xs=xs, bins=bins
)
else:
d = self.parent.get_data(dset_id=dset_id)
chart_param.update(dict(
y=y, xs=xs, bins=bins
))
# display chart
plot2html(
data=d,
container=w_chart,
title=self.parent.settings['title'],
**chart_param
)
class SkDataTableController(SkDataController):
# display data and chart
def display(
self, y: str, xs: list or tuple, bins: int, dset_id: str
):
"""
:param xs:
:param bins:
:param dset_id:
:return:
"""
w_table = self.parent.layout['table']
# create a cross tab
d = cross_fields(
data=self.parent.get_data(dset_id=dset_id),
y=y, xs=xs, bins=bins
)
# display data table
w_table.value = d.to_html() | scikit-data | /scikit-data-0.1.3.tar.gz/scikit-data-0.1.3/skdata/widgets.py | widgets.py | from abc import ABCMeta, abstractmethod
from IPython.display import display, update_display
from ipywidgets import widgets, IntSlider
# locals from import
from .utils import plot2html
from .data import cross_fields
from .data import SkData
import numpy as np
import pandas as pd
class SkDataWidget:
"""
"""
layout = {}
controllers = {}
def __call__(self, *args, **kwargs):
# show dashboard
return self.display(*args, **kwargs)
def __init__(
self, skd: SkData, settings: dict={}
):
"""
:param skd:
:param settings: dictionary
"""
self.settings = settings
self.skd = skd
# settings
if 'title' not in self.settings:
self.settings['title'] = 'Data Analysis'
chart_settings = self.settings.pop('chart', {})
table_settings = self.settings.pop('table', {})
self.register_controller(
chart=SkDataChartController(self, chart_settings)
)
self.register_controller(
table=SkDataTableController(self, table_settings)
)
def _(self, name: str):
"""
Return layout object
:param name:
:return:
"""
return self.layout[name]
def _display_result(self, **kwargs):
"""
:param kwargs: kwargs could receive these parameters:
y, xs, bins, chart_type
:return:
"""
# get controller
chart = self.controllers['chart']
table = self.controllers['table']
# widget value is the default value
y = kwargs.pop('y', self._('y').value)
xs = kwargs.pop('xs', self._('xs').value)
bins = kwargs.pop('bins', self._('bins').value)
chart_type = kwargs.pop('chart_type', self._('chart_type').value)
dset_id = kwargs.pop('dset_id')
table.display(
y=y,
xs=xs,
bins=bins,
dset_id=dset_id
)
chart.display(
y=y,
xs=xs,
bins=bins,
chart_type=chart_type,
dset_id=dset_id
)
# disable slider bins if no fields are numerical
fields = [y] + list(xs)
dtypes = self.get_data(dset_id=dset_id)[fields].dtypes.values
visibility = {True: 'visible', False: 'hidden'}
self._('bins').layout.visibility = visibility[
float in dtypes or int in dtypes
]
def get_data(self, dset_id: str) -> pd.DataFrame:
"""
:return:
"""
return self.skd[dset_id].result
def build_layout(self, dset_id: str):
"""
:param dset_id:
:return:
"""
all_fields = list(self.get_data(dset_id=dset_id).keys())
try:
field_reference = self.skd[dset_id].attrs('target')
except:
field_reference = all_fields[0]
fields_comparison = [all_fields[1]]
# chart type widget
self.register_widget(
chart_type=widgets.RadioButtons(
options=['individual', 'grouped'],
value='individual',
description='Chart Type:'
)
)
# bins widget
self.register_widget(
bins=IntSlider(
description='Bins:',
min=2, max=10, value=2,
continuous_update=False
)
)
# fields comparison widget
self.register_widget(
xs=widgets.SelectMultiple(
description='Xs:',
options=[f for f in all_fields if not f == field_reference],
value=fields_comparison
)
)
# field reference widget
self.register_widget(
y=widgets.Dropdown(
description='Y:',
options=all_fields,
value=field_reference
)
)
# used to internal flow control
y_changed = [False]
self.register_widget(
box_filter_panel=widgets.VBox([
self._('y'), self._('xs'), self._('bins')
])
)
# layout widgets
self.register_widget(
table=widgets.HTML(),
chart=widgets.HTML()
)
self.register_widget(vbox_chart=widgets.VBox([
self._('chart_type'), self._('chart')
]))
self.register_widget(
tab=widgets.Tab(
children=[
self._('box_filter_panel'),
self._('table'),
self._('vbox_chart')
]
)
)
self.register_widget(dashboard=widgets.HBox([self._('tab')]))
# observe hooks
def w_y_change(change: dict):
"""
When y field was changed xs field should be updated and data table
and chart should be displayed/updated.
:param change:
:return:
"""
# remove reference field from the comparison field list
_xs = [
f for f in all_fields
if not f == change['new']
]
y_changed[0] = True # flow control variable
_xs_value = list(self._('xs').value)
if change['new'] in self._('xs').value:
_xs_value.pop(_xs_value.index(change['new']))
if not _xs_value:
_xs_value = [_xs[0]]
self._('xs').options = _xs
self._('xs').value = _xs_value
self._display_result(y=change['new'], dset_id=dset_id)
y_changed[0] = False # flow control variable
# widgets registration
# change tab settings
self._('tab').set_title(0, 'Filter')
self._('tab').set_title(1, 'Data')
self._('tab').set_title(2, 'Chart')
# data panel
self._('table').value = '...'
# chart panel
self._('chart').value = '...'
# create observe callbacks
self._('bins').observe(
lambda change: (
self._display_result(bins=change['new'], dset_id=dset_id)
), 'value'
)
self._('y').observe(w_y_change, 'value')
# execute display result if 'y' was not changing.
self._('xs').observe(
lambda change: (
self._display_result(xs=change['new'], dset_id=dset_id)
if not y_changed[0] else None
), 'value'
)
self._('chart_type').observe(
lambda change: (
self._display_result(chart_type=change['new'], dset_id=dset_id)
), 'value'
)
def display(self, dset_id: str):
"""
:param dset_id:
:return:
"""
# update result
self.skd[dset_id].compute()
# build layout
self.build_layout(dset_id=dset_id)
# display widgets
display(self._('dashboard'))
# display data table and chart
self._display_result(dset_id=dset_id)
def register_controller(self, **kwargs):
"""
This method should receive objects as SkDataController instance.
:return:
"""
self.controllers.update(kwargs)
def register_widget(self, **kwargs):
"""
This method should receive objects as ipywidgets.Widgets instance
:return:
"""
self.layout.update(kwargs)
def __repr__(self):
return ''
class SkDataController:
__metaclass__ = ABCMeta
def __init__(self, parent, settings: dict={}):
self.parent = parent
self.settings = settings
@abstractmethod
def display(self):
"""
This method should be overwritten.
:return:
"""
pass
class SkDataChartController(SkDataController):
def __init__(self, parent, settings: dict={}):
super(self.__class__, self).__init__(parent, settings)
# default settings
if 'sharey' not in self.settings:
self.settings.update({'sharey': True})
def display(
self,
y: str, # field_reference
xs: list, # fields_comparison
bins: int,
chart_type: str,
dset_id: str
):
"""
:param y:
:param xs:
:param bins:
:param chart_type:
:param dset_id:
:return:
"""
chart_param = self.settings
w_chart = self.parent.layout['chart']
if chart_type == 'grouped':
# create a cross tab
d = cross_fields(
data=self.parent.get_data(dset_id=dset_id),
y=y, xs=xs, bins=bins
)
else:
d = self.parent.get_data(dset_id=dset_id)
chart_param.update(dict(
y=y, xs=xs, bins=bins
))
# display chart
plot2html(
data=d,
container=w_chart,
title=self.parent.settings['title'],
**chart_param
)
class SkDataTableController(SkDataController):
# display data and chart
def display(
self, y: str, xs: list or tuple, bins: int, dset_id: str
):
"""
:param xs:
:param bins:
:param dset_id:
:return:
"""
w_table = self.parent.layout['table']
# create a cross tab
d = cross_fields(
data=self.parent.get_data(dset_id=dset_id),
y=y, xs=xs, bins=bins
)
# display data table
w_table.value = d.to_html() | 0.717408 | 0.146942 |
<p align="left">
<img alt="Scikit Data Access" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess_logo360x100.png"/>
</p>
- Import scientific data from various sources through one easy Python API.
- Use iterator patterns for each data source (configurable data generators + functions to get next data chunk).
- Skip parser programming and file format handling.
- Enjoy a common namespace for all data and unleash the power of data fusion.
- Handle data distribution in different modes: (1) local download, (2) caching of accessed data, or (3) online stream access
- Easily pull data on cloud servers through Python scripts and facilitate large-scale parallel processing.
- Build on an extensible plattform: Adding access to a new data source only requires addition of its "DataFetcher.py".
- Open source (MIT License)
<p align="center">
<img alt="Scikit Data Access Overview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess_overviewdiag.png" width="810"/>
</p>
Supported data sets:
<table>
<tr>
<td>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<p>Namespace</p>
</td>
<!-- preview -->
<td width=63>
<p><span>Preview<br><sup>(link)</sup></span></p>
</td>
<!-- description -->
<td width=500>
<p><span>Description & Data Source</span></p>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png> Astronomy
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.kepler
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Kepler.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.kepler.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Light curves for stars imaged by the NASA Kepler Space Telescope <br>Source: https://keplerscience.arc.nasa.gov</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.spectra
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_SDSS_Spectra.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.spectra.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Spectra from the Sloan Digital Sky Survey <br>Source: https://www.sdss.org/dr14/spectro/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.tess.data
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_TESS_Data_Alerts.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.tess.data.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Light curves from TESS Data Alerts <br>Source: https://archive.stsci.edu/prepds/tess-data-alerts/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png>
astro.tess.simulated
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_TESS_Simulated_Data.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.tess.simulated.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Simulated light curves from TESS End-to-End 6 <br>Source: https://archive.stsci.edu/prepds/tess-data-alerts/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_astro.png> astro.voyager
</sup>
</td>
<!-- preview -->
<td width=63><sup>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Voyager.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.astro.voyager.png"/></a>
</sup>
</td>
<!-- description -->
<td width=500>
<sup>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> Data from the Voyager mission. <br> Source: https://spdf.gsfc.nasa.gov/
</sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_engineering.png> Engineering
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_engineering.png> engineering.la.traffic_counts
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Traffic_Counts.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.engineering.la.traffic_counts.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Traffic Count data in Los Angeles. <br> Source: https://data.lacity.org/A-Livable-and-Sustainable-City/LADOT-Traffic-Counts-Summary/94wu-3ps3
<sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=250><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_engineering.png> engineering.webcam.mit_sailing
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Webcam_MIT_Sailing.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.engineering.webcam.mit_sailing.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Images from webcams located at the MIT Sailing Pavilion <br> Source: http://sailing.mit.edu/webcam.php
<sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_finance.png> Finance
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_finance.png> finance.timeseries
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Finance_Time_Series.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.finance.timeseries.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Financial time series data retrieved using Alpha Vantage API. <br> Source: https://www.alphavantage.co/
</sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> Geoscience
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200>
<sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.era_interim
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_ERA_Interim.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.era_interim.png"/></a>
</td>
<!-- description -->
<td width=500><sup>
Era-Interim data at different pressure values from <br/> the European Centre for Medium-Range Weather Forecasts accessed through the University Corporation for Atmospheric Research. <br> Source: https://rda.ucar.edu/datasets/ds627.0/
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.gldas
</sup>
</td>
<!-- preview -->
<td width=63><a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_GLDAS.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.gldas.png"/></a>
</td>
<!-- description -->
<td width=500><img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Land hydrology model produced by NASA. This version of the data is generated to match the GRACE temporal and spatial characteristics and is available as a complementary data product. <br> Source: https://grace.jpl.nasa.gov/data/get-data/land-water-content </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.grace
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_GRACE.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.grace.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> NASA GRACE Tellus Monthly Mass Grids. 30-day measurements of changes in Earth’s gravity field to quantify equivalent water thickness. <br> Source: https://grace.jpl.nasa.gov/data/get-data/monthly-mass-grids-land </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.grace.mascon
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_GRACE_Mascon.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.grace.mascon.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> NASA GRACE Tellus Monthly Mass Grids - Global Mascons. 30-day measurements of changes in Earth’s gravity field to quantify equivalent water thickness. Source: https://grace.jpl.nasa.gov/data/get-data/jpl_global_mascons </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.groundwater </sup>
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Groundwater.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.groundwater.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_usgs.png" /> <sup> United States groundwater monitoring wells measuring the depth to water level. Source: https://waterservices.usgs.gov </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.magnetometer
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Magnetometer.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.magnetometer.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_usgs.png" /> <sup> Data collected at magnetic observatories operated by the U.S. Geological Survey. Source: https://geomag.usgs.gov</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.mahali.rinex
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Mahali_Rinex.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.mahali.rinex.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_mit.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nsf.png" /> <sup> Rinex files from the MIT led NSF project studying the Earth’s ionosphere with GPS. <br> Web: http://mahali.mit.edu </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.mahali.tec
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Mahali_TEC.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.mahali.tec.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_mit.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nsf.png" /> <sup> Total Electron Content from the MIT led NSF project studying the Earth’s ionosphere with GPS. <br> Web:http://mahali.mit.edu </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.mahali.temperature
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Mahali_Temperature.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.mahali.temperature.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_mit.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nsf.png" /> <sup> Temperature data from the MIT led NSF project studying the Earth’s ionosphere with GPS. <br>Web: http://mahali.mit.edu </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.modis
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_MODIS.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.modis.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Spectroradiometer aboard the NASA Terra and Aqua image satellites. Generates approximately daily images of the Earth’s surface.<br> Source:https://modis.gsfc.nasa.gov </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.pbo
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_PBO.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.pbo.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_unavco.png" /> <sup> EarthScope - Plate Boundary Observatory (PBO): Daily GPS displacement time series measurements throughout the United States.<br>Source: http://www.unavco.org/projects/major-projects/pbo/pbo.html </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.sentinel_1
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Sentinel_1.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.sentinel_1.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup><img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_esa.png" /> Sentinel-1 TOPSAR data from the European Space Agency retrieved from the Alaska Satellite Facility.<br>Source:https://www.asf.alaska.edu/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.srtm
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_SRTM.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.srtm.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_usgs.png" /> <sup> Elevation data at a one arc second resolution from the Shuttle Radar Topography Mission (SRTMGL1).<br>Source: https://lpdaac.usgs.gov/dataset_discovery/measures/measures_products_table/srtmgl1_v003 </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.uavsar
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_UAVSAR.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.uavsar.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> UAVSAR SLC data from JPL.<br>Source: https://uavsar.jpl.nasa.gov/ </sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_geo.png> geo.wyoming_sounding
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_Wyoming_Sounding.ipynb"><img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.geo.wyoming_sounding.png"/></a>
</td>
<!-- description -->
<td width=500>
<sup> Sounding data from the University of Wyoming.<br>Source: http://weather.uwyo.edu/upperair/sounding.html </sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_planetary.png> Planetary Science
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_planetary.png> planetary.ode
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_ODE.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.planetary.ode.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Mars planetary data from PDS Geosciences Node's Orbital Data Explorer.<br>Source: http://pds-geosciences.wustl.edu/default.htm</sup>
</td>
</tr>
<!--- HEADER ENTRY ---------------------------------->
<tr>
<td colspan=4><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_solar.png> Solar Science
</sup>
</td>
</tr>
<!--- ENTRY ---------------------------------->
<tr>
<td width=2>
<p><o:p> </o:p></p>
</td>
<!-- namespace -->
<td width=200><sup>
<img src=https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_solar.png> solar.sdo
</sup>
</td>
<!-- preview -->
<td width=63>
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/examples/Demo_SDO.ipynb"> <img alt="Preview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_skdaccess.solar.sdo.png"/></a>
</td>
<!-- description -->
<td width=500>
<img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/icon_datasource_logo_nasa.png" /> <sup> Images from the Solar Dynamics Observatory.<br>Source: https://sdo.gsfc.nasa.gov/</sup>
</td>
</tr>
</table>
### Install
```python
pip install scikit-dataaccess
```
### Documentation
- User Manual: [/docs/skdaccess_manual.pdf](https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/skdaccess_manual.pdf)<br>
- Code documentation (Doxygen): [/docs/skdaccess_doxygen.pdf](https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/skdaccess_doxygen.pdf)
- Code visualization (treemap): [/docs/skdaccess_treemap.png](https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/skdaccess_treemap.png)
- Code class diagrams: [/docs/class_diagrams](https://github.com/MITHaystack/scikit-dataaccess/tree/master/skdaccess/docs/class_diagrams)
### Contributors
Project lead: [Victor Pankratius (MIT)](http://www.victorpankratius.com)<br>
Contributors: Cody M. Rude, Justin D. Li, David M. Blair, Michael G. Gowanlock, Guillaume Rongier, Victor Pankratius
New contributors welcome! Contact <img src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess_cont.png" /> to contribute and add interface code for your own datasets :smile:
### Acknowledgements
We acknowledge support from NASA AIST14-NNX15AG84G, NASA AIST16-80NSSC17K0125, NSF ACI-1442997, and NSF AGS-1343967.
## Examples
Code examples (Jupyter notebooks) for all datasets listed above are available at: [/skdaccess/examples](https://github.com/MITHaystack/scikit-dataaccess/tree/master/skdaccess/examples)
<p align="center">
<a href="https://github.com/MITHaystack/scikit-dataaccess/blob/master/skdaccess/docs/images/skdaccess-quickexamples-combined.png">
<img alt="Scikit Data Access Overview" src="https://github.com/MITHaystack/scikit-dataaccess/raw/master/skdaccess/docs/images/skdaccess-quickexamples-combined.png"/>
</a>
</p>
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/README.md | README.md | pip install scikit-dataaccess | 0.613005 | 0.765987 |
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.framework.param_class import *
# Standard library imports
from collections import OrderedDict
import re
# 3rd part imports
import pandas as pd
class DataFetcher(DataFetcherCache):
'''
Data Fetcher for Mahali temperature data
'''
def __init__(self, start_year, end_year, spacecraft='both'):
'''
Initialize Voyager data fetcher
@param start_year: Starting year
@param end_year: Ending year
@param spacecraft: Which spaceraft to use (voyager1, voyager2, or both).
'''
# Generate list of years for retrieving data
self.year_list = list(range(start_year, end_year+1))
# Create a list of spacecraft data to download
if spacecraft not in ('voyager1', 'voyager2', 'both'):
raise RuntimeError('Spacecraft not understood')
if spacecraft == 'both':
self.spacecraft_list = ['voyager1', 'voyager2']
else:
self.spacecraft_list = [spacecraft]
# Field names for parsing data
self.field_names = [
'Year', 'Day', 'Hour', 'Distance', 'Latitude', 'Longitude',
'Field_Magnitude_Average', 'Magnitude_of_Average_Field', 'BR', 'BT',
'BN', 'Flow_Speed', 'Theta', 'Phi', 'Proton_Density',
'Proton_Temperature', 'LECP_1', 'LECP_2', 'LECP_3', 'CRS_1', 'CRS_2',
'CRS_3', 'CRS_4', 'CRS_5', 'CRS_6', 'CRS_7', 'CRS_8', 'CRS_9',
'CRS_10', 'CRS_11', 'CRS_12', 'CRS_13', 'CRS_14', 'CRS_15', 'CRS_16',
'CRS_17', 'CRS_18',
]
# Field widths as the data is fixed width format
self.field_widths = [
4, 4, 3, 7, 7, 7, 8, 8, 8, 8, 8, 7, 7, 7, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10
]
# Base data location url
self.base_url = 'https://spdf.gsfc.nasa.gov/pub/data/voyager/'
super(DataFetcher, self).__init__([])
def generateURL(self, spacecraft, in_year):
'''
Generate url for voyager data
@param spacecraft: Voyager spacecraft (vy1 or vy2)
@param in_year: Input year (or 'metadata')
@return Url of data location
'''
num = spacecraft[-1]
url = self.base_url + 'voyager' + num + '/merged/'
if in_year == 'metadata':
url = url + 'vy' + num + 'mgd.txt'
else:
url = url + 'vy' + num + '_' + str(in_year) + '.asc'
return url
def parseVoyagerData(self, spacecraft, in_filename):
'''
Parse Voyager Data
@param spacecraft: Voyager spacecraft (vy1 or vy2)
@param in_filename: Input voyager data filename
@return Pandas Dataframe of Voyager data
'''
def convert_date(year, day, hour):
'''
Convert to datetime
@param year: Input year
@param day: Input day
@param hour: Input hour
@return datetime
'''
return pd.to_datetime("{0:0>4}{1:0>3}{2:0>2}".format(year,day,hour), format='%Y%j%H')
# Voyager 1 has 3 less columns than Voyager 2
if spacecraft == 'voyager1':
field_widths = self.field_widths[:34]
field_names = self.field_names[:34]
else:
field_widths = self.field_widths
field_names = self.field_names
# Parse the data
data = pd.read_fwf(in_filename, widths=field_widths, header=None, names=field_names)
# Create date column
data['Date'] = list(map(convert_date,
data.loc[:,'Year'],
data.loc[:,'Day'],
data.loc[:,'Hour']))
data.set_index('Date', inplace=True)
return data
def parseVoyagerMetadata(self, in_file):
''' Parse voyager metadata
@param in_file: Input filename
@return Dictionary containing metadata
'''
with open(in_file,'r',errors='ignore') as metafile:
lines = metafile.readlines()
lines = [line.rstrip() for line in lines]
start_index = -1
end_index = -1
prev_line = None
for index, line in enumerate(lines):
if re.search('FORMAT DESCRIPTION',line):
start_index = index+4
if prev_line == '' and line == '' and start_index > -1:
end_index = index - 2
break
prev_line = line
description_data = lines[start_index:end_index+1]
field_index = 0
description_dict = OrderedDict()
for line in description_data:
if re.search('\s+[0-9]+', line[:6]):
info = re.split('\s\s+',line)[1:]
key = self.field_names[field_index]
description_dict[key] = OrderedDict()
description_dict[key]['MEANING'] = info[2]
description_dict[key]['UNITS/COMMENTS'] = info[3]
field_index += 1
elif line.strip() != '':
description_dict[key]['MEANING'] = description_dict[key]['MEANING'] + ' ' + line.strip()
return description_dict
def getMetadataFiles(self):
'''
Get path to metadata file
Metadata will download if necessary
@return List containing file path(s) for the metadata
'''
urls = [self.generateURL(spacecraft, 'metadata') for spacecraft in self.spacecraft_list]
return self.cacheData('voyager', urls)
def output(self):
'''
Generate data wrapper
@return data wrapper of voyager data
'''
# Generate url_list
url_list = []
for spacecraft in self.spacecraft_list:
url_list += [self.generateURL(spacecraft, 'metadata')]
url_list += [self.generateURL(spacecraft, year) for year in self.year_list]
full_filenames = self.cacheData('voyager', url_list)
num_files = len(self.year_list) + 1
# Parse downloaded data
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for index, spacecraft in enumerate(self.spacecraft_list):
# Need to select data for this spacecraft
filenames = full_filenames[num_files * index : num_files * (1+index)]
# parse data
metadata_dict[spacecraft] = self.parseVoyagerMetadata(filenames[0])
data_list = [self.parseVoyagerData(spacecraft, filename) for filename in filenames[1:]]
data_dict[spacecraft] = pd.concat(data_list)
return TableWrapper(data_dict, meta_data = metadata_dict, default_columns = ['BR','BT','BN']) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/astro/voyager/data_fetcher.py | data_fetcher.py |
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.framework.param_class import *
# Standard library imports
from collections import OrderedDict
import re
# 3rd part imports
import pandas as pd
class DataFetcher(DataFetcherCache):
'''
Data Fetcher for Mahali temperature data
'''
def __init__(self, start_year, end_year, spacecraft='both'):
'''
Initialize Voyager data fetcher
@param start_year: Starting year
@param end_year: Ending year
@param spacecraft: Which spaceraft to use (voyager1, voyager2, or both).
'''
# Generate list of years for retrieving data
self.year_list = list(range(start_year, end_year+1))
# Create a list of spacecraft data to download
if spacecraft not in ('voyager1', 'voyager2', 'both'):
raise RuntimeError('Spacecraft not understood')
if spacecraft == 'both':
self.spacecraft_list = ['voyager1', 'voyager2']
else:
self.spacecraft_list = [spacecraft]
# Field names for parsing data
self.field_names = [
'Year', 'Day', 'Hour', 'Distance', 'Latitude', 'Longitude',
'Field_Magnitude_Average', 'Magnitude_of_Average_Field', 'BR', 'BT',
'BN', 'Flow_Speed', 'Theta', 'Phi', 'Proton_Density',
'Proton_Temperature', 'LECP_1', 'LECP_2', 'LECP_3', 'CRS_1', 'CRS_2',
'CRS_3', 'CRS_4', 'CRS_5', 'CRS_6', 'CRS_7', 'CRS_8', 'CRS_9',
'CRS_10', 'CRS_11', 'CRS_12', 'CRS_13', 'CRS_14', 'CRS_15', 'CRS_16',
'CRS_17', 'CRS_18',
]
# Field widths as the data is fixed width format
self.field_widths = [
4, 4, 3, 7, 7, 7, 8, 8, 8, 8, 8, 7, 7, 7, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10
]
# Base data location url
self.base_url = 'https://spdf.gsfc.nasa.gov/pub/data/voyager/'
super(DataFetcher, self).__init__([])
def generateURL(self, spacecraft, in_year):
'''
Generate url for voyager data
@param spacecraft: Voyager spacecraft (vy1 or vy2)
@param in_year: Input year (or 'metadata')
@return Url of data location
'''
num = spacecraft[-1]
url = self.base_url + 'voyager' + num + '/merged/'
if in_year == 'metadata':
url = url + 'vy' + num + 'mgd.txt'
else:
url = url + 'vy' + num + '_' + str(in_year) + '.asc'
return url
def parseVoyagerData(self, spacecraft, in_filename):
'''
Parse Voyager Data
@param spacecraft: Voyager spacecraft (vy1 or vy2)
@param in_filename: Input voyager data filename
@return Pandas Dataframe of Voyager data
'''
def convert_date(year, day, hour):
'''
Convert to datetime
@param year: Input year
@param day: Input day
@param hour: Input hour
@return datetime
'''
return pd.to_datetime("{0:0>4}{1:0>3}{2:0>2}".format(year,day,hour), format='%Y%j%H')
# Voyager 1 has 3 less columns than Voyager 2
if spacecraft == 'voyager1':
field_widths = self.field_widths[:34]
field_names = self.field_names[:34]
else:
field_widths = self.field_widths
field_names = self.field_names
# Parse the data
data = pd.read_fwf(in_filename, widths=field_widths, header=None, names=field_names)
# Create date column
data['Date'] = list(map(convert_date,
data.loc[:,'Year'],
data.loc[:,'Day'],
data.loc[:,'Hour']))
data.set_index('Date', inplace=True)
return data
def parseVoyagerMetadata(self, in_file):
''' Parse voyager metadata
@param in_file: Input filename
@return Dictionary containing metadata
'''
with open(in_file,'r',errors='ignore') as metafile:
lines = metafile.readlines()
lines = [line.rstrip() for line in lines]
start_index = -1
end_index = -1
prev_line = None
for index, line in enumerate(lines):
if re.search('FORMAT DESCRIPTION',line):
start_index = index+4
if prev_line == '' and line == '' and start_index > -1:
end_index = index - 2
break
prev_line = line
description_data = lines[start_index:end_index+1]
field_index = 0
description_dict = OrderedDict()
for line in description_data:
if re.search('\s+[0-9]+', line[:6]):
info = re.split('\s\s+',line)[1:]
key = self.field_names[field_index]
description_dict[key] = OrderedDict()
description_dict[key]['MEANING'] = info[2]
description_dict[key]['UNITS/COMMENTS'] = info[3]
field_index += 1
elif line.strip() != '':
description_dict[key]['MEANING'] = description_dict[key]['MEANING'] + ' ' + line.strip()
return description_dict
def getMetadataFiles(self):
'''
Get path to metadata file
Metadata will download if necessary
@return List containing file path(s) for the metadata
'''
urls = [self.generateURL(spacecraft, 'metadata') for spacecraft in self.spacecraft_list]
return self.cacheData('voyager', urls)
def output(self):
'''
Generate data wrapper
@return data wrapper of voyager data
'''
# Generate url_list
url_list = []
for spacecraft in self.spacecraft_list:
url_list += [self.generateURL(spacecraft, 'metadata')]
url_list += [self.generateURL(spacecraft, year) for year in self.year_list]
full_filenames = self.cacheData('voyager', url_list)
num_files = len(self.year_list) + 1
# Parse downloaded data
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for index, spacecraft in enumerate(self.spacecraft_list):
# Need to select data for this spacecraft
filenames = full_filenames[num_files * index : num_files * (1+index)]
# parse data
metadata_dict[spacecraft] = self.parseVoyagerMetadata(filenames[0])
data_list = [self.parseVoyagerData(spacecraft, filename) for filename in filenames[1:]]
data_dict[spacecraft] = pd.concat(data_list)
return TableWrapper(data_dict, meta_data = metadata_dict, default_columns = ['BR','BT','BN']) | 0.680348 | 0.420243 |
# mithagi required Base,Utils imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.tess_utils import parseTessData
# Standard library imports
from collections import OrderedDict
# Third pary imports
from astropy.io import fits
from astropy.table import Table
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherCache):
''' Data Fetcher for TESS data alerts '''
def __init__(self, ap_paramList, toi_information):
'''
Initialize TESS Data Fetcher
@param ap_paramList[tess_ids]: List of TESS IDs to retrieve
@param toi_information: Pandas dataframe containing target information
'''
self.toi_information = toi_information
super(DataFetcher, self).__init__(ap_paramList)
def getTargetInformation():
"""
Retrieve Target list information
@return Target information list
"""
pass
def generateURLFromTID(self, tid_list):
"""
Generate URL from TID
@param tid_list: Input Tess ID list
@return URL List of of objects in tid_list
"""
pass
def output(self):
"""
Retrieve Tess data
@return TableWrapper containing TESS lightcurves
"""
tid_series = pd.Series([int(tid) for tid in self.ap_paramList[0]()])
tid_string_list = [str(tid).zfill(16) for tid in tid_series]
tid_not_found = tid_series.isin(self.toi_information['tic_id'])
if np.count_nonzero(~tid_not_found) > 0:
raise RuntimeError("No data for TID: " + str(tid_series[~tid_not_found].tolist()))
url_list = self.generateURLFromTID(tid_string_list)
file_list = self.cacheData('tess', url_list)
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for filename, tid in zip(file_list, tid_string_list):
fits_data = fits.open(filename)
data_dict[tid], metadata_dict[tid] = parseTessData(fits_data)
return TableWrapper(data_dict, meta_data = metadata_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/astro/tess/generic/cache.py | cache.py |
# mithagi required Base,Utils imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.tess_utils import parseTessData
# Standard library imports
from collections import OrderedDict
# Third pary imports
from astropy.io import fits
from astropy.table import Table
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherCache):
''' Data Fetcher for TESS data alerts '''
def __init__(self, ap_paramList, toi_information):
'''
Initialize TESS Data Fetcher
@param ap_paramList[tess_ids]: List of TESS IDs to retrieve
@param toi_information: Pandas dataframe containing target information
'''
self.toi_information = toi_information
super(DataFetcher, self).__init__(ap_paramList)
def getTargetInformation():
"""
Retrieve Target list information
@return Target information list
"""
pass
def generateURLFromTID(self, tid_list):
"""
Generate URL from TID
@param tid_list: Input Tess ID list
@return URL List of of objects in tid_list
"""
pass
def output(self):
"""
Retrieve Tess data
@return TableWrapper containing TESS lightcurves
"""
tid_series = pd.Series([int(tid) for tid in self.ap_paramList[0]()])
tid_string_list = [str(tid).zfill(16) for tid in tid_series]
tid_not_found = tid_series.isin(self.toi_information['tic_id'])
if np.count_nonzero(~tid_not_found) > 0:
raise RuntimeError("No data for TID: " + str(tid_series[~tid_not_found].tolist()))
url_list = self.generateURLFromTID(tid_string_list)
file_list = self.cacheData('tess', url_list)
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for filename, tid in zip(file_list, tid_string_list):
fits_data = fits.open(filename)
data_dict[tid], metadata_dict[tid] = parseTessData(fits_data)
return TableWrapper(data_dict, meta_data = metadata_dict) | 0.636805 | 0.297285 |
# """@package Kepler
# Provides classes for accessing Kepler data.
# """
# mithagi required Base,Utils imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.file_util import openPandasHDFStoreLocking
# Standard library imports
import re
import glob
import os
from collections import OrderedDict
from ftplib import FTP
from io import BytesIO
from tarfile import TarFile
# 3rd party package imports
import pandas as pd
import numpy as np
from astropy.table import Table
from astropy.io import fits
class DataFetcher(DataFetcherCache):
''' Data Fetcher for Kepler light curve data '''
def __init__(self, ap_paramList, quarter_list=None):
'''
Initialize Kepler Data Fetcher
@param ap_paramList[kepler_id_list]: List of kepler id's
@param quarter_list: List of quarters (0-17) (default: all quarters)
'''
self.quarter_list = quarter_list
super(DataFetcher, self).__init__(ap_paramList)
def _getKeplerFilePath(self):
'''
Get the path to the Kepler HDF file
This helper function is for backwards compatibility as data
locations for cached data are now all directories.
@return String containing the path to the Kepler HDF file
'''
data_location = DataFetcher.getDataLocation('kepler')
if os.path.split(data_location)[1] == 'kepler_data.h5':
data_file_name = data_location
else:
data_file_name = os.path.join(data_location, 'kepler_data.h5')
data_file_directory = os.path.split(data_file_name)[0]
if not os.path.isdir(data_file_directory):
os.makedirs(data_file_directory, exist_ok=True)
return data_file_name
def downloadKeplerData(self, kid_list):
'''
Download and parse Kepler data for a list of kepler id's
@param kid_list: List of Kepler ID's to download
@return dictionary of kepler data
'''
return_data = dict()
# connect to ftp server
ftp = FTP('archive.stsci.edu')
ftp.login()
# For each kepler id, download the appropriate data
for kid in kid_list:
ftp.cwd('/pub/kepler/lightcurves/' + kid[0:4] + '/' + kid)
file_list = ftp.nlst()
filename = None
for file in file_list:
match = re.match('kplr' + kid + '_lc_.*',file)
if match:
filename = match.group(0)
break
bio = BytesIO()
ftp.retrbinary('RETR ' + filename, bio.write)
bio.seek(0)
# Read tar file
tfile = tfile = TarFile(fileobj=bio)
member_list = [member for member in tfile.getmembers()]
# Extract data from tar file
data_list = []
for member in member_list:
file = tfile.extractfile(member)
fits_data = fits.open(file)
data = Table(fits_data[1].data).to_pandas()
data.set_index('CADENCENO',inplace=True)
data.loc[:,'QUARTER'] = fits_data[0].header['QUARTER']
data_list.append(data)
full_data = pd.concat(data_list)
return_data[kid] = full_data
try:
ftp.quit()
except:
ftp.close()
return return_data
def cacheData(self, data_specification):
'''
Cache Kepler data locally
@param data_specification: List of kepler IDs
'''
kid_list = data_specification
data_location = self._getKeplerFilePath()
store = openPandasHDFStoreLocking(data_location, 'a')
missing_kid_list = []
for kid in kid_list:
if 'kid_' + kid not in store:
missing_kid_list.append(kid)
if len(missing_kid_list) > 0:
print("Downloading data for " + str(len(missing_kid_list)) + " star(s)")
missing_kid_data = self.downloadKeplerData(missing_kid_list)
for kid,data in missing_kid_data.items():
store.put('kid_' + kid, data)
store.close()
def output(self):
'''
Output kepler data wrapper
@return DataWrapper
'''
kid_list = self.ap_paramList[0]()
kid_list = [ str(kid).zfill(9) for kid in kid_list ]
self.cacheData(kid_list)
data_location = self._getKeplerFilePath()
kid_data = dict()
store = openPandasHDFStoreLocking(data_location, 'r')
for kid in kid_list:
kid_data[kid] = store['kid_' + kid]
# If downloaded using old skdaccess version
# switch index
if kid_data[kid].index.name == 'TIME':
kid_data[kid]['TIME'] = kid_data[kid].index
kid_data[kid].set_index('CADENCENO', inplace=True)
store.close()
kid_data = OrderedDict(sorted(kid_data.items(), key=lambda t: t[0]))
# If a list of quarters is specified, only select data in those quarters
if self.quarter_list != None:
for kid in kid_list:
kid_data[kid] = kid_data[kid][kid_data[kid]['QUARTER'].isin(self.quarter_list)]
return TableWrapper(kid_data, default_columns = ['PDCSAP_FLUX'], default_error_columns = ['PDCSAP_FLUX_ERR']) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/astro/kepler/data_fetcher.py | data_fetcher.py |
# """@package Kepler
# Provides classes for accessing Kepler data.
# """
# mithagi required Base,Utils imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.file_util import openPandasHDFStoreLocking
# Standard library imports
import re
import glob
import os
from collections import OrderedDict
from ftplib import FTP
from io import BytesIO
from tarfile import TarFile
# 3rd party package imports
import pandas as pd
import numpy as np
from astropy.table import Table
from astropy.io import fits
class DataFetcher(DataFetcherCache):
''' Data Fetcher for Kepler light curve data '''
def __init__(self, ap_paramList, quarter_list=None):
'''
Initialize Kepler Data Fetcher
@param ap_paramList[kepler_id_list]: List of kepler id's
@param quarter_list: List of quarters (0-17) (default: all quarters)
'''
self.quarter_list = quarter_list
super(DataFetcher, self).__init__(ap_paramList)
def _getKeplerFilePath(self):
'''
Get the path to the Kepler HDF file
This helper function is for backwards compatibility as data
locations for cached data are now all directories.
@return String containing the path to the Kepler HDF file
'''
data_location = DataFetcher.getDataLocation('kepler')
if os.path.split(data_location)[1] == 'kepler_data.h5':
data_file_name = data_location
else:
data_file_name = os.path.join(data_location, 'kepler_data.h5')
data_file_directory = os.path.split(data_file_name)[0]
if not os.path.isdir(data_file_directory):
os.makedirs(data_file_directory, exist_ok=True)
return data_file_name
def downloadKeplerData(self, kid_list):
'''
Download and parse Kepler data for a list of kepler id's
@param kid_list: List of Kepler ID's to download
@return dictionary of kepler data
'''
return_data = dict()
# connect to ftp server
ftp = FTP('archive.stsci.edu')
ftp.login()
# For each kepler id, download the appropriate data
for kid in kid_list:
ftp.cwd('/pub/kepler/lightcurves/' + kid[0:4] + '/' + kid)
file_list = ftp.nlst()
filename = None
for file in file_list:
match = re.match('kplr' + kid + '_lc_.*',file)
if match:
filename = match.group(0)
break
bio = BytesIO()
ftp.retrbinary('RETR ' + filename, bio.write)
bio.seek(0)
# Read tar file
tfile = tfile = TarFile(fileobj=bio)
member_list = [member for member in tfile.getmembers()]
# Extract data from tar file
data_list = []
for member in member_list:
file = tfile.extractfile(member)
fits_data = fits.open(file)
data = Table(fits_data[1].data).to_pandas()
data.set_index('CADENCENO',inplace=True)
data.loc[:,'QUARTER'] = fits_data[0].header['QUARTER']
data_list.append(data)
full_data = pd.concat(data_list)
return_data[kid] = full_data
try:
ftp.quit()
except:
ftp.close()
return return_data
def cacheData(self, data_specification):
'''
Cache Kepler data locally
@param data_specification: List of kepler IDs
'''
kid_list = data_specification
data_location = self._getKeplerFilePath()
store = openPandasHDFStoreLocking(data_location, 'a')
missing_kid_list = []
for kid in kid_list:
if 'kid_' + kid not in store:
missing_kid_list.append(kid)
if len(missing_kid_list) > 0:
print("Downloading data for " + str(len(missing_kid_list)) + " star(s)")
missing_kid_data = self.downloadKeplerData(missing_kid_list)
for kid,data in missing_kid_data.items():
store.put('kid_' + kid, data)
store.close()
def output(self):
'''
Output kepler data wrapper
@return DataWrapper
'''
kid_list = self.ap_paramList[0]()
kid_list = [ str(kid).zfill(9) for kid in kid_list ]
self.cacheData(kid_list)
data_location = self._getKeplerFilePath()
kid_data = dict()
store = openPandasHDFStoreLocking(data_location, 'r')
for kid in kid_list:
kid_data[kid] = store['kid_' + kid]
# If downloaded using old skdaccess version
# switch index
if kid_data[kid].index.name == 'TIME':
kid_data[kid]['TIME'] = kid_data[kid].index
kid_data[kid].set_index('CADENCENO', inplace=True)
store.close()
kid_data = OrderedDict(sorted(kid_data.items(), key=lambda t: t[0]))
# If a list of quarters is specified, only select data in those quarters
if self.quarter_list != None:
for kid in kid_list:
kid_data[kid] = kid_data[kid][kid_data[kid]['QUARTER'].isin(self.quarter_list)]
return TableWrapper(kid_data, default_columns = ['PDCSAP_FLUX'], default_error_columns = ['PDCSAP_FLUX_ERR']) | 0.602529 | 0.340266 |
The MIT License (MIT)<br>
Copyright (c) 2016,2017 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
```
Land hydrology model produced by NASA<br>
https://grace.jpl.nasa.gov/data/get-data/land-water-content/
```
from skdaccess.geo.gldas import DataFetcher as GLDAS_DF
from skdaccess.framework.param_class import *
geo_point = AutoList([(38, -117)]) # location in Nevada
gldas_fetcher = GLDAS_DF([geo_point],start_date='2010-01-01',end_date='2014-01-01')
data_wrapper = gldas_fetcher.output() # Get a data wrapper
label, data = next(data_wrapper.getIterator()) # Get GLDAS data
data.head()
plt.plot(data['Equivalent Water Thickness (cm)']);
plt.xticks(rotation=15);
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_GLDAS.ipynb | Demo_GLDAS.ipynb | %matplotlib notebook
import matplotlib.pyplot as plt
from skdaccess.geo.gldas import DataFetcher as GLDAS_DF
from skdaccess.framework.param_class import *
geo_point = AutoList([(38, -117)]) # location in Nevada
gldas_fetcher = GLDAS_DF([geo_point],start_date='2010-01-01',end_date='2014-01-01')
data_wrapper = gldas_fetcher.output() # Get a data wrapper
label, data = next(data_wrapper.getIterator()) # Get GLDAS data
data.head()
plt.plot(data['Equivalent Water Thickness (cm)']);
plt.xticks(rotation=15); | 0.514644 | 0.564459 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
```
Rinex files from Mahali 2015 Alaska Experiment<br>
MIT led NSF project studying the Earth’s ionosphere with GPS<br>
http://mahali.mit.edu/
```
from skdaccess.framework.param_class import *
from skdaccess.geo.mahali.rinex import DataFetcher as MAHALI_DF
station_list = AutoList(['mh06']) # List of stations to download
mahali_df = MAHALI_DF([station_list], start_date='2015-10-10', end_date='2015-10-10')
data_wrapper = mahali_df.output() # Get a data wrapper
site, date, nav, obs = next(data_wrapper.getIterator()) # Get data location
site, date, nav, obs # Print information about downloaded data
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Mahali_Rinex.ipynb | Demo_Mahali_Rinex.ipynb | %matplotlib notebook
import matplotlib.pyplot as plt
from skdaccess.framework.param_class import *
from skdaccess.geo.mahali.rinex import DataFetcher as MAHALI_DF
station_list = AutoList(['mh06']) # List of stations to download
mahali_df = MAHALI_DF([station_list], start_date='2015-10-10', end_date='2015-10-10')
data_wrapper = mahali_df.output() # Get a data wrapper
site, date, nav, obs = next(data_wrapper.getIterator()) # Get data location
site, date, nav, obs # Print information about downloaded data | 0.352759 | 0.595257 |
The MIT License (MIT)<br>
Copyright (c) 2018 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
Initial imports
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from skdaccess.framework.param_class import *
from skdaccess.finance.timeseries.stream import DataFetcher
```
Select which symbol to retrieve
```
stock_ap_list = AutoList(['SPY'])
```
Create a data fetcher
```
stockdf = DataFetcher([stock_ap_list], 'daily', '2017-06-01')
```
Access the data
```
dw = stockdf.output()
label, data = next(dw.getIterator())
```
List the columns of the data
```
data.columns
```
Plot the closing price
```
ax = data['4. close'].plot(title='SPY daily closing price');
ax.set_ylabel('price');
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Finance_Time_Series.ipynb | Demo_Finance_Time_Series.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from skdaccess.framework.param_class import *
from skdaccess.finance.timeseries.stream import DataFetcher
stock_ap_list = AutoList(['SPY'])
stockdf = DataFetcher([stock_ap_list], 'daily', '2017-06-01')
dw = stockdf.output()
label, data = next(dw.getIterator())
data.columns
ax = data['4. close'].plot(title='SPY daily closing price');
ax.set_ylabel('price'); | 0.38122 | 0.641801 |
The MIT License (MIT)<br>
Copyright (c) 2016,2017 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
# Plate Boundary Observatory GPS Data
# Source: http://www.unavco.org/instrumentation/networks/status/pbo
# Time series data for GPS sensors (North, East, Up), displacement in meters versus time
from skdaccess.geo.pbo import DataFetcher as PBO_DF
from skdaccess.framework.param_class import *
%matplotlib notebook
import matplotlib.pyplot as plt
# Latitude and Longitude range around Akutan Volcano
lat_range = AutoList((54,54.25))
lon_range = AutoList((-166, -165.6))
start_time = '2006-01-01'
end_time = '2015-06-01'
PBO_data_fetcher = PBO_DF(start_time, end_time, [lat_range, lon_range],mdyratio=.7)
PBO_data = PBO_data_fetcher.output().get() # returns an ordered dictionary of data frames
PBO_data['AV06'].head()
plt.figure();
plt.plot(PBO_data['AV06']['dN']);
plt.tight_layout()
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_PBO.ipynb | Demo_PBO.ipynb | # Plate Boundary Observatory GPS Data
# Source: http://www.unavco.org/instrumentation/networks/status/pbo
# Time series data for GPS sensors (North, East, Up), displacement in meters versus time
from skdaccess.geo.pbo import DataFetcher as PBO_DF
from skdaccess.framework.param_class import *
%matplotlib notebook
import matplotlib.pyplot as plt
# Latitude and Longitude range around Akutan Volcano
lat_range = AutoList((54,54.25))
lon_range = AutoList((-166, -165.6))
start_time = '2006-01-01'
end_time = '2015-06-01'
PBO_data_fetcher = PBO_DF(start_time, end_time, [lat_range, lon_range],mdyratio=.7)
PBO_data = PBO_data_fetcher.output().get() # returns an ordered dictionary of data frames
PBO_data['AV06'].head()
plt.figure();
plt.plot(PBO_data['AV06']['dN']);
plt.tight_layout() | 0.818047 | 0.566798 |
The MIT License (MIT)<br>
Copyright (c) 2018 Massachusetts Institute of Technology<br>
Authors: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
```
TESS End-to-End 6 Simulated Light Curve Time Series<br>
Source: https://archive.stsci.edu/tess/ete-6.html
```
from skdaccess.astro.tess.simulated.cache import DataFetcher as TESS_DF
from skdaccess.framework.param_class import *
import numpy as np
tess_fetcher = TESS_DF([AutoList([376664523])])
tess_dw = tess_fetcher.output()
label, data = next(tess_dw.getIterator())
```
Normalize flux
```
valid_index = data['PDCSAP_FLUX'] != 0.0
data.loc[valid_index, 'RELATIVE_PDCSAP_FLUX'] = data.loc[valid_index, 'PDCSAP_FLUX'] / np.median(data.loc[valid_index, 'PDCSAP_FLUX'])
```
Plot Relative PDCSAP Flux vs time
```
plt.gcf().set_size_inches(6,2);
plt.scatter(data.loc[valid_index, 'TIME'], data.loc[valid_index, 'RELATIVE_PDCSAP_FLUX'], s=2, edgecolor='none');
plt.xlabel('Time');
plt.ylabel('Relative PDCSAP Flux');
plt.title('Simulated Data TID: ' + str(int(label)));
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_TESS_Simulated_Data.ipynb | Demo_TESS_Simulated_Data.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from skdaccess.astro.tess.simulated.cache import DataFetcher as TESS_DF
from skdaccess.framework.param_class import *
import numpy as np
tess_fetcher = TESS_DF([AutoList([376664523])])
tess_dw = tess_fetcher.output()
label, data = next(tess_dw.getIterator())
valid_index = data['PDCSAP_FLUX'] != 0.0
data.loc[valid_index, 'RELATIVE_PDCSAP_FLUX'] = data.loc[valid_index, 'PDCSAP_FLUX'] / np.median(data.loc[valid_index, 'PDCSAP_FLUX'])
plt.gcf().set_size_inches(6,2);
plt.scatter(data.loc[valid_index, 'TIME'], data.loc[valid_index, 'RELATIVE_PDCSAP_FLUX'], s=2, edgecolor='none');
plt.xlabel('Time');
plt.ylabel('Relative PDCSAP Flux');
plt.title('Simulated Data TID: ' + str(int(label))); | 0.403097 | 0.647534 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
plt.rcParams['figure.figsize'] = (14,14)
```
Temperature data from Mahali 2015 Alaska Experiment (Temperature sensor inside Mahali box) <br>
MIT led NSF project studying the Earth’s ionosphere with GPS<br>
http://mahali.mit.edu/
```
from skdaccess.framework.param_class import *
from skdaccess.geo.mahali.temperature import DataFetcher as MHDF
station_list = [ 'mh02', 'mh03', 'mh04', 'mh05', 'mh06', 'mh07', 'mh08', 'mh09', 'mh13']
mhdf = MHDF([AutoList(station_list)])
dw = mhdf.output()
label = 'mh06'
data = dw.get()['mh06']
plt.plot(data['Temperature'],'o',markersize=1);
plt.title(label,fontsize=14);
plt.ylabel('Temperature + (C)', fontsize=14);
plt.xticks(rotation=15);
plt.gcf().set_size_inches(14,4)
def plotAllStations(start_date, end_date):
plt.gcf().set_size_inches(12,9)
for index, (label, data) in enumerate(dw.getIterator()):
plt.subplot(3,3, index+1)
plt.title(label);
plt.ylabel('Temperature (C)');
plt.xticks(rotation=25);
plt.plot(data['Temperature'],'o',markersize=1);
plt.xlim(start_date, end_date)
plt.tight_layout()
plotAllStations(pd.to_datetime('2015-09-28'),pd.to_datetime('2015-11-13'))
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Mahali_Temperature.ipynb | Demo_Mahali_Temperature.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
plt.rcParams['figure.figsize'] = (14,14)
from skdaccess.framework.param_class import *
from skdaccess.geo.mahali.temperature import DataFetcher as MHDF
station_list = [ 'mh02', 'mh03', 'mh04', 'mh05', 'mh06', 'mh07', 'mh08', 'mh09', 'mh13']
mhdf = MHDF([AutoList(station_list)])
dw = mhdf.output()
label = 'mh06'
data = dw.get()['mh06']
plt.plot(data['Temperature'],'o',markersize=1);
plt.title(label,fontsize=14);
plt.ylabel('Temperature + (C)', fontsize=14);
plt.xticks(rotation=15);
plt.gcf().set_size_inches(14,4)
def plotAllStations(start_date, end_date):
plt.gcf().set_size_inches(12,9)
for index, (label, data) in enumerate(dw.getIterator()):
plt.subplot(3,3, index+1)
plt.title(label);
plt.ylabel('Temperature (C)');
plt.xticks(rotation=25);
plt.plot(data['Temperature'],'o',markersize=1);
plt.xlim(start_date, end_date)
plt.tight_layout()
plotAllStations(pd.to_datetime('2015-09-28'),pd.to_datetime('2015-11-13')) | 0.518546 | 0.658143 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
----------------
### Data Citation
European Centre for Medium-Range Weather Forecasts (2009): ERA-Interim Project. Research Data Archive at the National Center for Atmospheric Research, Computational and Information Systems Laboratory. https://doi.org/10.5065/D6CR5RD9.
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from getpass import getpass
import pandas as pd
from skdaccess.framework.param_class import *
from skdaccess.geo.era_interim.cache import DataFetcher as EDF
```
Specify list of dates
```
date_list = pd.date_range('2015-06-06 00:00:00', '2015-06-06 06:00:00', freq='6H')
```
Enter Research Data Archive (NCAR) credentials
```
username='Enter username'
password = getpass()
```
Create data fetcher
```
edf = EDF(date_list=date_list, data_names=['Geopotential','Temperature'],
username=username, password=password)
```
Access data
```
edw = edf.output()
iterator = edw.getIterator()
geo_label, geo_data = next(iterator)
temp_label, temp_data = next(iterator)
```
Plot temperature data
```
plt.figure(figsize=(5,3.75));
plt.plot(temp_data[0,:,75,350], temp_data['pressure']);
plt.gca().invert_yaxis();
plt.ylabel('Pressure');
plt.xlabel('Temperature');
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_ERA_Interim.ipynb | Demo_ERA_Interim.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from getpass import getpass
import pandas as pd
from skdaccess.framework.param_class import *
from skdaccess.geo.era_interim.cache import DataFetcher as EDF
date_list = pd.date_range('2015-06-06 00:00:00', '2015-06-06 06:00:00', freq='6H')
username='Enter username'
password = getpass()
edf = EDF(date_list=date_list, data_names=['Geopotential','Temperature'],
username=username, password=password)
edw = edf.output()
iterator = edw.getIterator()
geo_label, geo_data = next(iterator)
temp_label, temp_data = next(iterator)
plt.figure(figsize=(5,3.75));
plt.plot(temp_data[0,:,75,350], temp_data['pressure']);
plt.gca().invert_yaxis();
plt.ylabel('Pressure');
plt.xlabel('Temperature'); | 0.364099 | 0.650322 |
The MIT License (MIT)<br>
Copyright (c) 2016, 2017, 2018 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi']=150
# Gravity Recovery and Climate Experiment (GRACE) Data
# Source: http://grace.jpl.nasa.gov/
# Current surface mass change data, measuring equivalent water thickness in cm, versus time
# This data fetcher uses results from the Mascon solutions
from skdaccess.geo.grace.mascon.cache import DataFetcher as GR_DF
from skdaccess.framework.param_class import *
geo_point = AutoList([(38, -117)]) # location in Nevada
grace_fetcher = GR_DF([geo_point],start_date='2010-01-01',end_date='2014-01-01')
grace_data_wrapper = grace_fetcher.output() # Get a data wrapper
grace_label, grace_data = next(grace_data_wrapper.getIterator())# Get GRACE data
grace_data.head()
```
Get scale factor
```
scale_factor = grace_data_wrapper.info(grace_label)['scale_factor']
```
Plot EWD $\times$ scale factor
```
plt.plot(grace_data['EWD']*scale_factor);
plt.xticks(rotation=35);
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_GRACE_Mascon.ipynb | Demo_GRACE_Mascon.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi']=150
# Gravity Recovery and Climate Experiment (GRACE) Data
# Source: http://grace.jpl.nasa.gov/
# Current surface mass change data, measuring equivalent water thickness in cm, versus time
# This data fetcher uses results from the Mascon solutions
from skdaccess.geo.grace.mascon.cache import DataFetcher as GR_DF
from skdaccess.framework.param_class import *
geo_point = AutoList([(38, -117)]) # location in Nevada
grace_fetcher = GR_DF([geo_point],start_date='2010-01-01',end_date='2014-01-01')
grace_data_wrapper = grace_fetcher.output() # Get a data wrapper
grace_label, grace_data = next(grace_data_wrapper.getIterator())# Get GRACE data
grace_data.head()
scale_factor = grace_data_wrapper.info(grace_label)['scale_factor']
plt.plot(grace_data['EWD']*scale_factor);
plt.xticks(rotation=35); | 0.707506 | 0.567607 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from skdaccess.framework.param_class import *
from skdaccess.geo.wyoming_sounding.cache import DataFetcher
```
Create a data fetcher
```
sdf = DataFetcher(station_number='72493', year=2014, month=5, day_start=30, day_end=30, start_hour=12, end_hour=12)
```
Access data
```
dw = sdf.output()
label, data = next(dw.getIterator())
data.head()
plt.figure(figsize=(5,3.75))
plt.plot(data['TEMP'],data['HGHT']);
plt.ylabel('Height');
plt.xlabel('Temperature');
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Wyoming_Sounding.ipynb | Demo_Wyoming_Sounding.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from skdaccess.framework.param_class import *
from skdaccess.geo.wyoming_sounding.cache import DataFetcher
sdf = DataFetcher(station_number='72493', year=2014, month=5, day_start=30, day_end=30, start_hour=12, end_hour=12)
dw = sdf.output()
label, data = next(dw.getIterator())
data.head()
plt.figure(figsize=(5,3.75))
plt.plot(data['TEMP'],data['HGHT']);
plt.ylabel('Height');
plt.xlabel('Temperature'); | 0.456168 | 0.615232 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
```
Rinex files from Mahali Alaska 2015 Experiment<br>
MIT led NSF project studying the Earth’s ionosphere with GPS<br>
http://mahali.mit.edu/
```
from skdaccess.framework.param_class import *
from skdaccess.geo.mahali.rinex import DataFetcher as MAHALI_DF
station_list = AutoList(['mh06','mh08']) # List of stations
mahali_df = MAHALI_DF([station_list], start_date='2015-10-10', end_date='2015-10-12', generate_links=True)
data_wrapper = mahali_df.output() # Get a data wrapper
```
Generate download links for files (only works in jupyter notebooks)
```
for site, date, nav, obs in data_wrapper.getIterator():
print(site, date.strftime('%Y-%m-%d'), nav, obs, sep='\n',end='\n\n')
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Mahali_Rinex_Links.ipynb | Demo_Mahali_Rinex_Links.ipynb | %matplotlib notebook
import matplotlib.pyplot as plt
from skdaccess.framework.param_class import *
from skdaccess.geo.mahali.rinex import DataFetcher as MAHALI_DF
station_list = AutoList(['mh06','mh08']) # List of stations
mahali_df = MAHALI_DF([station_list], start_date='2015-10-10', end_date='2015-10-12', generate_links=True)
data_wrapper = mahali_df.output() # Get a data wrapper
for site, date, nav, obs in data_wrapper.getIterator():
print(site, date.strftime('%Y-%m-%d'), nav, obs, sep='\n',end='\n\n') | 0.33372 | 0.582432 |
The MIT License (MIT)<br>
Copyright (c) 2018 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
import pandas as pd
# LA Traffic count data between 2014-01-14 and 2014-01-16
# Source: https://data.lacity.org/A-Livable-and-Sustainable-City/LADOT-Traffic-Counts-Summary/94wu-3ps3
from skdaccess.engineering.la.traffic_counts.stream import DataFetcher as TrafficDF
from skdaccess.framework.param_class import *
# Create traffic count data fetcher
# Note, use paramter app_token to supply an application token to prevent throttling
# See: https://dev.socrata.com/docs/app-tokens.html
traffic_df = TrafficDF(start_time='2014-01-14', end_time='2014-01-16')
# Create a data wapper
traffic_dw = traffic_df.output()
# Retrieve results
label, data = next(traffic_dw.getIterator())
# Select rows with east bound traffic on 2014-01-14
date = pd.to_datetime('2014-01-14')
cut_data = data[data.count_date == date]
cut_data = cut_data[cut_data.e_b != 0]
# Create plot
plt.title('East Bound on {}'.format(date.strftime('%Y-%m-%d')));
plt.ylabel('Count');
tick_labels = cut_data.primary_street + ' and ' + cut_data.cross_street
plt.bar(x=range(1,4), height=cut_data.e_b, tick_label=tick_labels);
plt.xticks(rotation=15);
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Traffic_Counts.ipynb | Demo_Traffic_Counts.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
import pandas as pd
# LA Traffic count data between 2014-01-14 and 2014-01-16
# Source: https://data.lacity.org/A-Livable-and-Sustainable-City/LADOT-Traffic-Counts-Summary/94wu-3ps3
from skdaccess.engineering.la.traffic_counts.stream import DataFetcher as TrafficDF
from skdaccess.framework.param_class import *
# Create traffic count data fetcher
# Note, use paramter app_token to supply an application token to prevent throttling
# See: https://dev.socrata.com/docs/app-tokens.html
traffic_df = TrafficDF(start_time='2014-01-14', end_time='2014-01-16')
# Create a data wapper
traffic_dw = traffic_df.output()
# Retrieve results
label, data = next(traffic_dw.getIterator())
# Select rows with east bound traffic on 2014-01-14
date = pd.to_datetime('2014-01-14')
cut_data = data[data.count_date == date]
cut_data = cut_data[cut_data.e_b != 0]
# Create plot
plt.title('East Bound on {}'.format(date.strftime('%Y-%m-%d')));
plt.ylabel('Count');
tick_labels = cut_data.primary_street + ' and ' + cut_data.cross_street
plt.bar(x=range(1,4), height=cut_data.e_b, tick_label=tick_labels);
plt.xticks(rotation=15); | 0.737158 | 0.582135 |
The MIT License (MIT)<br>
Copyright (c) 2016,2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
```
Voyager mission data
Data is retrieved from the Space Physics Data Facility https://spdf.gsfc.nasa.gov/
```
from skdaccess.astro.voyager import DataFetcher
df = DataFetcher(1980,1981,spacecraft='voyager1')
df.getMetadataFiles()
dw = df.output()
it = dw.getIterator()
label, data = next(it)
plt.plot(data.loc[data['BT']<900,'BT'],'.');
plt.title(label)
plt.ylabel(dw.info(label)['BT']['MEANING'] + '\n'
+ dw.info(label)['BT']['UNITS/COMMENTS'])
plt.xticks(rotation=20);
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Voyager.ipynb | Demo_Voyager.ipynb | %matplotlib notebook
import matplotlib.pyplot as plt
from skdaccess.astro.voyager import DataFetcher
df = DataFetcher(1980,1981,spacecraft='voyager1')
df.getMetadataFiles()
dw = df.output()
it = dw.getIterator()
label, data = next(it)
plt.plot(data.loc[data['BT']<900,'BT'],'.');
plt.title(label)
plt.ylabel(dw.info(label)['BT']['MEANING'] + '\n'
+ dw.info(label)['BT']['UNITS/COMMENTS'])
plt.xticks(rotation=20); | 0.402862 | 0.563738 |
The MIT License (MIT)<br>
Copyright (c) 2016,2017 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
# USGS Groundwater Data - 129 Monitoring Wells in CA between 2010 and 2014
# Source: http://water.usgs.gov/ogw/
# Returns depth to water level in meters
from skdaccess.geo.groundwater import DataFetcher as GW_DF
from skdaccess.framework.param_class import *
groundwater_fetcher = GW_DF([AutoList([323313117033902])], start_date='2010-01-01',end_date='2014-01-01')
groundwater_data = groundwater_fetcher.output().get() # returns a pandas data panel
# Plotting Well Number 323313117033902
plt.figure();
plt.plot(groundwater_data[323313117033902]['Median Depth to Water']);
plt.tight_layout()
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Groundwater.ipynb | Demo_Groundwater.ipynb | %matplotlib notebook
import matplotlib.pyplot as plt
# USGS Groundwater Data - 129 Monitoring Wells in CA between 2010 and 2014
# Source: http://water.usgs.gov/ogw/
# Returns depth to water level in meters
from skdaccess.geo.groundwater import DataFetcher as GW_DF
from skdaccess.framework.param_class import *
groundwater_fetcher = GW_DF([AutoList([323313117033902])], start_date='2010-01-01',end_date='2014-01-01')
groundwater_data = groundwater_fetcher.output().get() # returns a pandas data panel
# Plotting Well Number 323313117033902
plt.figure();
plt.plot(groundwater_data[323313117033902]['Median Depth to Water']);
plt.tight_layout() | 0.719581 | 0.530784 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
```
Geomagnetic data from the USGS
The results presented in this document rely on data collected at magnetic observatories operated by the U.S. Geological Survey (USGS, geomag.usgs.gov).
```
from skdaccess.framework.param_class import *
from skdaccess.geo.magnetometer import DataFetcher
geomag_df = DataFetcher([AutoList(['BOU'])], start_time='2015-11-01',end_time=('2015-11-02'))
dw = geomag_df.output()
label, data = next(dw.getIterator())
plt.plot(data['X']);
plt.ylabel('nT');
plt.title('X');
plt.xticks(rotation=15);
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Magnetometer.ipynb | Demo_Magnetometer.ipynb | %matplotlib notebook
import matplotlib.pyplot as plt
from skdaccess.framework.param_class import *
from skdaccess.geo.magnetometer import DataFetcher
geomag_df = DataFetcher([AutoList(['BOU'])], start_time='2015-11-01',end_time=('2015-11-02'))
dw = geomag_df.output()
label, data = next(dw.getIterator())
plt.plot(data['X']);
plt.ylabel('nT');
plt.title('X');
plt.xticks(rotation=15); | 0.515132 | 0.599514 |
The MIT License (MIT)<br>
Copyright (c) 2016,2017 Massachusetts Institute of Technology<br>
Authors: Justin Li, Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
%matplotlib notebook
import matplotlib.pyplot as plt
# Kepler Exoplanet Light Curve Time Series
# Source: http://keplerscience.arc.nasa.gov
# Light curve in relative flux versus phase
from skdaccess.astro.kepler import DataFetcher as Kepler_DF
from skdaccess.utilities.kepler_util import normalize
from skdaccess.framework.param_class import *
import numpy as np
kepler_fetcher = Kepler_DF([AutoList(['009941662'])])
kepler_data = kepler_fetcher.output().get()
normalize(kepler_data['009941662'])
kepler_data['009941662'].head()
plt.figure(figsize=(8,4));
data = kepler_data['009941662'].iloc[0:1000]
plt.plot(np.array(data['TIME']) % 1.7636, data['PDCSAP_FLUX'],'.');
plt.tight_layout();
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_Kepler.ipynb | Demo_Kepler.ipynb | %matplotlib notebook
import matplotlib.pyplot as plt
# Kepler Exoplanet Light Curve Time Series
# Source: http://keplerscience.arc.nasa.gov
# Light curve in relative flux versus phase
from skdaccess.astro.kepler import DataFetcher as Kepler_DF
from skdaccess.utilities.kepler_util import normalize
from skdaccess.framework.param_class import *
import numpy as np
kepler_fetcher = Kepler_DF([AutoList(['009941662'])])
kepler_data = kepler_fetcher.output().get()
normalize(kepler_data['009941662'])
kepler_data['009941662'].head()
plt.figure(figsize=(8,4));
data = kepler_data['009941662'].iloc[0:1000]
plt.plot(np.array(data['TIME']) % 1.7636, data['PDCSAP_FLUX'],'.');
plt.tight_layout(); | 0.749821 | 0.565539 |
The MIT License (MIT)<br>
Copyright (c) 2018 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
Setup plotting libraries
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
```
Import data fetcher
```
from skdaccess.framework.param_class import *
from skdaccess.astro.spectra.stream import DataFetcher
```
Specify list of SDSS spectra URLs to retrieve
```
ap_spectra_url = AutoList([
'https://dr14.sdss.org/sas/dr14/eboss/spectro/redux/v5_10_0/spectra/lite/4055/spec-4055-55359-0596.fits',
])
```
Create data fetcher
```
df = DataFetcher([ap_spectra_url])
```
Access data and metadata
```
dw = df.output()
label, data = next(dw.getIterator())
header = dw.info(label)
```
Plot spectra
```
plt.plot(10**data['loglam'], data['flux']);
plt.title(label.split('/')[-1]);
plt.ylabel('Flux ({})'.format(header['BUNIT']));
plt.xlabel('Wavelength (Ångströms)');
```
| scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/examples/Demo_SDSS_Spectra.ipynb | Demo_SDSS_Spectra.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 150
from skdaccess.framework.param_class import *
from skdaccess.astro.spectra.stream import DataFetcher
ap_spectra_url = AutoList([
'https://dr14.sdss.org/sas/dr14/eboss/spectro/redux/v5_10_0/spectra/lite/4055/spec-4055-55359-0596.fits',
])
df = DataFetcher([ap_spectra_url])
dw = df.output()
label, data = next(dw.getIterator())
header = dw.info(label)
plt.plot(10**data['loglam'], data['flux']);
plt.title(label.split('/')[-1]);
plt.ylabel('Flux ({})'.format(header['BUNIT']));
plt.xlabel('Wavelength (Ångströms)'); | 0.4436 | 0.642713 |
# """@package DataClass
# Provides base data classes inherited by the specific data fetchers
# """
# Standard library imports
import os
import pathlib
from glob import glob
from urllib import request, parse
import shutil
from collections import OrderedDict
import warnings
from urllib.request import HTTPPasswordMgrWithDefaultRealm
from urllib.request import HTTPBasicAuthHandler
from urllib.request import HTTPCookieProcessor
from urllib.request import build_opener, install_opener, urlopen
from io import BytesIO
from http.cookiejar import CookieJar
import fcntl
import struct
# Compatability imports for standard library
from six.moves import configparser
from six.moves.configparser import NoOptionError, NoSectionError
# 3rd party imports
from tqdm import tqdm
from skimage.io import imread
from astropy.io import fits
from atomicwrites import atomic_write
import requests
class DataFetcherBase(object):
'''
Base class for all data fetchers
'''
def __init__(self, ap_paramList=[], verbose=False):
'''
Initialize data fetcher with parameter list
@param ap_paramList: List of parameters
@param verbose: Output extra information
'''
self.ap_paramList = ap_paramList
self.verbose = verbose
def output(self):
'''
Output data wrapper
@return Datawrapper
'''
pass
def perturb(self):
'''Perturb parameters'''
for param in self.ap_paramList:
param.perturb()
def reset(self):
'''Set all parameters to initial value'''
for param in self.ap_paramList:
param.reset()
def __str__(self):
''' Generate string description'''
return str( [ str(item) for item in self.ap_paramList ] )
def getMetadata(self):
'''
Return metadata about Data Fetcher
@return metadata of object.
'''
return str(self)
def getConfig():
'''
Retrieve skdaccess configuration
@return configParser.ConfigParser object of configuration
'''
config_location = os.path.join(os.path.expanduser('~'), '.skdaccess.conf')
conf = configparser.ConfigParser()
conf.read(config_location)
return conf
def getConfigItem(section, key):
"""
Retrieve skdaccess configuration item
@param section: Section of configuration item
@param key: Configuration key value
@return Requested configuration item or None if it doesn't exist
"""
conf = DataFetcherBase.getConfig()
if section in conf:
return conf.get(section, key, fallback=None)
else:
return None
def writeConfigItem(section, key, value):
"""
Retrieve skdaccess configuration item
@param section: Section of configuration item
@param key: Configuration key value
@param value: Value to be written
@return Requested configuration item or None if it doesn't exist
"""
conf = DataFetcherBase.getConfig()
if section not in conf:
conf.add_section(section)
conf.set(section, key, value)
DataFetcherBase.writeConfig(conf)
def writeConfig(conf):
'''
Write config to disk
@param conf: configparser.ConfigParser object
'''
config_location = os.path.join(os.path.expanduser('~'), '.skdaccess.conf')
config_handle = open(config_location, "w")
conf.write(config_handle)
config_handle.close()
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
pass
def verbose_print(self, *args, **kwargs):
"""
Print statement if verbose flag is set
@param *args: Arguments to pass to print
@param **kwargs: Keyword arguments to pass to print
"""
if self.verbose:
print(*args, **kwargs)
class DataFetcherLocal(DataFetcherBase):
''' Data fetcher base class for use when storing data locally'''
def getDataLocation(data_name):
'''
Get the location of data set
@param data_name: Name of data set
@return string of data location, None if not found
'''
data_name = str.lower(data_name)
conf = DataFetcherLocal.getConfig()
try:
return conf.get(data_name, 'data_location')
except (NoOptionError, NoSectionError):
# If it doesn't exist, create a new one
# Check if an alternate root has been defined
try:
data_location = os.path.join(conf.get('skdaccess', 'root'), data_name)
except (NoOptionError, NoSectionError):
data_location = os.path.join(os.path.expanduser('~'), '.skdaccess', data_name)
# Make directory and set location
os.makedirs(data_location, exist_ok=True)
DataFetcherLocal.setDataLocation(data_name, data_location)
return data_location
def setDataLocation(data_name, location, key='data_location'):
'''
Set the location of a data set
@param data_name: Name of data set
@param location: Location of data set
@param key: Key of configuration option
'''
conf = DataFetcherLocal.getConfig()
if not conf.has_section(data_name):
conf.add_section(data_name)
conf.set(data_name, key, location)
DataFetcherLocal.writeConfig(conf)
class DataFetcherStorage(DataFetcherLocal):
''' Data fetcher base class for use when entire data set is downloaded'''
@classmethod
def downloadFullDataset(cls, out_file, use_file=None):
'''
Abstract function used to download full data set
@param out_file: output file name
@param use_file: Use previously downloaded data
@return Absolute path of parsed data
'''
pass
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
return True
class DataFetcherStream(DataFetcherBase):
'''
Data fetcher base class for downloading data into memory
'''
def retrieveOnlineData(self, data_specification):
'''
Method for downloading data into memory
@param data_specification: Url list of data to be retrieved
@return Retrieved data
'''
# Dictionary to store results
data_dict = OrderedDict()
metadata_dict = OrderedDict()
# Parse data
for url in data_specification:
# Get http data type
with urlopen(url) as url_access:
content_type = url_access.info().get_content_type()
# Access fits file
if content_type == 'application/fits':
# Do not want caching to avoid issues when running multiple pipelines
bytes_data = BytesIO(url_access.read())
with warnings.catch_warnings(), fits.open(bytes_data, cache=False) as hdu_list:
warnings.simplefilter("ignore", fits.verify.VerifyWarning)
# Need to fix header otherwise astropy can fail to read data
hdu_list.verify('fix')
data_dict[url] = hdu_list[1].data
metadata_dict[url] = hdu_list[1].header
# Access jpg file
elif content_type == 'image/jpeg':
data_dict[url] = imread(url)
metadata_dict[url] = None
# Throw warning if content_type not understood
else:
raise RuntimeError('Did not understand content type: ' + content_type)
return metadata_dict, data_dict
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
return True
class DataFetcherCache(DataFetcherLocal):
'''
Data fetcher base class for downloading data and caching results on hard disk
'''
def checkIfDataExists(self, in_file_name):
'''
Checks if the file exists on the filesystem and the file is not empty
@param in_file_name: Input filename to test
@return True if data exists and False otherwise
'''
try:
with open(in_file_name, 'rb') as read_file:
rv = fcntl.fcntl(read_file, fcntl.LOCK_SH)
first_byte = read_file.read(1)
if len(first_byte) == 0:
return False
else:
return True
except FileNotFoundError:
return False
def cacheData(self, keyname, online_path_list, username=None, password=None, authentication_url=None,
cookiejar = None, use_requests=False, use_progress_bar=True):
'''
Download and store specified data to local disk
@param keyname: Name of dataset in configuration file
@param online_path_list: List of urls to data
@param username: Username for accessing online resources
@param password: Password for accessing online resources
@param authentication_url: The url used for authentication (unused when use_requests=True)
@param cookiejar: The cookiejar that stores credentials (unused when use_requests=True)
@param use_requests: Use the requests library instead of the standard library for accessing resources
@param use_progress_bar: Use a progress bar to show number of items downloaded
@return List of downloaded file locations
'''
def parseURL(data_location, in_path):
'''
This function takes the file path of saved data and determines
what url created it.
@param data_location: Absolute path to root directory whose path is not part of the url
@param path: Path to object that will be used to generate a url
@return ParseResult of url generated from in_path
'''
data_location_parts = len(pathlib.Path(data_location).parts[:])
path = pathlib.Path(in_path)
access_type = path.parts[data_location_parts]
if access_type != 'file':
access_type += '://'
else:
access_type += ':///'
url_path = pathlib.Path(*path.parts[data_location_parts+1:]).as_posix()
return parse.urlparse(access_type+url_path)
def generatePath(data_location, parsed_url):
'''
This function takes a parsed url (ParseResult) and
generates the filepath to where the data should be stored
stored
@param data_location: Location where data is stored
@param parsed_url: ParseResult generated from url
@return Local path to file
'''
if parsed_url.query == '':
return os.path.join(data_location, parsed_url.scheme,parsed_url.netloc, parsed_url.path[1:])
else:
return os.path.join(data_location, parsed_url.scheme,parsed_url.netloc,
parsed_url.path[1:] + '?' + parsed_url.query)
# Get absolute path to data directory
data_location = DataFetcherCache.getDataLocation(keyname)
# If it doesn't exist, create a new one
if data_location == None:
data_location = os.path.join(os.path.expanduser('~'), '.skdaccess',keyname)
os.makedirs(data_location, exist_ok=True)
DataFetcherCache.setDataLocation(keyname, data_location)
# Get currently downloaded files
downloaded_full_file_paths = [filename for filename in glob(os.path.join(data_location,'**'), recursive=True) if os.path.isfile(filename)]
# Remove files empty files
downloaded_full_file_paths = [filename for filename in downloaded_full_file_paths if self.checkIfDataExists(filename)]
# Convert filenames to urls
downloaded_parsed_urls = set(parseURL(data_location, file_path) for file_path in downloaded_full_file_paths)
# Determine which files are missing
parsed_http_paths = [parse.urlparse(online_path) for online_path in online_path_list]
missing_files = list(set(parsed_http_paths).difference(downloaded_parsed_urls))
missing_files.sort()
# Download missing files
if len(missing_files) > 0:
# Sanity check on input options
if use_requests == True and authentication_url != None:
raise ValueError('Cannot use an authentication url with requests')
# Setup connection (non requests)
if not use_requests:
# Deal with password protected urls
# This method comes from
# https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
if username != None or password != None:
password_manager = HTTPPasswordMgrWithDefaultRealm()
if authentication_url == None:
authentication_url = [parsed_url.geturl() for parsed_url in missing_files]
password_manager.add_password(None, authentication_url, username, password)
handler = HTTPBasicAuthHandler(password_manager)
# If no cookiejar was given, create a new one
if cookiejar == None:
cookiejar = CookieJar()
cookie_processor = HTTPCookieProcessor(cookiejar)
install_opener(build_opener(cookie_processor, handler))
# Use a cookie with no username or password
elif cookiejar != None:
cookie_processor = HTTPCookieProcessor(cookiejar)
install_opener(build_opener(cookie_processor))
if use_progress_bar:
missing_files_loop = tqdm(missing_files)
else:
missing_files_loop = missing_files
for parsed_url in missing_files_loop:
out_filename = generatePath(data_location, parsed_url)
os.makedirs(os.path.split(out_filename)[0],exist_ok=True)
with open(out_filename, 'a+b') as lockfile:
fcntl.lockf(lockfile, fcntl.LOCK_EX)
lockfile.seek(0)
if len(lockfile.read(1)) == 0:
with atomic_write(out_filename, mode='wb', overwrite=True) as data_file:
if not use_requests:
shutil.copyfileobj(urlopen(parsed_url.geturl()), data_file)
else:
if username != None or password != None:
# This method to download password protected data comes from
# https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
with requests.Session() as session:
initial_request = session.request('get',parsed_url.geturl())
r = session.get(initial_request.url, auth=(username,password), stream=True)
if r.status_code == 401:
raise RuntimeError("Authorization Denied")
shutil.copyfileobj(r.raw, data_file, 1024*1024*10)
else:
with requests.Session() as session:
r = session.get(parsed_url.geturl(), stream=True)
shutil.copyfileobj(r.raw, data_file, 1024*1024*10)
# Return a list of file locations for parsing
return [generatePath(data_location, parsed_url) for parsed_url in parsed_http_paths]
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
return False
def getHDFStorage(self, keyname):
"""
Retrieve a Pandas HDF Store for a dataset
@param keyname: Key name of HDF store
@return Pandas HDF Store
"""
data_location = DataFetcherCache.getDataLocation(keyname)
if data_location == None:
data_location = os.path.join(os.path.expanduser('~'),'.skdaccess',keyname)
os.makedirs(data_location, exist_ok=True)
data_location = os.path.join(data_location, keyname + '_data.h5')
DataFetcher.setDataLocation(keyname, data_location)
return pd.HDFStore(data_location)
class DataWrapperBase(object):
''' Base class for wrapping data for use in DiscoveryPipeline '''
def __init__(self, obj_wrap, run_id = -1, meta_data = None):
'''
Construct wrapper from input data.
@param obj_wrap: Data to be wrapped
@param run_id: ID of the run
@param meta_data: Metadata to store with data
'''
self.data = obj_wrap
self.results = dict()
self.constants = dict()
self.run_id = run_id
self.meta_data = meta_data
def update(self, obj):
'''
Updated wrapped data
@param obj: New data for wrapper
'''
self.data = obj
def updateMetadata(self, new_metadata):
'''
Update metadata
@param new_metadata: New metadata
'''
self.meta_data = new_metadata
def get(self):
'''
Retrieve stored data.
@return Stored data
'''
return self.data
def getResults(self):
'''
Retrieve accumulated results, if any.
@return store results
'''
return self.results
def addResult(self,rkey,rres):
'''
Add a result to the data wrapper
@param rkey: Result key
@param rres: Result
'''
self.results[rkey] = rres
def reset(self):
''' Reset data back to original state '''
self.results = dict()
def info(self, key=None):
'''
Get information about data wrapper
@return The stored metadata
'''
if key==None:
return self.meta_data
else:
return self.meta_data[key]
def getIterator(self):
'''
Get an iterator to the data
@return iterator to data
'''
pass
def __len__(self):
'''
Get length of wrapped data
@return length of wrapped data
'''
return len(self.data)
def getRunID(self):
'''
Get the Run ID
@return run_id
'''
return self.run_id
class SeriesWrapper(DataWrapperBase):
'''
Data wrapper for series data using a data panel
'''
def __init__(self, obj_wrap, data_names, error_names = None, meta_data = None, run_id = -1):
'''
Initialize Series Wrapper
@param obj_wrap: Pandas data panel to wrap
@param data_names: List of data column names
@param error_names: List of error column names
@param meta_data: Metadata
@param run_id: ID of run
'''
self.data_names = data_names
self.error_names = error_names
super(SeriesWrapper, self).__init__(obj_wrap, run_id, meta_data)
def getIterator(self):
'''
Get an iterator to the data
@return Iterator (label, data, errors) that will cycle over data and error names
'''
if self.error_names != None:
for frame in self.data.minor_axis:
for data_index,error_index in zip(self.data_names, self.error_names):
yield data_index, self.data.loc[data_index, :, frame], self.data.loc[error_index, :, frame]
else:
for frame in self.data.minor_axis:
for data_index in self.data_names:
yield data_index, self.data.loc[data_index, :, frame], None
def getIndices(self):
'''
Get the indicies of the data
@return index of data
'''
return (list(self.data.minor_axis), self.data_names)
def getLength(self):
'''
Get total number of series that the iterate will loop over
@return Number of series iterator will traverse over
'''
return self.data.shape[2]*len(self.data_names)
class SeriesDictionaryWrapper(SeriesWrapper):
'''
Data wrapper for series data using a dictionary of data frames
'''
def getIterator(self):
'''
Get an iterator to the data
@return Iterator (label, data, errors) that will cycle over data and error names
'''
if self.error_names != None:
for frame in self.data.keys():
for data_index,error_index in zip(self.data_names, self.error_names):
yield data_index, self.data[frame].loc[:, data_index], self.data[frame].loc[:, error_index]
else:
for frame in self.data.keys():
for data_index in self.data_names:
yield data_index, self.data[frame].loc[:, data_index], None
def getIndices(self):
'''
Get the indices of the data
@return index of data
'''
return (list(self.data.keys()), self.data_names)
def getLength(self):
'''
Get total number of series that the iterate will loop over
@return Number of series iterator will traverse over
'''
return len(self.data) * len(self.data_names)
class TableWrapper(DataWrapperBase):
'''
Data wrapper for table data using an ordered dictionary
'''
def __init__(self, obj_wrap, run_id = -1, meta_data = None, default_columns = None, default_error_columns = None):
'''
Construct object from input data.
@param obj_wrap: Data to be wrapped
@param run_id: ID of the run
@param meta_data: Metadata to store with data
@param default_columns: Default columns for pipeline items
@param default_error_columns: Default error columns for pipeline items
'''
self.default_columns = default_columns
self.default_error_columns = default_error_columns
super(TableWrapper, self).__init__(obj_wrap, run_id, meta_data)
def getIterator(self):
'''
Iterator access to data.
@return iterator to (label, data frame) from Dictionary
'''
for label,frame in self.data.items():
yield label,frame
def getLength(self):
'''
Get number of data frames
@return Number of data frames
'''
return len(self.data)
def updateData(self, label, index, column_names, new_data):
'''
Update wrapped data
@param label: Data label
@param index: Index of data to update
@param column_names: Names of columns to update
@param new_data: Data to replace the old data
'''
self.data[label].loc[index, column_names] = new_data
def addColumn(self, label, column_names, new_data):
'''
Add new column to data
@param label: Data label
@param column_names: Names of columns to update
@param new_data: New data to add
'''
self.data[label].loc[:,column_names] = new_data
def getDefaultColumns(self):
'''
Get the default columns of data
@return List of default columns
'''
return self.default_columns
def getDefaultErrorColumns(self):
'''
Get the default error columns of data
@return List of default error columns
'''
return self.default_error_columns
def removeFrames(self,label_list):
'''
Remove Data Frames from wrapper
@param label_list: List of labels to remove
'''
for label in label_list:
del self.data[label]
def updateFrames(self,label_list,frame_list):
'''
Update data frames
@param label_list: List of labels to update
@param frame_list: List of updated frames
'''
for label, frame in zip(label_list, frame_list):
self.data[label] = frame
class ImageWrapper(DataWrapperBase):
'''
Wrapper for image data
'''
def getIterator(self):
'''
Get an iterator to the data
@return Iterator yielding (label, image_data)
'''
return iter(self.data.items())
def updateData(self, label, new_data):
'''
Change image
@param label: Label of data to be changed
@param new_data: New data to replace old data
'''
self.data[label] = new_data
def deleteData(self, label):
'''
Delete image
@param label: Delete image with label
'''
del self.data[label]
class XArrayWrapper(DataWrapperBase):
'''
Wrapper for xarrays
'''
def __init__(self, obj_wrap, index_list, run_id = -1 ):
self.index_list = index_list
super(XArrayWrapper, self).__init__(obj_wrap, run_id)
def getIterator(self):
'''
Get an iterator that iterators over the index
@return iterator to data
'''
for index in self.index_list:
yield index, self.data[index]
def info(self, key=None):
'''
Get information about xarray data wrapper
@return The stored metadata
'''
if key==None:
return self.data.attrs
else:
return self.data[key].attrs | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/framework/data_class.py | data_class.py |
# """@package DataClass
# Provides base data classes inherited by the specific data fetchers
# """
# Standard library imports
import os
import pathlib
from glob import glob
from urllib import request, parse
import shutil
from collections import OrderedDict
import warnings
from urllib.request import HTTPPasswordMgrWithDefaultRealm
from urllib.request import HTTPBasicAuthHandler
from urllib.request import HTTPCookieProcessor
from urllib.request import build_opener, install_opener, urlopen
from io import BytesIO
from http.cookiejar import CookieJar
import fcntl
import struct
# Compatability imports for standard library
from six.moves import configparser
from six.moves.configparser import NoOptionError, NoSectionError
# 3rd party imports
from tqdm import tqdm
from skimage.io import imread
from astropy.io import fits
from atomicwrites import atomic_write
import requests
class DataFetcherBase(object):
'''
Base class for all data fetchers
'''
def __init__(self, ap_paramList=[], verbose=False):
'''
Initialize data fetcher with parameter list
@param ap_paramList: List of parameters
@param verbose: Output extra information
'''
self.ap_paramList = ap_paramList
self.verbose = verbose
def output(self):
'''
Output data wrapper
@return Datawrapper
'''
pass
def perturb(self):
'''Perturb parameters'''
for param in self.ap_paramList:
param.perturb()
def reset(self):
'''Set all parameters to initial value'''
for param in self.ap_paramList:
param.reset()
def __str__(self):
''' Generate string description'''
return str( [ str(item) for item in self.ap_paramList ] )
def getMetadata(self):
'''
Return metadata about Data Fetcher
@return metadata of object.
'''
return str(self)
def getConfig():
'''
Retrieve skdaccess configuration
@return configParser.ConfigParser object of configuration
'''
config_location = os.path.join(os.path.expanduser('~'), '.skdaccess.conf')
conf = configparser.ConfigParser()
conf.read(config_location)
return conf
def getConfigItem(section, key):
"""
Retrieve skdaccess configuration item
@param section: Section of configuration item
@param key: Configuration key value
@return Requested configuration item or None if it doesn't exist
"""
conf = DataFetcherBase.getConfig()
if section in conf:
return conf.get(section, key, fallback=None)
else:
return None
def writeConfigItem(section, key, value):
"""
Retrieve skdaccess configuration item
@param section: Section of configuration item
@param key: Configuration key value
@param value: Value to be written
@return Requested configuration item or None if it doesn't exist
"""
conf = DataFetcherBase.getConfig()
if section not in conf:
conf.add_section(section)
conf.set(section, key, value)
DataFetcherBase.writeConfig(conf)
def writeConfig(conf):
'''
Write config to disk
@param conf: configparser.ConfigParser object
'''
config_location = os.path.join(os.path.expanduser('~'), '.skdaccess.conf')
config_handle = open(config_location, "w")
conf.write(config_handle)
config_handle.close()
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
pass
def verbose_print(self, *args, **kwargs):
"""
Print statement if verbose flag is set
@param *args: Arguments to pass to print
@param **kwargs: Keyword arguments to pass to print
"""
if self.verbose:
print(*args, **kwargs)
class DataFetcherLocal(DataFetcherBase):
''' Data fetcher base class for use when storing data locally'''
def getDataLocation(data_name):
'''
Get the location of data set
@param data_name: Name of data set
@return string of data location, None if not found
'''
data_name = str.lower(data_name)
conf = DataFetcherLocal.getConfig()
try:
return conf.get(data_name, 'data_location')
except (NoOptionError, NoSectionError):
# If it doesn't exist, create a new one
# Check if an alternate root has been defined
try:
data_location = os.path.join(conf.get('skdaccess', 'root'), data_name)
except (NoOptionError, NoSectionError):
data_location = os.path.join(os.path.expanduser('~'), '.skdaccess', data_name)
# Make directory and set location
os.makedirs(data_location, exist_ok=True)
DataFetcherLocal.setDataLocation(data_name, data_location)
return data_location
def setDataLocation(data_name, location, key='data_location'):
'''
Set the location of a data set
@param data_name: Name of data set
@param location: Location of data set
@param key: Key of configuration option
'''
conf = DataFetcherLocal.getConfig()
if not conf.has_section(data_name):
conf.add_section(data_name)
conf.set(data_name, key, location)
DataFetcherLocal.writeConfig(conf)
class DataFetcherStorage(DataFetcherLocal):
''' Data fetcher base class for use when entire data set is downloaded'''
@classmethod
def downloadFullDataset(cls, out_file, use_file=None):
'''
Abstract function used to download full data set
@param out_file: output file name
@param use_file: Use previously downloaded data
@return Absolute path of parsed data
'''
pass
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
return True
class DataFetcherStream(DataFetcherBase):
'''
Data fetcher base class for downloading data into memory
'''
def retrieveOnlineData(self, data_specification):
'''
Method for downloading data into memory
@param data_specification: Url list of data to be retrieved
@return Retrieved data
'''
# Dictionary to store results
data_dict = OrderedDict()
metadata_dict = OrderedDict()
# Parse data
for url in data_specification:
# Get http data type
with urlopen(url) as url_access:
content_type = url_access.info().get_content_type()
# Access fits file
if content_type == 'application/fits':
# Do not want caching to avoid issues when running multiple pipelines
bytes_data = BytesIO(url_access.read())
with warnings.catch_warnings(), fits.open(bytes_data, cache=False) as hdu_list:
warnings.simplefilter("ignore", fits.verify.VerifyWarning)
# Need to fix header otherwise astropy can fail to read data
hdu_list.verify('fix')
data_dict[url] = hdu_list[1].data
metadata_dict[url] = hdu_list[1].header
# Access jpg file
elif content_type == 'image/jpeg':
data_dict[url] = imread(url)
metadata_dict[url] = None
# Throw warning if content_type not understood
else:
raise RuntimeError('Did not understand content type: ' + content_type)
return metadata_dict, data_dict
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
return True
class DataFetcherCache(DataFetcherLocal):
'''
Data fetcher base class for downloading data and caching results on hard disk
'''
def checkIfDataExists(self, in_file_name):
'''
Checks if the file exists on the filesystem and the file is not empty
@param in_file_name: Input filename to test
@return True if data exists and False otherwise
'''
try:
with open(in_file_name, 'rb') as read_file:
rv = fcntl.fcntl(read_file, fcntl.LOCK_SH)
first_byte = read_file.read(1)
if len(first_byte) == 0:
return False
else:
return True
except FileNotFoundError:
return False
def cacheData(self, keyname, online_path_list, username=None, password=None, authentication_url=None,
cookiejar = None, use_requests=False, use_progress_bar=True):
'''
Download and store specified data to local disk
@param keyname: Name of dataset in configuration file
@param online_path_list: List of urls to data
@param username: Username for accessing online resources
@param password: Password for accessing online resources
@param authentication_url: The url used for authentication (unused when use_requests=True)
@param cookiejar: The cookiejar that stores credentials (unused when use_requests=True)
@param use_requests: Use the requests library instead of the standard library for accessing resources
@param use_progress_bar: Use a progress bar to show number of items downloaded
@return List of downloaded file locations
'''
def parseURL(data_location, in_path):
'''
This function takes the file path of saved data and determines
what url created it.
@param data_location: Absolute path to root directory whose path is not part of the url
@param path: Path to object that will be used to generate a url
@return ParseResult of url generated from in_path
'''
data_location_parts = len(pathlib.Path(data_location).parts[:])
path = pathlib.Path(in_path)
access_type = path.parts[data_location_parts]
if access_type != 'file':
access_type += '://'
else:
access_type += ':///'
url_path = pathlib.Path(*path.parts[data_location_parts+1:]).as_posix()
return parse.urlparse(access_type+url_path)
def generatePath(data_location, parsed_url):
'''
This function takes a parsed url (ParseResult) and
generates the filepath to where the data should be stored
stored
@param data_location: Location where data is stored
@param parsed_url: ParseResult generated from url
@return Local path to file
'''
if parsed_url.query == '':
return os.path.join(data_location, parsed_url.scheme,parsed_url.netloc, parsed_url.path[1:])
else:
return os.path.join(data_location, parsed_url.scheme,parsed_url.netloc,
parsed_url.path[1:] + '?' + parsed_url.query)
# Get absolute path to data directory
data_location = DataFetcherCache.getDataLocation(keyname)
# If it doesn't exist, create a new one
if data_location == None:
data_location = os.path.join(os.path.expanduser('~'), '.skdaccess',keyname)
os.makedirs(data_location, exist_ok=True)
DataFetcherCache.setDataLocation(keyname, data_location)
# Get currently downloaded files
downloaded_full_file_paths = [filename for filename in glob(os.path.join(data_location,'**'), recursive=True) if os.path.isfile(filename)]
# Remove files empty files
downloaded_full_file_paths = [filename for filename in downloaded_full_file_paths if self.checkIfDataExists(filename)]
# Convert filenames to urls
downloaded_parsed_urls = set(parseURL(data_location, file_path) for file_path in downloaded_full_file_paths)
# Determine which files are missing
parsed_http_paths = [parse.urlparse(online_path) for online_path in online_path_list]
missing_files = list(set(parsed_http_paths).difference(downloaded_parsed_urls))
missing_files.sort()
# Download missing files
if len(missing_files) > 0:
# Sanity check on input options
if use_requests == True and authentication_url != None:
raise ValueError('Cannot use an authentication url with requests')
# Setup connection (non requests)
if not use_requests:
# Deal with password protected urls
# This method comes from
# https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
if username != None or password != None:
password_manager = HTTPPasswordMgrWithDefaultRealm()
if authentication_url == None:
authentication_url = [parsed_url.geturl() for parsed_url in missing_files]
password_manager.add_password(None, authentication_url, username, password)
handler = HTTPBasicAuthHandler(password_manager)
# If no cookiejar was given, create a new one
if cookiejar == None:
cookiejar = CookieJar()
cookie_processor = HTTPCookieProcessor(cookiejar)
install_opener(build_opener(cookie_processor, handler))
# Use a cookie with no username or password
elif cookiejar != None:
cookie_processor = HTTPCookieProcessor(cookiejar)
install_opener(build_opener(cookie_processor))
if use_progress_bar:
missing_files_loop = tqdm(missing_files)
else:
missing_files_loop = missing_files
for parsed_url in missing_files_loop:
out_filename = generatePath(data_location, parsed_url)
os.makedirs(os.path.split(out_filename)[0],exist_ok=True)
with open(out_filename, 'a+b') as lockfile:
fcntl.lockf(lockfile, fcntl.LOCK_EX)
lockfile.seek(0)
if len(lockfile.read(1)) == 0:
with atomic_write(out_filename, mode='wb', overwrite=True) as data_file:
if not use_requests:
shutil.copyfileobj(urlopen(parsed_url.geturl()), data_file)
else:
if username != None or password != None:
# This method to download password protected data comes from
# https://wiki.earthdata.nasa.gov/display/EL/How+To+Access+Data+With+Python
with requests.Session() as session:
initial_request = session.request('get',parsed_url.geturl())
r = session.get(initial_request.url, auth=(username,password), stream=True)
if r.status_code == 401:
raise RuntimeError("Authorization Denied")
shutil.copyfileobj(r.raw, data_file, 1024*1024*10)
else:
with requests.Session() as session:
r = session.get(parsed_url.geturl(), stream=True)
shutil.copyfileobj(r.raw, data_file, 1024*1024*10)
# Return a list of file locations for parsing
return [generatePath(data_location, parsed_url) for parsed_url in parsed_http_paths]
def multirun_enabled(self):
'''
Returns whether or not this data fetcher is multirun enabled.
@return Boolean indicating whether or not this data fetcher is multirun enabled
'''
return False
def getHDFStorage(self, keyname):
"""
Retrieve a Pandas HDF Store for a dataset
@param keyname: Key name of HDF store
@return Pandas HDF Store
"""
data_location = DataFetcherCache.getDataLocation(keyname)
if data_location == None:
data_location = os.path.join(os.path.expanduser('~'),'.skdaccess',keyname)
os.makedirs(data_location, exist_ok=True)
data_location = os.path.join(data_location, keyname + '_data.h5')
DataFetcher.setDataLocation(keyname, data_location)
return pd.HDFStore(data_location)
class DataWrapperBase(object):
''' Base class for wrapping data for use in DiscoveryPipeline '''
def __init__(self, obj_wrap, run_id = -1, meta_data = None):
'''
Construct wrapper from input data.
@param obj_wrap: Data to be wrapped
@param run_id: ID of the run
@param meta_data: Metadata to store with data
'''
self.data = obj_wrap
self.results = dict()
self.constants = dict()
self.run_id = run_id
self.meta_data = meta_data
def update(self, obj):
'''
Updated wrapped data
@param obj: New data for wrapper
'''
self.data = obj
def updateMetadata(self, new_metadata):
'''
Update metadata
@param new_metadata: New metadata
'''
self.meta_data = new_metadata
def get(self):
'''
Retrieve stored data.
@return Stored data
'''
return self.data
def getResults(self):
'''
Retrieve accumulated results, if any.
@return store results
'''
return self.results
def addResult(self,rkey,rres):
'''
Add a result to the data wrapper
@param rkey: Result key
@param rres: Result
'''
self.results[rkey] = rres
def reset(self):
''' Reset data back to original state '''
self.results = dict()
def info(self, key=None):
'''
Get information about data wrapper
@return The stored metadata
'''
if key==None:
return self.meta_data
else:
return self.meta_data[key]
def getIterator(self):
'''
Get an iterator to the data
@return iterator to data
'''
pass
def __len__(self):
'''
Get length of wrapped data
@return length of wrapped data
'''
return len(self.data)
def getRunID(self):
'''
Get the Run ID
@return run_id
'''
return self.run_id
class SeriesWrapper(DataWrapperBase):
'''
Data wrapper for series data using a data panel
'''
def __init__(self, obj_wrap, data_names, error_names = None, meta_data = None, run_id = -1):
'''
Initialize Series Wrapper
@param obj_wrap: Pandas data panel to wrap
@param data_names: List of data column names
@param error_names: List of error column names
@param meta_data: Metadata
@param run_id: ID of run
'''
self.data_names = data_names
self.error_names = error_names
super(SeriesWrapper, self).__init__(obj_wrap, run_id, meta_data)
def getIterator(self):
'''
Get an iterator to the data
@return Iterator (label, data, errors) that will cycle over data and error names
'''
if self.error_names != None:
for frame in self.data.minor_axis:
for data_index,error_index in zip(self.data_names, self.error_names):
yield data_index, self.data.loc[data_index, :, frame], self.data.loc[error_index, :, frame]
else:
for frame in self.data.minor_axis:
for data_index in self.data_names:
yield data_index, self.data.loc[data_index, :, frame], None
def getIndices(self):
'''
Get the indicies of the data
@return index of data
'''
return (list(self.data.minor_axis), self.data_names)
def getLength(self):
'''
Get total number of series that the iterate will loop over
@return Number of series iterator will traverse over
'''
return self.data.shape[2]*len(self.data_names)
class SeriesDictionaryWrapper(SeriesWrapper):
'''
Data wrapper for series data using a dictionary of data frames
'''
def getIterator(self):
'''
Get an iterator to the data
@return Iterator (label, data, errors) that will cycle over data and error names
'''
if self.error_names != None:
for frame in self.data.keys():
for data_index,error_index in zip(self.data_names, self.error_names):
yield data_index, self.data[frame].loc[:, data_index], self.data[frame].loc[:, error_index]
else:
for frame in self.data.keys():
for data_index in self.data_names:
yield data_index, self.data[frame].loc[:, data_index], None
def getIndices(self):
'''
Get the indices of the data
@return index of data
'''
return (list(self.data.keys()), self.data_names)
def getLength(self):
'''
Get total number of series that the iterate will loop over
@return Number of series iterator will traverse over
'''
return len(self.data) * len(self.data_names)
class TableWrapper(DataWrapperBase):
'''
Data wrapper for table data using an ordered dictionary
'''
def __init__(self, obj_wrap, run_id = -1, meta_data = None, default_columns = None, default_error_columns = None):
'''
Construct object from input data.
@param obj_wrap: Data to be wrapped
@param run_id: ID of the run
@param meta_data: Metadata to store with data
@param default_columns: Default columns for pipeline items
@param default_error_columns: Default error columns for pipeline items
'''
self.default_columns = default_columns
self.default_error_columns = default_error_columns
super(TableWrapper, self).__init__(obj_wrap, run_id, meta_data)
def getIterator(self):
'''
Iterator access to data.
@return iterator to (label, data frame) from Dictionary
'''
for label,frame in self.data.items():
yield label,frame
def getLength(self):
'''
Get number of data frames
@return Number of data frames
'''
return len(self.data)
def updateData(self, label, index, column_names, new_data):
'''
Update wrapped data
@param label: Data label
@param index: Index of data to update
@param column_names: Names of columns to update
@param new_data: Data to replace the old data
'''
self.data[label].loc[index, column_names] = new_data
def addColumn(self, label, column_names, new_data):
'''
Add new column to data
@param label: Data label
@param column_names: Names of columns to update
@param new_data: New data to add
'''
self.data[label].loc[:,column_names] = new_data
def getDefaultColumns(self):
'''
Get the default columns of data
@return List of default columns
'''
return self.default_columns
def getDefaultErrorColumns(self):
'''
Get the default error columns of data
@return List of default error columns
'''
return self.default_error_columns
def removeFrames(self,label_list):
'''
Remove Data Frames from wrapper
@param label_list: List of labels to remove
'''
for label in label_list:
del self.data[label]
def updateFrames(self,label_list,frame_list):
'''
Update data frames
@param label_list: List of labels to update
@param frame_list: List of updated frames
'''
for label, frame in zip(label_list, frame_list):
self.data[label] = frame
class ImageWrapper(DataWrapperBase):
'''
Wrapper for image data
'''
def getIterator(self):
'''
Get an iterator to the data
@return Iterator yielding (label, image_data)
'''
return iter(self.data.items())
def updateData(self, label, new_data):
'''
Change image
@param label: Label of data to be changed
@param new_data: New data to replace old data
'''
self.data[label] = new_data
def deleteData(self, label):
'''
Delete image
@param label: Delete image with label
'''
del self.data[label]
class XArrayWrapper(DataWrapperBase):
'''
Wrapper for xarrays
'''
def __init__(self, obj_wrap, index_list, run_id = -1 ):
self.index_list = index_list
super(XArrayWrapper, self).__init__(obj_wrap, run_id)
def getIterator(self):
'''
Get an iterator that iterators over the index
@return iterator to data
'''
for index in self.index_list:
yield index, self.data[index]
def info(self, key=None):
'''
Get information about xarray data wrapper
@return The stored metadata
'''
if key==None:
return self.data.attrs
else:
return self.data[key].attrs | 0.676299 | 0.154759 |
# """@package AlgoParam
# Provides tunable parameter classes for use in the Computer-Aided Discovery pipeline.
# """
import random
import itertools
class AutoParam:
'''
Defines a tunable parameter class inherited by specific subclasses
AutoParam class and subclass work on a single value.
functions: perturb value and reset to initial value
'''
def __init__(self, val_init):
'''
Initialize an AutoParam object.
@param val_init: Value for parameter
'''
self.val = val_init
self.val_init = val_init
def perturb(self):
'''
Perturb paramter.
This class doesn't change the value.
'''
self.val = self.val
def reset(self):
''' Reset value to initial value '''
self.val = self.val_init
def __str__(self):
'''
String representation of class
@return String of current value
'''
return str(self.val)
def __call__(self):
'''
Retrieves current value of the parameter
@return Current value of the parameter
'''
return self.val
class AutoParamMinMax(AutoParam):
'''
A tunable parameter with min and max ranges, perturbs to a random value in range.
It can optionally choose either the min or the max after n perturbs
'''
def __init__(self, val_init, val_min, val_max, decimals=0, extreme=0):
'''
Construct AutoParamMinMax object
@param val_init: Initial value for parameter
@param val_min: Minimum value for param
@param val_max: Maximum value for parameter
@param decimals: Number of decimals to include in the random number
@param extreme: Either the maximum or minimum is chosen every
extreme number of iterations. Using a value of
one will be an extreme value every time.
Using a value of zero will always choose a
random value.
'''
self.val = val_init
self.val_init = val_init
self.val_min = val_min
self.val_max = val_max
self.n = 0
self.n_max = extreme
self.decimals = decimals
def perturb(self):
'''
Peturb the paramter by choosing a random value between val_min and val_max.
Will choose a random number with precision specified by decimals. Will optionally
pick the min or the max value after a specified number of perturb calls
'''
if self.n == self.n_max - 1:
# Choose and extreme value
self.val = random.sample([self.val_min, self.val_max], 1)[0]
self.n = 0
else:
if self.decimals == 0:
self.val = random.randint(self.val_min,self.val_max)
else:
self.val = random.random() * (self.val_max - self.val_min + 10**-self.decimals) + (self.val_min - 0.5 * 10**-self.decimals)
self.val = round(self.val, ndigits=self.decimals)
if self.n_max > 0:
self.n += 1
def reset(self):
''' Reset to initial value '''
self.n = 0
self.val = self.val_init
class AutoParamList(AutoParam):
'''
A tunable parameter with a specified list of choices that can be randomly selected via perturb
'''
def __init__(self, val_init, val_list):
'''
Construct an AutoParamList object
@param val_init: initial value for the parameter
@param val_list: List of possible variants for the parameter
'''
self.val = val_init
self.val_init = val_init
self.val_list = val_list
def perturb(self):
''' Randomly select a value from val_list '''
self.val = random.choice(self.val_list)
def reset(self):
''' Reset the list to the default value '''
self.val = self.val_init
class AutoParamListCycle(AutoParam):
'''
Cycles through a list of paramters
'''
def __init__(self, val_list):
'''
Construct an AutoParamListCycle
@param val_list: List of possible variants for the parameter
'''
self.val = val_list[0]
self.val_list = val_list
self.current_index = 0
def perturb(self):
'''
Select the next value from the list of parameters.
'''
if self.current_index >= len(self.val_list) - 1:
self.current_index = 0
else:
self.current_index += 1
self.val = self.val_list[self.current_index]
def reset(self):
''' Reset the list to the default values '''
self.val = self.val_list[0]
self.current_index = 0
### Starting list perturber
class AutoList(object):
'''
Specifies a list for returning selections of lists, as opposed to a single element
'''
def __init__(self, val_list):
'''
Construct a AutoList object
@param val_list: List of parameters
'''
self.val_init = val_list
self.val_list = val_list
def val(self):
'''
Retrieves current list of parameters.
@return List of current parameters
'''
return self.val_list
def perturb(self):
''' This class doesn't change the list when being perturbed '''
self.val_list = self.val_list
def reset(self):
''' Reset current list to initial list '''
self.val_list = self.val_init
def getAllOptions(self):
'''
Get all possible options
@return List that contains every option that could possibly be selected
'''
return self.val_init
def __str__(self):
'''
String representation of class.
@return String containing all parmaters in list
'''
return '[' + ', '.join([str(val) for val in self.val_list]) + ']'
return str(self.val_list)
def __len__(self):
'''
Retrieves the length of parameters contained in the list
@return Number of elements in the list
'''
return len(self.val_list)
def __getitem__(self, ii):
'''
Retrieves item from list
@param ii: Index of item to be retrieved
@return Item at index ii
'''
return self.val_list[ii]
def __setitem__(self, ii, val):
'''
Set a value in the list.
@param ii: Index of list to be set
@param val: Input value
'''
self.val_list[ii] = val
def __call__(self):
'''
Retrieve current list
@return Current list
'''
return self.val_list
class AutoListSubset(AutoList):
'''
An AutoList perturber that creates random subsets of a list. List can be empty
'''
def perturb(self):
''' Peturb the list by selecting a random subset of the initial list '''
# randomly index list elements to be kept
index = [random.randint(0,1) for r in range(len(self.val_init))]
# update list and keep list values where index is 1
self.val_list = list(itertools.compress(self.val_init, index))
class AutoListPermute(AutoList):
'''
A perturber that permutes a list
'''
def perturb(self):
''' Randomly permutes the initial list '''
random.shuffle(self.val_list) #shuffles in place and updates at same time
class AutoListRemove(AutoList):
'''
Removes a different single element from the initial list at each perturb call
'''
def __init__(self, val_list):
'''
Construct a AutoList_Cycle object
@param val_list: Initial list of parameters.
'''
self.n = -1
super(AutoListRemove, self).__init__(val_list)
def perturb(self):
'''
Systematically change which item is absent from the list
'''
self.n = self.n + 1
if self.n >= len(self.val_init):
self.n = 0
index = [1 for i in range(len(self.val_init))]
index[self.n] = 0
self.val_list = list(itertools.compress(self.val_init, index))
def reset(self):
''' Reset the list to its initial value '''
self.n = -1
self.val_list = self.val_init
class AutoListCycle(AutoList):
'''
An Autolist that cycles through different lists
'''
def __init__(self, list_val_list):
'''
Construct a AutoList_Cycle object
@param list_val_list: List of different lists to cycle through
'''
self.list_val_list = list_val_list
self.val_list = self.list_val_list[0]
self.index = 0
def perturb(self):
'''
Select next list from list of lists
'''
if self.index < len(self.list_val_list) - 1:
self.index += 1
else:
self.index = 0
self.val_list = self.list_val_list[self.index]
def reset(self):
'''
Resets to the first list in the list of lists
'''
self.index = 0
self.val_list = self.list_val_list[self.index]
def getAllOptions(self):
'''
Get elements that could possibly be called
@return List of all possible elements
'''
all_options = []
for option_list in list_val_list:
all_options += option_list
return all_options | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/framework/param_class.py | param_class.py |
# """@package AlgoParam
# Provides tunable parameter classes for use in the Computer-Aided Discovery pipeline.
# """
import random
import itertools
class AutoParam:
'''
Defines a tunable parameter class inherited by specific subclasses
AutoParam class and subclass work on a single value.
functions: perturb value and reset to initial value
'''
def __init__(self, val_init):
'''
Initialize an AutoParam object.
@param val_init: Value for parameter
'''
self.val = val_init
self.val_init = val_init
def perturb(self):
'''
Perturb paramter.
This class doesn't change the value.
'''
self.val = self.val
def reset(self):
''' Reset value to initial value '''
self.val = self.val_init
def __str__(self):
'''
String representation of class
@return String of current value
'''
return str(self.val)
def __call__(self):
'''
Retrieves current value of the parameter
@return Current value of the parameter
'''
return self.val
class AutoParamMinMax(AutoParam):
'''
A tunable parameter with min and max ranges, perturbs to a random value in range.
It can optionally choose either the min or the max after n perturbs
'''
def __init__(self, val_init, val_min, val_max, decimals=0, extreme=0):
'''
Construct AutoParamMinMax object
@param val_init: Initial value for parameter
@param val_min: Minimum value for param
@param val_max: Maximum value for parameter
@param decimals: Number of decimals to include in the random number
@param extreme: Either the maximum or minimum is chosen every
extreme number of iterations. Using a value of
one will be an extreme value every time.
Using a value of zero will always choose a
random value.
'''
self.val = val_init
self.val_init = val_init
self.val_min = val_min
self.val_max = val_max
self.n = 0
self.n_max = extreme
self.decimals = decimals
def perturb(self):
'''
Peturb the paramter by choosing a random value between val_min and val_max.
Will choose a random number with precision specified by decimals. Will optionally
pick the min or the max value after a specified number of perturb calls
'''
if self.n == self.n_max - 1:
# Choose and extreme value
self.val = random.sample([self.val_min, self.val_max], 1)[0]
self.n = 0
else:
if self.decimals == 0:
self.val = random.randint(self.val_min,self.val_max)
else:
self.val = random.random() * (self.val_max - self.val_min + 10**-self.decimals) + (self.val_min - 0.5 * 10**-self.decimals)
self.val = round(self.val, ndigits=self.decimals)
if self.n_max > 0:
self.n += 1
def reset(self):
''' Reset to initial value '''
self.n = 0
self.val = self.val_init
class AutoParamList(AutoParam):
'''
A tunable parameter with a specified list of choices that can be randomly selected via perturb
'''
def __init__(self, val_init, val_list):
'''
Construct an AutoParamList object
@param val_init: initial value for the parameter
@param val_list: List of possible variants for the parameter
'''
self.val = val_init
self.val_init = val_init
self.val_list = val_list
def perturb(self):
''' Randomly select a value from val_list '''
self.val = random.choice(self.val_list)
def reset(self):
''' Reset the list to the default value '''
self.val = self.val_init
class AutoParamListCycle(AutoParam):
'''
Cycles through a list of paramters
'''
def __init__(self, val_list):
'''
Construct an AutoParamListCycle
@param val_list: List of possible variants for the parameter
'''
self.val = val_list[0]
self.val_list = val_list
self.current_index = 0
def perturb(self):
'''
Select the next value from the list of parameters.
'''
if self.current_index >= len(self.val_list) - 1:
self.current_index = 0
else:
self.current_index += 1
self.val = self.val_list[self.current_index]
def reset(self):
''' Reset the list to the default values '''
self.val = self.val_list[0]
self.current_index = 0
### Starting list perturber
class AutoList(object):
'''
Specifies a list for returning selections of lists, as opposed to a single element
'''
def __init__(self, val_list):
'''
Construct a AutoList object
@param val_list: List of parameters
'''
self.val_init = val_list
self.val_list = val_list
def val(self):
'''
Retrieves current list of parameters.
@return List of current parameters
'''
return self.val_list
def perturb(self):
''' This class doesn't change the list when being perturbed '''
self.val_list = self.val_list
def reset(self):
''' Reset current list to initial list '''
self.val_list = self.val_init
def getAllOptions(self):
'''
Get all possible options
@return List that contains every option that could possibly be selected
'''
return self.val_init
def __str__(self):
'''
String representation of class.
@return String containing all parmaters in list
'''
return '[' + ', '.join([str(val) for val in self.val_list]) + ']'
return str(self.val_list)
def __len__(self):
'''
Retrieves the length of parameters contained in the list
@return Number of elements in the list
'''
return len(self.val_list)
def __getitem__(self, ii):
'''
Retrieves item from list
@param ii: Index of item to be retrieved
@return Item at index ii
'''
return self.val_list[ii]
def __setitem__(self, ii, val):
'''
Set a value in the list.
@param ii: Index of list to be set
@param val: Input value
'''
self.val_list[ii] = val
def __call__(self):
'''
Retrieve current list
@return Current list
'''
return self.val_list
class AutoListSubset(AutoList):
'''
An AutoList perturber that creates random subsets of a list. List can be empty
'''
def perturb(self):
''' Peturb the list by selecting a random subset of the initial list '''
# randomly index list elements to be kept
index = [random.randint(0,1) for r in range(len(self.val_init))]
# update list and keep list values where index is 1
self.val_list = list(itertools.compress(self.val_init, index))
class AutoListPermute(AutoList):
'''
A perturber that permutes a list
'''
def perturb(self):
''' Randomly permutes the initial list '''
random.shuffle(self.val_list) #shuffles in place and updates at same time
class AutoListRemove(AutoList):
'''
Removes a different single element from the initial list at each perturb call
'''
def __init__(self, val_list):
'''
Construct a AutoList_Cycle object
@param val_list: Initial list of parameters.
'''
self.n = -1
super(AutoListRemove, self).__init__(val_list)
def perturb(self):
'''
Systematically change which item is absent from the list
'''
self.n = self.n + 1
if self.n >= len(self.val_init):
self.n = 0
index = [1 for i in range(len(self.val_init))]
index[self.n] = 0
self.val_list = list(itertools.compress(self.val_init, index))
def reset(self):
''' Reset the list to its initial value '''
self.n = -1
self.val_list = self.val_init
class AutoListCycle(AutoList):
'''
An Autolist that cycles through different lists
'''
def __init__(self, list_val_list):
'''
Construct a AutoList_Cycle object
@param list_val_list: List of different lists to cycle through
'''
self.list_val_list = list_val_list
self.val_list = self.list_val_list[0]
self.index = 0
def perturb(self):
'''
Select next list from list of lists
'''
if self.index < len(self.list_val_list) - 1:
self.index += 1
else:
self.index = 0
self.val_list = self.list_val_list[self.index]
def reset(self):
'''
Resets to the first list in the list of lists
'''
self.index = 0
self.val_list = self.list_val_list[self.index]
def getAllOptions(self):
'''
Get elements that could possibly be called
@return List of all possible elements
'''
all_options = []
for option_list in list_val_list:
all_options += option_list
return all_options | 0.754282 | 0.432423 |
import argparse
import os
from skdaccess.astro.kepler import DataFetcher as KDF
from skdaccess.geo.pbo import DataFetcher as PBODF
from skdaccess.geo.groundwater import DataFetcher as WDF
from skdaccess.geo.grace import DataFetcher as GRACEDF
from skdaccess.geo.gldas import DataFetcher as GLDASDF
def skdaccess_script():
'''This funcion defines a script for downloading data'''
parser = argparse.ArgumentParser(description='The Sci-kit Data Access (skdaccess) package is a tool for integrating various scientific data sets into the Python environment using a common interface. This script can download different scientific data sets for offline analysis.')
parser.add_argument('data_set', help='Name of data set', nargs='?')
parser.add_argument('-l','--list', dest='list_bool', help='List data sets', action='store_true')
parser.add_argument('-i','--input', dest='local_data', help='Use LOCAL_DATA that has already been downloaded')
parser.add_argument('-c','--check',dest='check_bool', help='Print data location for data set', action='store_true')
args = parser.parse_args()
if args.list_bool:
print("This utility can install one of the following data sets:")
print()
print('\tPBO - Plate Boundary Observatory GPS Time Series ')
print('\tGRACE - Monthly Mass Grids')
print('\tGLDAS - Monthly estimates from GDLAS model in same resolution as GRACE')
print('\tGroundwater - Ground water daily values from across the US')
parser.exit(1)
elif args.data_set is None:
parser.print_help()
parser.exit(1)
elif args.check_bool:
config = PBODF.getConfig()
location = config.get(str.lower(args.data_set), 'data_location',fallback=None)
if location == None:
print('No data location available for ' + str.lower(args.data_set))
else:
print('The data is located at: ' + location)
parser.exit(1)
if str.lower(args.data_set) == 'pbo':
PBODF.downloadFullDataset(use_file=args.local_data)
elif str.lower(args.data_set) == 'grace':
GRACEDF.downloadFullDataset(use_file=args.local_data)
elif str.lower(args.data_set) == 'gldas':
GLDASDF.downloadFullDataset(use_file=args.local_data)
elif str.lower(args.data_set) == 'groundwater':
WDF.downloadFullDataset(use_file=args.local_data)
else:
print('Data set not understood') | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/bin/skdaccess.py | skdaccess.py |
import argparse
import os
from skdaccess.astro.kepler import DataFetcher as KDF
from skdaccess.geo.pbo import DataFetcher as PBODF
from skdaccess.geo.groundwater import DataFetcher as WDF
from skdaccess.geo.grace import DataFetcher as GRACEDF
from skdaccess.geo.gldas import DataFetcher as GLDASDF
def skdaccess_script():
'''This funcion defines a script for downloading data'''
parser = argparse.ArgumentParser(description='The Sci-kit Data Access (skdaccess) package is a tool for integrating various scientific data sets into the Python environment using a common interface. This script can download different scientific data sets for offline analysis.')
parser.add_argument('data_set', help='Name of data set', nargs='?')
parser.add_argument('-l','--list', dest='list_bool', help='List data sets', action='store_true')
parser.add_argument('-i','--input', dest='local_data', help='Use LOCAL_DATA that has already been downloaded')
parser.add_argument('-c','--check',dest='check_bool', help='Print data location for data set', action='store_true')
args = parser.parse_args()
if args.list_bool:
print("This utility can install one of the following data sets:")
print()
print('\tPBO - Plate Boundary Observatory GPS Time Series ')
print('\tGRACE - Monthly Mass Grids')
print('\tGLDAS - Monthly estimates from GDLAS model in same resolution as GRACE')
print('\tGroundwater - Ground water daily values from across the US')
parser.exit(1)
elif args.data_set is None:
parser.print_help()
parser.exit(1)
elif args.check_bool:
config = PBODF.getConfig()
location = config.get(str.lower(args.data_set), 'data_location',fallback=None)
if location == None:
print('No data location available for ' + str.lower(args.data_set))
else:
print('The data is located at: ' + location)
parser.exit(1)
if str.lower(args.data_set) == 'pbo':
PBODF.downloadFullDataset(use_file=args.local_data)
elif str.lower(args.data_set) == 'grace':
GRACEDF.downloadFullDataset(use_file=args.local_data)
elif str.lower(args.data_set) == 'gldas':
GLDASDF.downloadFullDataset(use_file=args.local_data)
elif str.lower(args.data_set) == 'groundwater':
WDF.downloadFullDataset(use_file=args.local_data)
else:
print('Data set not understood') | 0.400867 | 0.327615 |
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
from skdaccess.framework.param_class import *
from skdaccess.utilities.mahali_util import convert_date
from pkg_resources import resource_filename
# Standard library imports
from glob import glob
import shutil
import os
import json
from collections import OrderedDict
# 3rd part imports
from six.moves.urllib.request import urlopen
from tqdm import tqdm
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStream):
'''
Data Fetcher for Mahali temperature data
'''
def __init__(self, ap_paramList=[], start_date=None, end_date=None):
'''
Initialize Mahali temperature data fetcher
@param ap_paramList[stations]: Autolist of stations (Defaults to all stations)
@param start_date: Starting date for seelcting data (Defaults to beginning of available data)
@param end_date: Ending date for selecting data (Defaults to end of available data)
'''
if start_date == None:
self.start_date = pd.to_datetime('2015271', format='%Y%j')
else:
self.start_date = convert_date(start_date)
if end_date == None:
self.end_date = pd.to_datetime('2015315', format='%Y%j')
else:
self.end_date = convert_date(end_date)
if len(ap_paramList) == 0:
station_list = [
'mh02',
'mh03',
'mh04',
'mh05',
'mh06',
'mh07',
'mh08',
'mh09',
'mh13',
]
ap_paramList = [ AutoList(station_list) ]
super(DataFetcher, self).__init__(ap_paramList)
def retrieveOnlineData(self, data_specification):
'''
Load data in from a remote source
@param data_specification: Pandas dataframe containing the columns 'station', 'date', and 'filename'
@return Ordered dictionary for each station (key) which cointains a pandas data frame of the temperature
'''
# Location of data depot
url = 'http://apollo.haystack.mit.edu/mahali-data/'
locations = ( url
+ 'metadata/'
+ data_specification['station']
+ '/logs/sensor/'
+ data_specification['date'].apply(lambda x: x.strftime('%Y%j'))
+ '/'
+ data_specification['filename'] ).tolist()
# Data will go into this dictionary as {station: [(time, measurement), (time2, measurement2), ...]}
all_temperature_data = OrderedDict()
# Parse jsonl files
for station, location in zip(data_specification['station'], locations):
with urlopen(location) as this_json_file:
# Encased in a try/except because of lines full of junk
# (e.g. the last line of metadata/mh02/logs/sensor/2015277/sensor@2015-10-04T225240Z_1443999160.jsonl)
try:
for line in this_json_file:
line_data = json.loads(line)
this_time = pd.to_datetime(line_data['time'])
this_temp = float(line_data["event_data"]["data"])
# If data for that station already exists
try:
all_temperature_data[station].append([this_time, this_temp])
# If there's no existing entry for that station
except KeyError:
all_temperature_data[station] = [ [this_time, this_temp] ]
except ValueError:
pass
for station in all_temperature_data.keys():
all_temperature_data[station] = pd.DataFrame(all_temperature_data[station], columns=['Time','Temperature']).set_index('Time')
return all_temperature_data
def output(self):
'''
Generate data wrapper for Mahali temperatures
@return Mahali temperature data wrapper
'''
# Function to extract date from filename (only month/day/year, no hours/minutes/seconds)
def toDateTime(in_filename):
return pd.to_datetime(pd.to_datetime(in_filename[7:25]).strftime('%Y-%m-%d'))
# Read in file list:
mahali_temperature_info = resource_filename('skdaccess', os.path.join('support','mahali_temperature_info.txt'))
filenames = pd.read_csv(mahali_temperature_info,header=None,
names=('station','filename'),
skipinitialspace=True)
# Create a columns of dates
filenames['date'] = filenames['filename'].apply(toDateTime)
# Need to grab day before as data can spill over
adjusted_start_date = self.start_date - pd.to_timedelta('1d')
adjusted_end_date = self.end_date + pd.to_timedelta('1d')
station_list = self.ap_paramList[0]()
# Get data for each selected station one day before until one day afte requested date
index_to_retrieve = np.logical_and.reduce([filenames.loc[:, 'station'].apply(lambda x: x in station_list),
filenames.loc[:, 'date'] >= adjusted_start_date,
filenames.loc[:, 'date'] <= self.end_date])
all_temperature_data = self.retrieveOnlineData(filenames[index_to_retrieve])
# Due to data spillover, cut each data frame in dictionary
for station in all_temperature_data.keys():
all_temperature_data[station] = all_temperature_data[station].loc[adjusted_start_date:adjusted_end_date]
# Return table wrapper of data
return TableWrapper(all_temperature_data, default_columns = ['Temperature']) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/mahali/temperature/data_fetcher.py | data_fetcher.py |
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
from skdaccess.framework.param_class import *
from skdaccess.utilities.mahali_util import convert_date
from pkg_resources import resource_filename
# Standard library imports
from glob import glob
import shutil
import os
import json
from collections import OrderedDict
# 3rd part imports
from six.moves.urllib.request import urlopen
from tqdm import tqdm
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStream):
'''
Data Fetcher for Mahali temperature data
'''
def __init__(self, ap_paramList=[], start_date=None, end_date=None):
'''
Initialize Mahali temperature data fetcher
@param ap_paramList[stations]: Autolist of stations (Defaults to all stations)
@param start_date: Starting date for seelcting data (Defaults to beginning of available data)
@param end_date: Ending date for selecting data (Defaults to end of available data)
'''
if start_date == None:
self.start_date = pd.to_datetime('2015271', format='%Y%j')
else:
self.start_date = convert_date(start_date)
if end_date == None:
self.end_date = pd.to_datetime('2015315', format='%Y%j')
else:
self.end_date = convert_date(end_date)
if len(ap_paramList) == 0:
station_list = [
'mh02',
'mh03',
'mh04',
'mh05',
'mh06',
'mh07',
'mh08',
'mh09',
'mh13',
]
ap_paramList = [ AutoList(station_list) ]
super(DataFetcher, self).__init__(ap_paramList)
def retrieveOnlineData(self, data_specification):
'''
Load data in from a remote source
@param data_specification: Pandas dataframe containing the columns 'station', 'date', and 'filename'
@return Ordered dictionary for each station (key) which cointains a pandas data frame of the temperature
'''
# Location of data depot
url = 'http://apollo.haystack.mit.edu/mahali-data/'
locations = ( url
+ 'metadata/'
+ data_specification['station']
+ '/logs/sensor/'
+ data_specification['date'].apply(lambda x: x.strftime('%Y%j'))
+ '/'
+ data_specification['filename'] ).tolist()
# Data will go into this dictionary as {station: [(time, measurement), (time2, measurement2), ...]}
all_temperature_data = OrderedDict()
# Parse jsonl files
for station, location in zip(data_specification['station'], locations):
with urlopen(location) as this_json_file:
# Encased in a try/except because of lines full of junk
# (e.g. the last line of metadata/mh02/logs/sensor/2015277/sensor@2015-10-04T225240Z_1443999160.jsonl)
try:
for line in this_json_file:
line_data = json.loads(line)
this_time = pd.to_datetime(line_data['time'])
this_temp = float(line_data["event_data"]["data"])
# If data for that station already exists
try:
all_temperature_data[station].append([this_time, this_temp])
# If there's no existing entry for that station
except KeyError:
all_temperature_data[station] = [ [this_time, this_temp] ]
except ValueError:
pass
for station in all_temperature_data.keys():
all_temperature_data[station] = pd.DataFrame(all_temperature_data[station], columns=['Time','Temperature']).set_index('Time')
return all_temperature_data
def output(self):
'''
Generate data wrapper for Mahali temperatures
@return Mahali temperature data wrapper
'''
# Function to extract date from filename (only month/day/year, no hours/minutes/seconds)
def toDateTime(in_filename):
return pd.to_datetime(pd.to_datetime(in_filename[7:25]).strftime('%Y-%m-%d'))
# Read in file list:
mahali_temperature_info = resource_filename('skdaccess', os.path.join('support','mahali_temperature_info.txt'))
filenames = pd.read_csv(mahali_temperature_info,header=None,
names=('station','filename'),
skipinitialspace=True)
# Create a columns of dates
filenames['date'] = filenames['filename'].apply(toDateTime)
# Need to grab day before as data can spill over
adjusted_start_date = self.start_date - pd.to_timedelta('1d')
adjusted_end_date = self.end_date + pd.to_timedelta('1d')
station_list = self.ap_paramList[0]()
# Get data for each selected station one day before until one day afte requested date
index_to_retrieve = np.logical_and.reduce([filenames.loc[:, 'station'].apply(lambda x: x in station_list),
filenames.loc[:, 'date'] >= adjusted_start_date,
filenames.loc[:, 'date'] <= self.end_date])
all_temperature_data = self.retrieveOnlineData(filenames[index_to_retrieve])
# Due to data spillover, cut each data frame in dictionary
for station in all_temperature_data.keys():
all_temperature_data[station] = all_temperature_data[station].loc[adjusted_start_date:adjusted_end_date]
# Return table wrapper of data
return TableWrapper(all_temperature_data, default_columns = ['Temperature']) | 0.639061 | 0.311453 |
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.framework.param_class import *
from pkg_resources import resource_filename
from skdaccess.utilities.mahali_util import convert_date, parseIonoFile
from skdaccess.utilities.support import retrieveCommonDatesHDF
# Standard library imports
from urllib import parse
from collections import OrderedDict
from collections import defaultdict
from itertools import repeat
# 3rd party imports
from tqdm import tqdm
import pandas as pd
class DataFetcher(DataFetcherCache):
'''
Data Fetcher for Mahali Data
'''
def __init__(self, ap_paramList=[], start_date=None, end_date=None):
'''
Initialize Mahali Data Fetcher
@param ap_paramList[stations]: Autolist of stations (Defaults to all stations)
@param start_date: Starting date for seelcting data (Defaults to beginning of available data)
@param end_date: Ending date for selecting data (Defaults to end of available data)
'''
# Get start date
if start_date == None:
self.start_date = pd.to_datetime('2015275', format='%Y%j')
else:
self.start_date = convert_date(start_date)
# Get end date
if end_date == None:
self.end_date = pd.to_datetime('2015307', format='%Y%j')
else:
self.end_date = convert_date(end_date)
self.date_range = pd.date_range(self.start_date, self.end_date)
# Set station list if none is given
if len(ap_paramList) == 0:
station_list = [
'mh02',
'mh03',
'mh04',
'mh05',
'mh06',
'mh07',
'mh08',
'mh09',
'mh13',
]
ap_paramList = [ AutoList(station_list) ]
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Generate data wrapper for Mahali tec data
@return Mahali data wrapper
'''
def generatePath(base_url, station, in_date):
'''
Generate path to file based on station, date, and base url
@param base_url: Base url to put in front of generated url
@param station: Name of station
@param in_date: Date of data to create path for
@return The url for the station data
'''
year = in_date.strftime('%Y')
day = in_date.strftime('%j')
date = in_date.strftime('%Y%m%d')
path = 'tec/{year}/{day}/{station}-{date}.iono.gz'.format(year=year,
day=day,
station=station,
date=date)
return parse.urljoin(base_url, path)
# Get station lists
station_list = self.ap_paramList[0]()
# Retrieve dates containing data for station list
available_data_dict = retrieveCommonDatesHDF('mahali_tec_info.hdf', station_list, self.date_range)
# Location of data
base_url = 'http://apollo.haystack.mit.edu/mahali-data/'
url_list = []
# Generate url list
for station, dates in available_data_dict.items():
url_list += list(map(generatePath, repeat(base_url), repeat(station), dates))
# Cache data
file_list = self.cacheData('mahali_tec', url_list)
# Dictionary to hold parsed data
parsed_data_dict = defaultdict(list)
# Parse data
for filename in file_list:
station = filename[-21:-17]
parsed_data_dict[station].append(parseIonoFile(filename))
# combine data frames for each station into a single
combined_data_dict = OrderedDict()
for station,data in parsed_data_dict.items():
combined_data_dict[station] = pd.concat(data)
# Return data wrapper
return TableWrapper(combined_data_dict, default_columns=['vertical_tec']) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/mahali/tec/data_fetcher.py | data_fetcher.py |
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.framework.param_class import *
from pkg_resources import resource_filename
from skdaccess.utilities.mahali_util import convert_date, parseIonoFile
from skdaccess.utilities.support import retrieveCommonDatesHDF
# Standard library imports
from urllib import parse
from collections import OrderedDict
from collections import defaultdict
from itertools import repeat
# 3rd party imports
from tqdm import tqdm
import pandas as pd
class DataFetcher(DataFetcherCache):
'''
Data Fetcher for Mahali Data
'''
def __init__(self, ap_paramList=[], start_date=None, end_date=None):
'''
Initialize Mahali Data Fetcher
@param ap_paramList[stations]: Autolist of stations (Defaults to all stations)
@param start_date: Starting date for seelcting data (Defaults to beginning of available data)
@param end_date: Ending date for selecting data (Defaults to end of available data)
'''
# Get start date
if start_date == None:
self.start_date = pd.to_datetime('2015275', format='%Y%j')
else:
self.start_date = convert_date(start_date)
# Get end date
if end_date == None:
self.end_date = pd.to_datetime('2015307', format='%Y%j')
else:
self.end_date = convert_date(end_date)
self.date_range = pd.date_range(self.start_date, self.end_date)
# Set station list if none is given
if len(ap_paramList) == 0:
station_list = [
'mh02',
'mh03',
'mh04',
'mh05',
'mh06',
'mh07',
'mh08',
'mh09',
'mh13',
]
ap_paramList = [ AutoList(station_list) ]
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Generate data wrapper for Mahali tec data
@return Mahali data wrapper
'''
def generatePath(base_url, station, in_date):
'''
Generate path to file based on station, date, and base url
@param base_url: Base url to put in front of generated url
@param station: Name of station
@param in_date: Date of data to create path for
@return The url for the station data
'''
year = in_date.strftime('%Y')
day = in_date.strftime('%j')
date = in_date.strftime('%Y%m%d')
path = 'tec/{year}/{day}/{station}-{date}.iono.gz'.format(year=year,
day=day,
station=station,
date=date)
return parse.urljoin(base_url, path)
# Get station lists
station_list = self.ap_paramList[0]()
# Retrieve dates containing data for station list
available_data_dict = retrieveCommonDatesHDF('mahali_tec_info.hdf', station_list, self.date_range)
# Location of data
base_url = 'http://apollo.haystack.mit.edu/mahali-data/'
url_list = []
# Generate url list
for station, dates in available_data_dict.items():
url_list += list(map(generatePath, repeat(base_url), repeat(station), dates))
# Cache data
file_list = self.cacheData('mahali_tec', url_list)
# Dictionary to hold parsed data
parsed_data_dict = defaultdict(list)
# Parse data
for filename in file_list:
station = filename[-21:-17]
parsed_data_dict[station].append(parseIonoFile(filename))
# combine data frames for each station into a single
combined_data_dict = OrderedDict()
for station,data in parsed_data_dict.items():
combined_data_dict[station] = pd.concat(data)
# Return data wrapper
return TableWrapper(combined_data_dict, default_columns=['vertical_tec']) | 0.604983 | 0.224034 |
# """@package GRACE
# Provides classes for accessing GRACE data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from skdaccess.utilities.grace_util import readTellusData, getStartEndDate
# standard library imports
import re
from ftplib import FTP
import os
import glob
from collections import OrderedDict
from configparser import NoSectionError, NoOptionError
from glob import glob
from math import floor
# 3rd party package imports
import pandas as pd
import numpy as np
from tqdm import tqdm
class DataFetcher(DataFetcherStorage):
''' Data Fetcher for GRACE data '''
def __init__(self, ap_paramList, start_date = None, end_date = None):
'''
Construct a Grace Data Fetcher
@param ap_paramList[geo_point]: AutoList of geographic location tuples (lat,lon)
@param start_date: Beginning date
@param end_date: Ending date
'''
self.start_date = start_date
self.end_date = end_date
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Create data wrapper of grace data for specified geopoints.
@return Grace Data Wrapper
'''
conf = DataFetcher.getConfig()
try:
data_location = conf.get('grace', 'data_location')
csr_filename = conf.get('grace', 'csr_filename')
jpl_filename = conf.get('grace', 'jpl_filename')
gfz_filename = conf.get('grace', 'gfz_filename')
scale_factor_filename = conf.get('grace', 'scale_factor_filename')
except (NoOptionError, NoSectionError) as exc:
print('No data information available, please run: skdaccess grace')
raise exc
geo_point_list = self.ap_paramList[0]()
csr_data, csr_meta, lat_bounds, lon_bounds = readTellusData(os.path.join(data_location, csr_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'CSR','time')
jpl_data, jpl_meta, = readTellusData(os.path.join(data_location, jpl_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'JPL','time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
gfz_data, gfz_meta, = readTellusData(os.path.join(data_location, gfz_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'GFZ','time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
scale_factor_data, scale_factor_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude', 'SCALE_FACTOR',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
leakage_error_data, leakage_error_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude', 'LEAKAGE_ERROR',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
measurement_error_data, measurement_error_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude',
'MEASUREMENT_ERROR', lat_bounds=lat_bounds,
lon_bounds=lon_bounds)[:2]
# Get appropriate time range
start_date = self.start_date
end_date = self.end_date
def getMaskedValue(in_value):
'''
Retrieve the value if not masked,
otherwise return np.nan
@param in_value: Input value to check
@return input value or nan
'''
if np.ma.is_masked(in_value):
return np.nan
else:
return in_value
if start_date == None or end_date == None:
csr_start_date, csr_end_date = getStartEndDate(csr_data)
jpl_start_date, jpl_end_date = getStartEndDate(jpl_data)
gfz_start_date, gfz_end_date = getStartEndDate(gfz_data)
if start_date == None:
start_date = np.min([csr_start_date, jpl_start_date, gfz_start_date])
if end_date == None:
end_date = np.max([csr_end_date, jpl_end_date, gfz_end_date])
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for (csr_label, csr_frame), (jpl_label, jpl_frame), (gfz_label, gfz_frame) in zip(csr_data.items(),
jpl_data.items(),
gfz_data.items()):
data = pd.concat([csr_frame.loc[start_date:end_date],
jpl_frame.loc[start_date:end_date],
gfz_frame.loc[start_date:end_date]], axis=1)
data.index.name = 'Date'
label = csr_label
metadata_dict[label] = pd.Series({'scale_factor' : getMaskedValue(scale_factor_data[csr_label]),
'measurement_error' : getMaskedValue(measurement_error_data[csr_label]),
'leakage_error' : getMaskedValue(leakage_error_data[csr_label])})
data_dict[label] = data
metadata_frame = pd.DataFrame.from_dict(metadata_dict)
return(TableWrapper(data_dict,meta_data = metadata_frame,default_columns=['CSR','JPL','GFZ']))
def __str__(self):
'''
String representation of data fetcher
@return String listing the name and geopoint of data fetcher
'''
return 'Grace Data Fetcher' + super(DataFetcher, self).__str__()
@classmethod
def downloadFullDataset(cls, out_file = 'grace.h5', use_file = None):
'''
Download and parse data from the Gravity Recovery and Climate Experiment.
@param out_file: Output filename for parsed data
@param use_file: Directory of already downloaded data. If None, data will be downloaded.
@return Absolute path of parsed data
'''
# Get date of grace data from filename
def setConfigFile(filename):
if re.search('SCALE_FACTOR', filename):
DataFetcher.setDataLocation('grace', filename, key='scale_factor_filename')
elif re.search('CSR', filename):
DataFetcher.setDataLocation('grace', filename, key='csr_filename')
elif re.search('GFZ', filename):
DataFetcher.setDataLocation('grace', filename, key='gfz_filename')
elif re.search('JPL', filename):
DataFetcher.setDataLocation('grace', filename, key='jpl_filename')
else:
return False
return True
if use_file is None:
print("Downloading GRACE Land Mass Data")
ftp = FTP("podaac-ftp.jpl.nasa.gov")
ftp.login()
ftp.cwd('/allData/tellus/L3/land_mass/RL05/netcdf')
dir_list = list(ftp.nlst(''))
file_list = [file for file in dir_list if re.search('.nc$', file)]
for filename in tqdm(file_list):
status = setConfigFile(filename)
if status == False:
print("Uknown file:", filename)
continue
ftp.retrbinary('RETR ' + filename, open(filename, 'wb').write)
ftp.quit()
DataFetcher.setDataLocation('grace', os.path.abspath('./'))
else:
files = glob(os.path.join(use_file, '*.nc'))
for filename in files:
status = setConfigFile(filename)
if status == False:
print('Unknown file')
DataFetcher.setDataLocation('grace', os.path.abspath(use_file)) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/grace/data_fetcher.py | data_fetcher.py |
# """@package GRACE
# Provides classes for accessing GRACE data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from skdaccess.utilities.grace_util import readTellusData, getStartEndDate
# standard library imports
import re
from ftplib import FTP
import os
import glob
from collections import OrderedDict
from configparser import NoSectionError, NoOptionError
from glob import glob
from math import floor
# 3rd party package imports
import pandas as pd
import numpy as np
from tqdm import tqdm
class DataFetcher(DataFetcherStorage):
''' Data Fetcher for GRACE data '''
def __init__(self, ap_paramList, start_date = None, end_date = None):
'''
Construct a Grace Data Fetcher
@param ap_paramList[geo_point]: AutoList of geographic location tuples (lat,lon)
@param start_date: Beginning date
@param end_date: Ending date
'''
self.start_date = start_date
self.end_date = end_date
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Create data wrapper of grace data for specified geopoints.
@return Grace Data Wrapper
'''
conf = DataFetcher.getConfig()
try:
data_location = conf.get('grace', 'data_location')
csr_filename = conf.get('grace', 'csr_filename')
jpl_filename = conf.get('grace', 'jpl_filename')
gfz_filename = conf.get('grace', 'gfz_filename')
scale_factor_filename = conf.get('grace', 'scale_factor_filename')
except (NoOptionError, NoSectionError) as exc:
print('No data information available, please run: skdaccess grace')
raise exc
geo_point_list = self.ap_paramList[0]()
csr_data, csr_meta, lat_bounds, lon_bounds = readTellusData(os.path.join(data_location, csr_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'CSR','time')
jpl_data, jpl_meta, = readTellusData(os.path.join(data_location, jpl_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'JPL','time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
gfz_data, gfz_meta, = readTellusData(os.path.join(data_location, gfz_filename), geo_point_list, 'lat','lon',
'lwe_thickness', 'GFZ','time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
scale_factor_data, scale_factor_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude', 'SCALE_FACTOR',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
leakage_error_data, leakage_error_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude', 'LEAKAGE_ERROR',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
measurement_error_data, measurement_error_meta, = readTellusData(os.path.join(data_location, scale_factor_filename),
geo_point_list, 'Latitude', 'Longitude',
'MEASUREMENT_ERROR', lat_bounds=lat_bounds,
lon_bounds=lon_bounds)[:2]
# Get appropriate time range
start_date = self.start_date
end_date = self.end_date
def getMaskedValue(in_value):
'''
Retrieve the value if not masked,
otherwise return np.nan
@param in_value: Input value to check
@return input value or nan
'''
if np.ma.is_masked(in_value):
return np.nan
else:
return in_value
if start_date == None or end_date == None:
csr_start_date, csr_end_date = getStartEndDate(csr_data)
jpl_start_date, jpl_end_date = getStartEndDate(jpl_data)
gfz_start_date, gfz_end_date = getStartEndDate(gfz_data)
if start_date == None:
start_date = np.min([csr_start_date, jpl_start_date, gfz_start_date])
if end_date == None:
end_date = np.max([csr_end_date, jpl_end_date, gfz_end_date])
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for (csr_label, csr_frame), (jpl_label, jpl_frame), (gfz_label, gfz_frame) in zip(csr_data.items(),
jpl_data.items(),
gfz_data.items()):
data = pd.concat([csr_frame.loc[start_date:end_date],
jpl_frame.loc[start_date:end_date],
gfz_frame.loc[start_date:end_date]], axis=1)
data.index.name = 'Date'
label = csr_label
metadata_dict[label] = pd.Series({'scale_factor' : getMaskedValue(scale_factor_data[csr_label]),
'measurement_error' : getMaskedValue(measurement_error_data[csr_label]),
'leakage_error' : getMaskedValue(leakage_error_data[csr_label])})
data_dict[label] = data
metadata_frame = pd.DataFrame.from_dict(metadata_dict)
return(TableWrapper(data_dict,meta_data = metadata_frame,default_columns=['CSR','JPL','GFZ']))
def __str__(self):
'''
String representation of data fetcher
@return String listing the name and geopoint of data fetcher
'''
return 'Grace Data Fetcher' + super(DataFetcher, self).__str__()
@classmethod
def downloadFullDataset(cls, out_file = 'grace.h5', use_file = None):
'''
Download and parse data from the Gravity Recovery and Climate Experiment.
@param out_file: Output filename for parsed data
@param use_file: Directory of already downloaded data. If None, data will be downloaded.
@return Absolute path of parsed data
'''
# Get date of grace data from filename
def setConfigFile(filename):
if re.search('SCALE_FACTOR', filename):
DataFetcher.setDataLocation('grace', filename, key='scale_factor_filename')
elif re.search('CSR', filename):
DataFetcher.setDataLocation('grace', filename, key='csr_filename')
elif re.search('GFZ', filename):
DataFetcher.setDataLocation('grace', filename, key='gfz_filename')
elif re.search('JPL', filename):
DataFetcher.setDataLocation('grace', filename, key='jpl_filename')
else:
return False
return True
if use_file is None:
print("Downloading GRACE Land Mass Data")
ftp = FTP("podaac-ftp.jpl.nasa.gov")
ftp.login()
ftp.cwd('/allData/tellus/L3/land_mass/RL05/netcdf')
dir_list = list(ftp.nlst(''))
file_list = [file for file in dir_list if re.search('.nc$', file)]
for filename in tqdm(file_list):
status = setConfigFile(filename)
if status == False:
print("Uknown file:", filename)
continue
ftp.retrbinary('RETR ' + filename, open(filename, 'wb').write)
ftp.quit()
DataFetcher.setDataLocation('grace', os.path.abspath('./'))
else:
files = glob(os.path.join(use_file, '*.nc'))
for filename in files:
status = setConfigFile(filename)
if status == False:
print('Unknown file')
DataFetcher.setDataLocation('grace', os.path.abspath(use_file)) | 0.594904 | 0.174235 |
# Standard library imports
from collections import OrderedDict
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.grace_util import readTellusData
# 3rd party imports
import pandas as pd
from netCDF4 import Dataset
class DataFetcher(DataFetcherCache):
'''
Data Fetcher for GRACE mascon data
'''
def __init__(self, ap_paramList, start_date = None, end_date = None):
'''
Construct a GRACE mascon Data Fetcher
@param ap_paramList[geo_point]: AutoList of geographic location tuples (lat,lon)
@param start_date: Beginning date
@param end_date: Ending date
'''
self.start_date = start_date
self.end_date = end_date
self.mascon_url = 'ftp://podaac.jpl.nasa.gov/allData/tellus/L3/mascon/RL05/JPL/CRI/netcdf/GRCTellus.JPL.200204_201706.GLO.RL05M_1.MSCNv02CRIv02.nc'
self.scale_factor_url = 'ftp://podaac.jpl.nasa.gov/allData/tellus/L3/mascon/RL05/JPL/CRI/netcdf/CLM4.SCALE_FACTOR.JPL.MSCNv01CRIv01.nc'
self.mascon_placement_url = 'ftp://podaac.jpl.nasa.gov/allData/tellus/L3/mascon/RL05/JPL/CRI/netcdf/JPL_MSCNv01_PLACEMENT.nc'
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Create a datawrapper containing GRACE mascon data
@return Table Datawrapper containing Mascon GRACE data
'''
geo_point_list = self.ap_paramList[0]()
file_list = self.cacheData('mascon', [self.mascon_url, self.scale_factor_url])
data, metadata, lat_bounds, lon_bounds = readTellusData(file_list[0], geo_point_list,'lat','lon','lwe_thickness', 'EWD', time_name='time',
lat_bounds_name='lat_bounds', lon_bounds_name='lon_bounds')
unc_data, unc_metadata = readTellusData(file_list[0], geo_point_list,'lat','lon','uncertainty', 'EWD_Error',
time_name='time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
scale_data, scale_metadata = readTellusData(file_list[1], geo_point_list, 'lat', 'lon', 'scale_factor',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
for data_name in data.keys():
data[data_name] = pd.concat([data[data_name], unc_data[data_name]], axis=1)
metadata[data_name]['scale_factor'] = scale_data[data_name]
if self.start_date != None or self.end_date != None:
for label in data.keys():
if self.start_date != None:
data[label] = data[label][self.start_date:]
if self.end_date != None:
data[label] = data[label][:self.end_date]
return TableWrapper(data, meta_data=metadata,
default_columns=['Equivalent Water Thickness'],
default_error_columns=['EWT Uncertainty'])
def getMasconPlacement(self):
'''
Retrieve mascon placement data
@return Mascon data, Mascon metadata
'''
file_list = self.cacheData('mascon', [self.mascon_placement_url])
placement = Dataset('JPL_MSCNv01_PLACEMENT.nc')
mascon_data = OrderedDict()
mascon_meta = OrderedDict()
for label, data in placement.variables.items():
mascon_data[label] = data[:]
mascon_meta = OrderedDict()
for meta_label in data.ncattrs():
mascon_meta[meta_label] = data.getncattr(meta_label)
return mascon_data, mascon_meta | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/grace/mascon/cache/data_fetcher.py | data_fetcher.py |
# Standard library imports
from collections import OrderedDict
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.grace_util import readTellusData
# 3rd party imports
import pandas as pd
from netCDF4 import Dataset
class DataFetcher(DataFetcherCache):
'''
Data Fetcher for GRACE mascon data
'''
def __init__(self, ap_paramList, start_date = None, end_date = None):
'''
Construct a GRACE mascon Data Fetcher
@param ap_paramList[geo_point]: AutoList of geographic location tuples (lat,lon)
@param start_date: Beginning date
@param end_date: Ending date
'''
self.start_date = start_date
self.end_date = end_date
self.mascon_url = 'ftp://podaac.jpl.nasa.gov/allData/tellus/L3/mascon/RL05/JPL/CRI/netcdf/GRCTellus.JPL.200204_201706.GLO.RL05M_1.MSCNv02CRIv02.nc'
self.scale_factor_url = 'ftp://podaac.jpl.nasa.gov/allData/tellus/L3/mascon/RL05/JPL/CRI/netcdf/CLM4.SCALE_FACTOR.JPL.MSCNv01CRIv01.nc'
self.mascon_placement_url = 'ftp://podaac.jpl.nasa.gov/allData/tellus/L3/mascon/RL05/JPL/CRI/netcdf/JPL_MSCNv01_PLACEMENT.nc'
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Create a datawrapper containing GRACE mascon data
@return Table Datawrapper containing Mascon GRACE data
'''
geo_point_list = self.ap_paramList[0]()
file_list = self.cacheData('mascon', [self.mascon_url, self.scale_factor_url])
data, metadata, lat_bounds, lon_bounds = readTellusData(file_list[0], geo_point_list,'lat','lon','lwe_thickness', 'EWD', time_name='time',
lat_bounds_name='lat_bounds', lon_bounds_name='lon_bounds')
unc_data, unc_metadata = readTellusData(file_list[0], geo_point_list,'lat','lon','uncertainty', 'EWD_Error',
time_name='time', lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
scale_data, scale_metadata = readTellusData(file_list[1], geo_point_list, 'lat', 'lon', 'scale_factor',
lat_bounds=lat_bounds, lon_bounds=lon_bounds)[:2]
for data_name in data.keys():
data[data_name] = pd.concat([data[data_name], unc_data[data_name]], axis=1)
metadata[data_name]['scale_factor'] = scale_data[data_name]
if self.start_date != None or self.end_date != None:
for label in data.keys():
if self.start_date != None:
data[label] = data[label][self.start_date:]
if self.end_date != None:
data[label] = data[label][:self.end_date]
return TableWrapper(data, meta_data=metadata,
default_columns=['Equivalent Water Thickness'],
default_error_columns=['EWT Uncertainty'])
def getMasconPlacement(self):
'''
Retrieve mascon placement data
@return Mascon data, Mascon metadata
'''
file_list = self.cacheData('mascon', [self.mascon_placement_url])
placement = Dataset('JPL_MSCNv01_PLACEMENT.nc')
mascon_data = OrderedDict()
mascon_meta = OrderedDict()
for label, data in placement.variables.items():
mascon_data[label] = data[:]
mascon_meta = OrderedDict()
for meta_label in data.ncattrs():
mascon_meta[meta_label] = data.getncattr(meta_label)
return mascon_data, mascon_meta | 0.579043 | 0.184988 |
# Standard library imports
from collections import OrderedDict
import os
# skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.image_util import SplineLatLon
from skdaccess.utilities.uavsar_util import readUAVSARMetadata
# 3rd party imports
import numpy as np
class DataFetcher(DataFetcherCache):
''' Data Fetcher for UAVSAR data '''
def __init__(self, slc_url_list, metadata_url_list, llh_url, memmap):
'''
Initialize UAVSAR data fetcher
@param slc_url_list: List of slc urls
@param metadata_url_list: List of metadata urls
@param llh_url: Latitude Longitude Height url
@param memmap: Open files using a memory map
'''
self.slc_url_list = slc_url_list
self.metadata_url_list = metadata_url_list
self.llh_url = llh_url
self.memmap = memmap
super(DataFetcher, self).__init__()
def _parseFilename(self, in_filename):
'''
Retrive information about UAVSAR data from filename
@param in_filename: Input filename
@return information obtained from filename
'''
filename = os.path.basename(in_filename)
filename_info = OrderedDict()
extension = filename[-3:]
split_filename = filename[:-4].split('_')
filename_info['site name'] = split_filename[0]
filename_info['line ID'] = split_filename[1]
if extension == 'llh':
filename_info['stack number'] = split_filename[2]
filename_info['baseline correction'] = split_filename[3]
filename_info['segment number'] = split_filename[4]
filename_info['downsample factor'] = split_filename[5]
if extension == 'slc':
filename_info['flight ID'] = split_filename[2]
filename_info['data take counter'] = split_filename[3]
filename_info['acquisition date'] = split_filename[4]
filename_info['band'] = split_filename[5][0]
filename_info['steering'] = split_filename[5][1:4]
filename_info['polarization'] = split_filename[5][4:]
filename_info['stack_version'] = split_filename[6]
filename_info['baseline correction'] = split_filename[7]
filename_info['segment number'] = split_filename[8]
filename_info['downsample factor'] = split_filename[9]
filename_info['extension'] = extension
return filename_info
def _readUAVSARData(self, filename, metadata, memmap = False):
'''
Load UAVSAR data
@param filename: Input filename
@param metadata: UAVSAR metadata
@param memeap: Open file using a memory map
@return numpy array of data
'''
filename_info = self._parseFilename(filename)
cols = metadata[filename_info['extension'] + '_' +
filename_info['segment number'][1] + '_' +
filename_info['downsample factor'] +
' Columns']
rows = metadata[filename_info['extension'] + '_' +
filename_info['segment number'][1] + '_' +
filename_info['downsample factor'] +
' Rows']
if filename_info['extension'] == 'slc':
dtype = np.dtype('<c8')
elif filename_info['extension'] == 'llh':
dtype = np.dtype([('Latitude','<f4'),
('Longitude','<f4'),
('Height','<f4')])
if memmap == True:
return np.memmap(filename, dtype=dtype, mode='r', shape=(rows,cols)), filename_info
else:
return np.fromfile(filename, dtype=dtype).reshape(rows,cols), filename_info
def output(self):
'''
Output data as a data wrapper
@return Imagewrapper of data
'''
llh_filename = self.cacheData('uavsar', [self.llh_url])
filename_list = self.cacheData('uavsar', self.slc_url_list)
metadata_filename_list = self.cacheData('uavsar', self.metadata_url_list)
llh,llh_info = self._readUAVSARData(llh_filename[0],
readUAVSARMetadata(metadata_filename_list[0]))
metadata_dict = OrderedDict()
data_dict = OrderedDict()
for filename, metadata_filename in zip(filename_list, metadata_filename_list):
filename_key = os.path.basename(filename)
metadata_dict[filename_key] = OrderedDict()
data_metadata = readUAVSARMetadata(metadata_filename)
data, data_filename_info = self._readUAVSARData(filename, data_metadata, self.memmap)
metadata_dict[filename_key]['filename_info'] = data_filename_info
metadata_dict[filename_key]['metadata'] = data_metadata
metadata_dict[filename_key]['Latitude'] = llh['Latitude']
metadata_dict[filename_key]['Longitude'] = llh['Longitude']
metadata_dict[filename_key]['Height'] = llh['Height']
data_dict[filename_key] = data
return ImageWrapper(data_dict, meta_data = metadata_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/uavsar/cache/data_fetcher.py | data_fetcher.py |
# Standard library imports
from collections import OrderedDict
import os
# skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.image_util import SplineLatLon
from skdaccess.utilities.uavsar_util import readUAVSARMetadata
# 3rd party imports
import numpy as np
class DataFetcher(DataFetcherCache):
''' Data Fetcher for UAVSAR data '''
def __init__(self, slc_url_list, metadata_url_list, llh_url, memmap):
'''
Initialize UAVSAR data fetcher
@param slc_url_list: List of slc urls
@param metadata_url_list: List of metadata urls
@param llh_url: Latitude Longitude Height url
@param memmap: Open files using a memory map
'''
self.slc_url_list = slc_url_list
self.metadata_url_list = metadata_url_list
self.llh_url = llh_url
self.memmap = memmap
super(DataFetcher, self).__init__()
def _parseFilename(self, in_filename):
'''
Retrive information about UAVSAR data from filename
@param in_filename: Input filename
@return information obtained from filename
'''
filename = os.path.basename(in_filename)
filename_info = OrderedDict()
extension = filename[-3:]
split_filename = filename[:-4].split('_')
filename_info['site name'] = split_filename[0]
filename_info['line ID'] = split_filename[1]
if extension == 'llh':
filename_info['stack number'] = split_filename[2]
filename_info['baseline correction'] = split_filename[3]
filename_info['segment number'] = split_filename[4]
filename_info['downsample factor'] = split_filename[5]
if extension == 'slc':
filename_info['flight ID'] = split_filename[2]
filename_info['data take counter'] = split_filename[3]
filename_info['acquisition date'] = split_filename[4]
filename_info['band'] = split_filename[5][0]
filename_info['steering'] = split_filename[5][1:4]
filename_info['polarization'] = split_filename[5][4:]
filename_info['stack_version'] = split_filename[6]
filename_info['baseline correction'] = split_filename[7]
filename_info['segment number'] = split_filename[8]
filename_info['downsample factor'] = split_filename[9]
filename_info['extension'] = extension
return filename_info
def _readUAVSARData(self, filename, metadata, memmap = False):
'''
Load UAVSAR data
@param filename: Input filename
@param metadata: UAVSAR metadata
@param memeap: Open file using a memory map
@return numpy array of data
'''
filename_info = self._parseFilename(filename)
cols = metadata[filename_info['extension'] + '_' +
filename_info['segment number'][1] + '_' +
filename_info['downsample factor'] +
' Columns']
rows = metadata[filename_info['extension'] + '_' +
filename_info['segment number'][1] + '_' +
filename_info['downsample factor'] +
' Rows']
if filename_info['extension'] == 'slc':
dtype = np.dtype('<c8')
elif filename_info['extension'] == 'llh':
dtype = np.dtype([('Latitude','<f4'),
('Longitude','<f4'),
('Height','<f4')])
if memmap == True:
return np.memmap(filename, dtype=dtype, mode='r', shape=(rows,cols)), filename_info
else:
return np.fromfile(filename, dtype=dtype).reshape(rows,cols), filename_info
def output(self):
'''
Output data as a data wrapper
@return Imagewrapper of data
'''
llh_filename = self.cacheData('uavsar', [self.llh_url])
filename_list = self.cacheData('uavsar', self.slc_url_list)
metadata_filename_list = self.cacheData('uavsar', self.metadata_url_list)
llh,llh_info = self._readUAVSARData(llh_filename[0],
readUAVSARMetadata(metadata_filename_list[0]))
metadata_dict = OrderedDict()
data_dict = OrderedDict()
for filename, metadata_filename in zip(filename_list, metadata_filename_list):
filename_key = os.path.basename(filename)
metadata_dict[filename_key] = OrderedDict()
data_metadata = readUAVSARMetadata(metadata_filename)
data, data_filename_info = self._readUAVSARData(filename, data_metadata, self.memmap)
metadata_dict[filename_key]['filename_info'] = data_filename_info
metadata_dict[filename_key]['metadata'] = data_metadata
metadata_dict[filename_key]['Latitude'] = llh['Latitude']
metadata_dict[filename_key]['Longitude'] = llh['Longitude']
metadata_dict[filename_key]['Height'] = llh['Height']
data_dict[filename_key] = data
return ImageWrapper(data_dict, meta_data = metadata_dict) | 0.627837 | 0.21684 |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
from skdaccess.utilities.sounding_util import SoundingParser, generateQueries
# 3rd party imports
import pandas as pd
import numpy as np
from six.moves.urllib.request import urlopen
# Standard library imports
from collections import OrderedDict
class DataFetcher(DataFetcherStream):
''' DataFetcher for retrieving Wyoming Sounding data '''
def __init__(self, station_number, year, month, day_start, day_end, start_hour = 0, end_hour = 12):
'''
Initialize Data Fetcher
@param station_number: Station number
@param year: Input year
@param month: Input month (Integer for a single month, or a list of integers for multiple months)
@param day_start: First day of the month to include
@param day_end: Last day of the month to include
@param start_hour: Starting hour (may be either 0 or 12)
@param end_hour: Ending hour (may be either 0 or 12)
'''
self.station_number = station_number
if np.isscalar(year):
self.year_list = [year]
else:
self.year_list = year
if np.isscalar(month):
self.month_list = [month]
else:
self.month_list = month
self.day_start = day_start
self.day_end = day_end
self.start_hour = start_hour
self.end_hour = end_hour
super(DataFetcher, self).__init__()
def output(self, shared_lock = None, shared_list = None):
'''
Generate data wrapper
@return Wyoming sounding data in a data wrapper
'''
full_results_dict = OrderedDict()
full_meta_dict = OrderedDict()
for query_url in generateQueries(self.station_number, self.year_list, self.month_list, self.day_start,
self.day_end, self.start_hour, self.end_hour):
with urlopen(query_url) as in_data:
sp = SoundingParser()
sp.feed(in_data.read().decode())
for key, data in sp.data_dict.items():
full_results_dict[key] = data
for key, data in sp.metadata_dict.items():
full_meta_dict[key] = data
return TableWrapper(obj_wrap = full_results_dict, meta_data = full_meta_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/wyoming_sounding/stream/data_fetcher.py | data_fetcher.py |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
from skdaccess.utilities.sounding_util import SoundingParser, generateQueries
# 3rd party imports
import pandas as pd
import numpy as np
from six.moves.urllib.request import urlopen
# Standard library imports
from collections import OrderedDict
class DataFetcher(DataFetcherStream):
''' DataFetcher for retrieving Wyoming Sounding data '''
def __init__(self, station_number, year, month, day_start, day_end, start_hour = 0, end_hour = 12):
'''
Initialize Data Fetcher
@param station_number: Station number
@param year: Input year
@param month: Input month (Integer for a single month, or a list of integers for multiple months)
@param day_start: First day of the month to include
@param day_end: Last day of the month to include
@param start_hour: Starting hour (may be either 0 or 12)
@param end_hour: Ending hour (may be either 0 or 12)
'''
self.station_number = station_number
if np.isscalar(year):
self.year_list = [year]
else:
self.year_list = year
if np.isscalar(month):
self.month_list = [month]
else:
self.month_list = month
self.day_start = day_start
self.day_end = day_end
self.start_hour = start_hour
self.end_hour = end_hour
super(DataFetcher, self).__init__()
def output(self, shared_lock = None, shared_list = None):
'''
Generate data wrapper
@return Wyoming sounding data in a data wrapper
'''
full_results_dict = OrderedDict()
full_meta_dict = OrderedDict()
for query_url in generateQueries(self.station_number, self.year_list, self.month_list, self.day_start,
self.day_end, self.start_hour, self.end_hour):
with urlopen(query_url) as in_data:
sp = SoundingParser()
sp.feed(in_data.read().decode())
for key, data in sp.data_dict.items():
full_results_dict[key] = data
for key, data in sp.metadata_dict.items():
full_meta_dict[key] = data
return TableWrapper(obj_wrap = full_results_dict, meta_data = full_meta_dict) | 0.765111 | 0.416619 |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.sounding_util import SoundingParser, generateQueries
from skdaccess.utilities.support import convertToStr
# 3rd party imports
import pandas as pd
import numpy as np
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving Wyoming Sounding data '''
def __init__(self, station_number, year, month, day_start, day_end, start_hour = 0, end_hour = 12):
'''
Initialize Data Fetcher
@param station_number: Station number
@param year: Input year
@param month: Input month (Integer for a single month, or a list of integers for multiple months)
@param day_start: First day of the month to include
@param day_end: Last day of the month to include
@param start_hour: Starting hour (may be either 0 or 12)
@param end_hour: Ending hour (may be either 0 or 12)
'''
self.station_number = station_number
if np.isscalar(year):
self.year_list = [year]
else:
self.year_list = year
if np.isscalar(month):
self.month_list = [month]
else:
self.month_list = month
self.day_start = day_start
self.day_end = day_end
self.start_hour = start_hour
self.end_hour = end_hour
super(DataFetcher, self).__init__()
def output(self):
'''
Generate data wrapper
@return Wyoming sounding data in a data wrapper
'''
url_list = generateQueries(self.station_number, self.year_list, self.month_list, 1,
31, 0, 12)
file_list = self.cacheData('wyoming_sounding', url_list)
full_data_dict = OrderedDict()
full_meta_dict = OrderedDict()
for filename in file_list:
with open(filename, 'r') as sounding_data:
sp = SoundingParser()
sp.feed(sounding_data.read())
for label, data in sp.data_dict.items():
data_date = pd.to_datetime(sp.metadata_dict[label]['metadata']['Observation time'],
format='%y%m%d/%H%M')
data_hour = int(data_date.strftime('%H'))
data_day = int(data_date.strftime('%d'))
if data_day >= int(self.day_start) and \
data_day <= int(self.day_end) and \
data_hour >= int(self.start_hour) and \
data_hour <= int(self.end_hour):
full_data_dict[label] = data
full_meta_dict[label] = sp.metadata_dict[label]
return TableWrapper(obj_wrap = full_data_dict, meta_data = full_meta_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/wyoming_sounding/cache/data_fetcher.py | data_fetcher.py |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.utilities.sounding_util import SoundingParser, generateQueries
from skdaccess.utilities.support import convertToStr
# 3rd party imports
import pandas as pd
import numpy as np
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving Wyoming Sounding data '''
def __init__(self, station_number, year, month, day_start, day_end, start_hour = 0, end_hour = 12):
'''
Initialize Data Fetcher
@param station_number: Station number
@param year: Input year
@param month: Input month (Integer for a single month, or a list of integers for multiple months)
@param day_start: First day of the month to include
@param day_end: Last day of the month to include
@param start_hour: Starting hour (may be either 0 or 12)
@param end_hour: Ending hour (may be either 0 or 12)
'''
self.station_number = station_number
if np.isscalar(year):
self.year_list = [year]
else:
self.year_list = year
if np.isscalar(month):
self.month_list = [month]
else:
self.month_list = month
self.day_start = day_start
self.day_end = day_end
self.start_hour = start_hour
self.end_hour = end_hour
super(DataFetcher, self).__init__()
def output(self):
'''
Generate data wrapper
@return Wyoming sounding data in a data wrapper
'''
url_list = generateQueries(self.station_number, self.year_list, self.month_list, 1,
31, 0, 12)
file_list = self.cacheData('wyoming_sounding', url_list)
full_data_dict = OrderedDict()
full_meta_dict = OrderedDict()
for filename in file_list:
with open(filename, 'r') as sounding_data:
sp = SoundingParser()
sp.feed(sounding_data.read())
for label, data in sp.data_dict.items():
data_date = pd.to_datetime(sp.metadata_dict[label]['metadata']['Observation time'],
format='%y%m%d/%H%M')
data_hour = int(data_date.strftime('%H'))
data_day = int(data_date.strftime('%d'))
if data_day >= int(self.day_start) and \
data_day <= int(self.day_end) and \
data_hour >= int(self.start_hour) and \
data_hour <= int(self.end_hour):
full_data_dict[label] = data
full_meta_dict[label] = sp.metadata_dict[label]
return TableWrapper(obj_wrap = full_data_dict, meta_data = full_meta_dict) | 0.762336 | 0.409221 |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.support import convertToStr
from skdaccess.utilities.sentinel_1_util import parseSatelliteData
# 3rd party imports
import pandas as pd
import numpy as np
from osgeo import gdal
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
from zipfile import ZipFile
import xml.etree.ElementTree as ET
from scipy.constants import c
import os
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving Sentinel SLC data '''
def __init__(self, url_list, satellite_url_list, username, password, swath, polarization = 'VV', local_paths=False, verbose=True):
'''
Initialize Sentinel Data Fetcher
@param url_list: List of urls of SLC data
@param satellite_url_list: List of satellite urls
@param username: Username for downloading data
@param password: Password for downloading data
@param swath: Swath number (1, 2, or 3)
@param polarization: Polarization of data to retrieve
@param local_paths: locations are local paths, not urls
@param verbose: Print additional information
'''
self.url_list = url_list
self.satellite_url_list = satellite_url_list
self.swath = swath
self.username = username
self.password = password
self.polarization = polarization
self.local_paths = local_paths
super(DataFetcher, self).__init__(verbose=verbose)
def output(self):
'''
Generate data wrapper
@return Sentinel SLC data in a data wrapper
'''
# Check that the number of images matches the number of orbit files
num_images = len(self.url_list)
if num_images != len(self.satellite_url_list):
raise ValueError('Different number of slc and satellite urls')
if not self.local_paths:
self.verbose_print('Retrieving SLC data', flush=True)
file_list = self.cacheData('sentinel_1', self.url_list, self.username, self.password,
use_requests=True, use_progress_bar=self.verbose)
self.verbose_print('Retrieving orbit files', flush=True)
satellite_file_list = self.cacheData('sentinel_1', self.satellite_url_list, self.username, self.password,
use_requests=True, use_progress_bar=self.verbose)
self.verbose_print('All files retrieved', flush=True)
else:
file_list = self.url_list
satellite_file_list = self.satellite_url_list
metadata = OrderedDict()
data_dict = OrderedDict()
for index, (filepath, satellite_filepath) in enumerate(zip(file_list, satellite_file_list)):
filename = os.path.split(filepath)[1]
filename_unzipped = filename[:-3] + 'SAFE'
gdal_path = '/vsizip/' + os.path.join(filepath, filename_unzipped) + ':IW' + convertToStr(self.swath) + '_' + self.polarization
dataset = gdal.Open('SENTINEL1_DS:' + gdal_path)
metadata_filename = os.path.split(dataset.GetFileList()[1])[-1]
metadata[filename] = OrderedDict()
with ZipFile(filepath, 'r') as zipped_file:
metadata[filename]['Tree'] = ET.parse(zipped_file.open(os.path.join(filename_unzipped, 'annotation', metadata_filename)))
radar_freq = float(metadata[filename]['Tree'].find('generalAnnotation/productInformation/radarFrequency').text)
radar_lambda = c/radar_freq
metadata[filename]['Wavelength'] = radar_lambda
metadata[filename]['Orbit'] = parseSatelliteData(satellite_filepath)
# Currently a bug when reading in data using Sentinel-1 Driver
# Directly reading the tif file to avoid issues
# data_dict[filename] = dataset.ReadAsArray()
data_dict[filename] = gdal.Open(dataset.GetFileList()[2]).ReadAsArray()
return ImageWrapper(data_dict, meta_data=metadata) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/sentinel_1/cache/data_fetcher.py | data_fetcher.py |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.support import convertToStr
from skdaccess.utilities.sentinel_1_util import parseSatelliteData
# 3rd party imports
import pandas as pd
import numpy as np
from osgeo import gdal
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
from zipfile import ZipFile
import xml.etree.ElementTree as ET
from scipy.constants import c
import os
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving Sentinel SLC data '''
def __init__(self, url_list, satellite_url_list, username, password, swath, polarization = 'VV', local_paths=False, verbose=True):
'''
Initialize Sentinel Data Fetcher
@param url_list: List of urls of SLC data
@param satellite_url_list: List of satellite urls
@param username: Username for downloading data
@param password: Password for downloading data
@param swath: Swath number (1, 2, or 3)
@param polarization: Polarization of data to retrieve
@param local_paths: locations are local paths, not urls
@param verbose: Print additional information
'''
self.url_list = url_list
self.satellite_url_list = satellite_url_list
self.swath = swath
self.username = username
self.password = password
self.polarization = polarization
self.local_paths = local_paths
super(DataFetcher, self).__init__(verbose=verbose)
def output(self):
'''
Generate data wrapper
@return Sentinel SLC data in a data wrapper
'''
# Check that the number of images matches the number of orbit files
num_images = len(self.url_list)
if num_images != len(self.satellite_url_list):
raise ValueError('Different number of slc and satellite urls')
if not self.local_paths:
self.verbose_print('Retrieving SLC data', flush=True)
file_list = self.cacheData('sentinel_1', self.url_list, self.username, self.password,
use_requests=True, use_progress_bar=self.verbose)
self.verbose_print('Retrieving orbit files', flush=True)
satellite_file_list = self.cacheData('sentinel_1', self.satellite_url_list, self.username, self.password,
use_requests=True, use_progress_bar=self.verbose)
self.verbose_print('All files retrieved', flush=True)
else:
file_list = self.url_list
satellite_file_list = self.satellite_url_list
metadata = OrderedDict()
data_dict = OrderedDict()
for index, (filepath, satellite_filepath) in enumerate(zip(file_list, satellite_file_list)):
filename = os.path.split(filepath)[1]
filename_unzipped = filename[:-3] + 'SAFE'
gdal_path = '/vsizip/' + os.path.join(filepath, filename_unzipped) + ':IW' + convertToStr(self.swath) + '_' + self.polarization
dataset = gdal.Open('SENTINEL1_DS:' + gdal_path)
metadata_filename = os.path.split(dataset.GetFileList()[1])[-1]
metadata[filename] = OrderedDict()
with ZipFile(filepath, 'r') as zipped_file:
metadata[filename]['Tree'] = ET.parse(zipped_file.open(os.path.join(filename_unzipped, 'annotation', metadata_filename)))
radar_freq = float(metadata[filename]['Tree'].find('generalAnnotation/productInformation/radarFrequency').text)
radar_lambda = c/radar_freq
metadata[filename]['Wavelength'] = radar_lambda
metadata[filename]['Orbit'] = parseSatelliteData(satellite_filepath)
# Currently a bug when reading in data using Sentinel-1 Driver
# Directly reading the tif file to avoid issues
# data_dict[filename] = dataset.ReadAsArray()
data_dict[filename] = gdal.Open(dataset.GetFileList()[2]).ReadAsArray()
return ImageWrapper(data_dict, meta_data=metadata) | 0.685002 | 0.364976 |
# """@package GLDAS
# Provides classes for accessing GLDAS data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from skdaccess.utilities.grace_util import readTellusData, getStartEndDate
# Standard library imports
import os
from ftplib import FTP
import re
from collections import OrderedDict
# 3rd party package imports
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStorage):
''' Data Fetcher for GLDAS data '''
def __init__(self, ap_paramList, start_date = None, end_date = None, resample = False):
'''
Construct a GLDAS Data Fetcher
@param ap_paramList[geo_point]: Autolist of Geographic location tuples
@param start_date: Beginning date
@param end_date: Ending date
@param resample: Resample the data to daily resolution, leaving NaN's in days without data (Default True)
'''
self.start_date = start_date
self.end_date = end_date
self.resample = resample
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Create data wrapper of GLDAS data for specified geopoint.
@return GLDAS Data Wrapper
'''
data_file = DataFetcher.getDataLocation('gldas')
if data_file is None:
print("No data available")
return None
geo_point_list = self.ap_paramList[0]()
gldas_data_name = 'Equivalent Water Thickness (cm)'
full_data, metadata = readTellusData(data_file, geo_point_list, 'Latitude','Longitude',
'Water_Thickness', gldas_data_name, 'Time')[:2]
# Get appropriate time range
if self.start_date == None or self.end_date == None:
start_date, end_date = getStartEndDate(full_data)
if self.start_date != None:
start_date = self.start_date
elif type(self.start_date) == str:
start_date = pd.to_datetime(self.start_date)
if self.end_date != None:
end_date = self.end_date
elif type(self.end_date) == str:
end_date == pd.to_datetime(self.end_date)
for label in full_data.keys():
full_data[label] = full_data[label][start_date:end_date]
gldas_unc = pd.Series(np.ones(len(full_data[label]),dtype=np.float) * np.nan, index=full_data[label].index,name="Uncertainty")
full_data[label] = pd.concat([full_data[label], gldas_unc], axis=1)
if self.resample == True:
full_data[label] = full_data[label].reindex(pd.date_range(start_date, end_date))
return(TableWrapper(full_data, default_columns = ['Equivalent Water Thickness (cm)'],
default_error_columns=['Uncertainty']))
@classmethod
def downloadFullDataset(cls, out_file=None, use_file=None):
'''
Download GLDAS data
@param out_file: Output filename for parsed data
@param use_file: Directory of downloaded data. If None, data will be downloaded.
@return Absolute path of parsed data
'''
# No post processing for this data is necessary. If local data is
# specified, just set its location.
if use_file != None:
print('Setting data location for local data')
return os.path.abspath(use_file)
# If no local data, download data from server
print("Downloading GLDAS Land Mass Data")
ftp = FTP("podaac-ftp.jpl.nasa.gov")
ftp.login()
ftp.cwd('allData/tellus/L3/gldas_monthly/netcdf/')
dir_list = list(ftp.nlst(''))
file_list = [file for file in dir_list if re.search('.nc$', file)]
if len(file_list) > 1:
raise ValueError('Too many files found in GLDAS directory')
if out_file == None:
out_file = file_list[0]
ftp.retrbinary('RETR ' + file_list[0], open(''+out_file, 'wb').write)
cls.setDataLocation('gldas', os.path.abspath(file_list[0]))
def __str__(self):
'''
String representation of data fetcher
@return String listing the name and geopoint of data fetcher
'''
return 'GLDAS Data Fetcher' + super(DataFetcher, self).__str__() | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/gldas/data_fetcher.py | data_fetcher.py |
# """@package GLDAS
# Provides classes for accessing GLDAS data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from skdaccess.utilities.grace_util import readTellusData, getStartEndDate
# Standard library imports
import os
from ftplib import FTP
import re
from collections import OrderedDict
# 3rd party package imports
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStorage):
''' Data Fetcher for GLDAS data '''
def __init__(self, ap_paramList, start_date = None, end_date = None, resample = False):
'''
Construct a GLDAS Data Fetcher
@param ap_paramList[geo_point]: Autolist of Geographic location tuples
@param start_date: Beginning date
@param end_date: Ending date
@param resample: Resample the data to daily resolution, leaving NaN's in days without data (Default True)
'''
self.start_date = start_date
self.end_date = end_date
self.resample = resample
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Create data wrapper of GLDAS data for specified geopoint.
@return GLDAS Data Wrapper
'''
data_file = DataFetcher.getDataLocation('gldas')
if data_file is None:
print("No data available")
return None
geo_point_list = self.ap_paramList[0]()
gldas_data_name = 'Equivalent Water Thickness (cm)'
full_data, metadata = readTellusData(data_file, geo_point_list, 'Latitude','Longitude',
'Water_Thickness', gldas_data_name, 'Time')[:2]
# Get appropriate time range
if self.start_date == None or self.end_date == None:
start_date, end_date = getStartEndDate(full_data)
if self.start_date != None:
start_date = self.start_date
elif type(self.start_date) == str:
start_date = pd.to_datetime(self.start_date)
if self.end_date != None:
end_date = self.end_date
elif type(self.end_date) == str:
end_date == pd.to_datetime(self.end_date)
for label in full_data.keys():
full_data[label] = full_data[label][start_date:end_date]
gldas_unc = pd.Series(np.ones(len(full_data[label]),dtype=np.float) * np.nan, index=full_data[label].index,name="Uncertainty")
full_data[label] = pd.concat([full_data[label], gldas_unc], axis=1)
if self.resample == True:
full_data[label] = full_data[label].reindex(pd.date_range(start_date, end_date))
return(TableWrapper(full_data, default_columns = ['Equivalent Water Thickness (cm)'],
default_error_columns=['Uncertainty']))
@classmethod
def downloadFullDataset(cls, out_file=None, use_file=None):
'''
Download GLDAS data
@param out_file: Output filename for parsed data
@param use_file: Directory of downloaded data. If None, data will be downloaded.
@return Absolute path of parsed data
'''
# No post processing for this data is necessary. If local data is
# specified, just set its location.
if use_file != None:
print('Setting data location for local data')
return os.path.abspath(use_file)
# If no local data, download data from server
print("Downloading GLDAS Land Mass Data")
ftp = FTP("podaac-ftp.jpl.nasa.gov")
ftp.login()
ftp.cwd('allData/tellus/L3/gldas_monthly/netcdf/')
dir_list = list(ftp.nlst(''))
file_list = [file for file in dir_list if re.search('.nc$', file)]
if len(file_list) > 1:
raise ValueError('Too many files found in GLDAS directory')
if out_file == None:
out_file = file_list[0]
ftp.retrbinary('RETR ' + file_list[0], open(''+out_file, 'wb').write)
cls.setDataLocation('gldas', os.path.abspath(file_list[0]))
def __str__(self):
'''
String representation of data fetcher
@return String listing the name and geopoint of data fetcher
'''
return 'GLDAS Data Fetcher' + super(DataFetcher, self).__str__() | 0.623721 | 0.325735 |
# """@package Groundwater
# Provides classes for accessing Groundwater data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper, SeriesWrapper
# Python Standard Library
from collections import OrderedDict
import re
import os
from six.moves.urllib.error import HTTPError
from six.moves.urllib.request import urlopen
from shutil import copyfileobj
from io import StringIO
# 3rd party package imports
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStorage):
'''
Generates Data Wrappers of groundwater measurements taken in the US
'''
def __init__(self, ap_paramList = [], start_date = None, end_date = None, cutoff=0.75):
'''
Construct a Groundwater Data Fetcher
@param ap_paramList[LowerLat]: Autoparam Lower latitude
@param ap_paramList[UpperLat]: Autoparam Upper latitude
@param ap_paramList[LeftLon]: Autoparam Left longitude
@param ap_paramList[RightLon]: Autoparam Right longitude
@param start_date: Starting date (defualt: None)
@param end_date: Ending date (default: None)
@param cutoff: Required amount of data for each station
'''
self.start_date = pd.to_datetime(start_date)
self.end_date = pd.to_datetime(end_date)
self.ap_paramList = ap_paramList
self.cutoff = cutoff
def output(self):
'''
Fetch Groundwater Data Wrapper
@return Groundwater Data Wrapper
'''
meta_data = DataFetcher.getStationMetadata()
data_file = DataFetcher.getDataLocation('groundwater')
if data_file is None:
print("No data available")
return None
if len(self.ap_paramList) == 1:
station_list = self.ap_paramList[0]()
elif len(self.ap_paramList) == 4:
llat = self.ap_paramList[0]()
ulat = self.ap_paramList[1]()
llon = self.ap_paramList[2]()
rlon = self.ap_paramList[3]()
station_index = np.logical_and.reduce([meta_data.Lat > llat, meta_data.Lat < ulat,
meta_data.Lon > llon, meta_data.Lon < rlon])
cut_metadata = meta_data[station_index]
station_list = cut_metadata[cut_metadata['Data Available'] == 1].index.tolist()
else:
station_list = None
data_dict = OrderedDict()
store = pd.HDFStore(data_file, 'r')
if station_list == None:
stations = [str(site) for site in meta_data[meta_data['Data Available']==1].index]
else:
stations = station_list
for station in stations:
if self.start_date != None and self.end_date != None:
data = store['USGS' + str(station)].reindex(pd.date_range(self.start_date, self.end_date))
else:
data = store['USGS' + str(station)]
if len(data.dropna()) / len(data) >= self.cutoff:
data_dict[int(station)] = data
store.close()
return(TableWrapper(data_dict, meta_data=meta_data, default_columns=['Median Depth to Water']))
def __str__(self):
'''
String representation of data fetcher
@return string describing data fetcher
'''
return 'Ground Water Data Fetcher' + super(DataFetcher, self).__str__()
def getStationMetadata():
'''
Retrieve metadata on groundwater wells
@return pandas dataframe with groundwater well information
'''
data_file = DataFetcher.getDataLocation('groundwater')
if data_file is None:
print('Dataset not available')
return None
store = pd.HDFStore(data_file,'r')
meta_data = store['meta_data']
store.close()
return meta_data
@classmethod
def downloadFullDataset(cls, out_file = 'gw.h5', use_file = None):
'''
Download and parse US groundwater data provided by USGS
@param out_file: Output filename for parsed data
@param use_file: Specify the directory where the data is.
If None, the function will download the data
@return Absolute path of parsed data
'''
# Function that converts a string to a float
def convert_to_float(x):
try:
return np.float(x)
except:
return np.nan
# Function to test if a string can
# be converted to a float
def is_valid_number(x):
try:
test = np.float(x)
return True
except:
return False
# Returns 'No comment' for strings that
# can be interpreted as a float,
# and returns the string if it can't
# be interpreted as a float
def comment(x):
try:
test = np.float(x)
return 'No comment'
except:
return x
# Abbreviations of all 50 states
state_list = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY']
full_meta_data = None
# temporary data storage
data_dict = OrderedDict()
data_filename_list = []
metadata_filename_list = []
for state in state_list:
data_filename = state + '_gw_data.rdb'
metadata_filename = state + '_gw_metadata.rdb'
if use_file is None:
print("Downloading", state, "data")
data_file = open(data_filename, 'wb')
metadata_file = open(metadata_filename, 'wb')
try:
# Download data
copyfileobj(urlopen('http://waterservices.usgs.gov/nwis/dv/?format=rdb&stateCd=' + state +
'&startDT=1800-01-01&endDT=2020-12-31&statCd=00003,00008¶meterCd=72019&siteType=GW'),
data_file)
data_file.close()
# Download meta data
copyfileobj(urlopen('http://waterservices.usgs.gov/nwis/site/?format=rdb&stateCd=' + state +
'&startDT=1800-01-01&endDT=2020-12-31¶meterCd=72019&siteType=GW&hasDataTypeCd=dv'),
metadata_file)
except HTTPError:
print('No data for', state)
finally:
data_file.close()
metadata_file.close()
else:
data_filename = use_file + data_filename
metadata_filename = use_file + metadata_filename
# store data filename and metadata filename
data_filename_list.append(data_filename)
metadata_filename_list.append(metadata_filename)
for data_filename, metadata_filename, state_abbrev in zip(data_filename_list, metadata_filename_list, state_list):
print("Processing ", state_abbrev, ': ', data_filename, sep='')
#Read metadata
meta_data = pd.read_table(metadata_filename, skiprows=31, names = ['Agency', 'Site Number', 'Site Name', 'Site Type',
'Lat', 'Lon', 'LatLon Accuracy', 'LatLon Datum',
'Altitude', 'Altitude Accuracy', 'Altitude Datum',
'Hydrologic Code'], index_col=1, dtype={'Hydrologic Code': "object"})
meta_data['Data Available'] = int(0)
meta_data['State'] = state_abbrev
full_lines = open(data_filename).read().splitlines()
# Get the line number of the header lines
header_nums = []
for line_num, line in enumerate(full_lines):
if re.match('agency_cd', line):
header_nums.append(line_num)
# temporary storage for combine type
type_dict = OrderedDict()
# Read in all the data based on the header lines
for header_num in header_nums:
# Check to make sure there is valid data
if len(full_lines[header_num].split()) < 5:
print('No median or averages available for', data_filename)
continue
start = header_num+2
end = len(full_lines)
for line_num, line in enumerate(full_lines[start:],start):
if line[0] == '#':
end = line_num
break
# If both median and average present
if len(full_lines[header_num].split()) > 5:
in_data = pd.read_table(StringIO('\n'.join(full_lines[start:end])), header=None,
names=['Agency','Site ID','Date','Mean Depth to Water','Mean Quality',
'Median Depth to Water', 'Median Quality'],
index_col=2, parse_dates=True)
in_data.loc[:,'Mean Comment'] = in_data.loc[:,'Mean Depth to Water'].apply(comment)
in_data.loc[:,'Median Comment'] = in_data.loc[:,'Median Depth to Water'].apply(comment)
in_data.loc[:,'Mean Depth to Water'] = in_data.loc[:,'Mean Depth to Water'].apply(convert_to_float)
in_data.loc[:,'Median Depth to Water'] = in_data.loc[:,'Median Depth to Water'].apply(convert_to_float)
# All the data is either median or mean
else:
if full_lines[header_num].split()[3][-5:] == '00008':
data_name = 'Median Depth to Water'
comment_name = 'Median Comment'
quality_name = 'Median Quality'
elif full_lines[header_num].split()[3][-5:] == '00003':
data_name = 'Mean Depth to Water'
comment_name = 'Mean Comment'
quality_name = 'Mean Quality'
else:
raise ValueError('Data type not understood')
in_data = pd.read_table(StringIO('\n'.join(full_lines[start:end])), header=None,
names=['Agency','Site ID','Date', data_name, quality_name],
index_col=2, parse_dates=True)
in_data.loc[:,comment_name] = in_data.loc[:, data_name].apply(comment)
in_data.loc[:,data_name] = in_data.loc[:, data_name].apply(convert_to_float)
# Data has been read in, now determine
# combine type and store results in
# data_dict and type_dict
site_id = in_data.ix[0,'Site ID']
in_data.drop('Site ID', 1,inplace=True)
data_dict[site_id] = in_data
meta_data.loc[site_id, 'Data Available'] = 1
if not data_dict:
print('No valid wells for', data_filename)
continue
full_meta_data = pd.concat([full_meta_data, meta_data])
store = pd.HDFStore(out_file, complevel=5, complib='blosc')
for site,data in data_dict.items():
store.put('USGS' + str(site), data, format='table')
store.put('meta_data',full_meta_data,format='table')
store.close()
DataFetcher.setDataLocation('groundwater', os.path.abspath(out_file)) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/groundwater/data_fetcher.py | data_fetcher.py |
# """@package Groundwater
# Provides classes for accessing Groundwater data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper, SeriesWrapper
# Python Standard Library
from collections import OrderedDict
import re
import os
from six.moves.urllib.error import HTTPError
from six.moves.urllib.request import urlopen
from shutil import copyfileobj
from io import StringIO
# 3rd party package imports
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStorage):
'''
Generates Data Wrappers of groundwater measurements taken in the US
'''
def __init__(self, ap_paramList = [], start_date = None, end_date = None, cutoff=0.75):
'''
Construct a Groundwater Data Fetcher
@param ap_paramList[LowerLat]: Autoparam Lower latitude
@param ap_paramList[UpperLat]: Autoparam Upper latitude
@param ap_paramList[LeftLon]: Autoparam Left longitude
@param ap_paramList[RightLon]: Autoparam Right longitude
@param start_date: Starting date (defualt: None)
@param end_date: Ending date (default: None)
@param cutoff: Required amount of data for each station
'''
self.start_date = pd.to_datetime(start_date)
self.end_date = pd.to_datetime(end_date)
self.ap_paramList = ap_paramList
self.cutoff = cutoff
def output(self):
'''
Fetch Groundwater Data Wrapper
@return Groundwater Data Wrapper
'''
meta_data = DataFetcher.getStationMetadata()
data_file = DataFetcher.getDataLocation('groundwater')
if data_file is None:
print("No data available")
return None
if len(self.ap_paramList) == 1:
station_list = self.ap_paramList[0]()
elif len(self.ap_paramList) == 4:
llat = self.ap_paramList[0]()
ulat = self.ap_paramList[1]()
llon = self.ap_paramList[2]()
rlon = self.ap_paramList[3]()
station_index = np.logical_and.reduce([meta_data.Lat > llat, meta_data.Lat < ulat,
meta_data.Lon > llon, meta_data.Lon < rlon])
cut_metadata = meta_data[station_index]
station_list = cut_metadata[cut_metadata['Data Available'] == 1].index.tolist()
else:
station_list = None
data_dict = OrderedDict()
store = pd.HDFStore(data_file, 'r')
if station_list == None:
stations = [str(site) for site in meta_data[meta_data['Data Available']==1].index]
else:
stations = station_list
for station in stations:
if self.start_date != None and self.end_date != None:
data = store['USGS' + str(station)].reindex(pd.date_range(self.start_date, self.end_date))
else:
data = store['USGS' + str(station)]
if len(data.dropna()) / len(data) >= self.cutoff:
data_dict[int(station)] = data
store.close()
return(TableWrapper(data_dict, meta_data=meta_data, default_columns=['Median Depth to Water']))
def __str__(self):
'''
String representation of data fetcher
@return string describing data fetcher
'''
return 'Ground Water Data Fetcher' + super(DataFetcher, self).__str__()
def getStationMetadata():
'''
Retrieve metadata on groundwater wells
@return pandas dataframe with groundwater well information
'''
data_file = DataFetcher.getDataLocation('groundwater')
if data_file is None:
print('Dataset not available')
return None
store = pd.HDFStore(data_file,'r')
meta_data = store['meta_data']
store.close()
return meta_data
@classmethod
def downloadFullDataset(cls, out_file = 'gw.h5', use_file = None):
'''
Download and parse US groundwater data provided by USGS
@param out_file: Output filename for parsed data
@param use_file: Specify the directory where the data is.
If None, the function will download the data
@return Absolute path of parsed data
'''
# Function that converts a string to a float
def convert_to_float(x):
try:
return np.float(x)
except:
return np.nan
# Function to test if a string can
# be converted to a float
def is_valid_number(x):
try:
test = np.float(x)
return True
except:
return False
# Returns 'No comment' for strings that
# can be interpreted as a float,
# and returns the string if it can't
# be interpreted as a float
def comment(x):
try:
test = np.float(x)
return 'No comment'
except:
return x
# Abbreviations of all 50 states
state_list = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA',
'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY']
full_meta_data = None
# temporary data storage
data_dict = OrderedDict()
data_filename_list = []
metadata_filename_list = []
for state in state_list:
data_filename = state + '_gw_data.rdb'
metadata_filename = state + '_gw_metadata.rdb'
if use_file is None:
print("Downloading", state, "data")
data_file = open(data_filename, 'wb')
metadata_file = open(metadata_filename, 'wb')
try:
# Download data
copyfileobj(urlopen('http://waterservices.usgs.gov/nwis/dv/?format=rdb&stateCd=' + state +
'&startDT=1800-01-01&endDT=2020-12-31&statCd=00003,00008¶meterCd=72019&siteType=GW'),
data_file)
data_file.close()
# Download meta data
copyfileobj(urlopen('http://waterservices.usgs.gov/nwis/site/?format=rdb&stateCd=' + state +
'&startDT=1800-01-01&endDT=2020-12-31¶meterCd=72019&siteType=GW&hasDataTypeCd=dv'),
metadata_file)
except HTTPError:
print('No data for', state)
finally:
data_file.close()
metadata_file.close()
else:
data_filename = use_file + data_filename
metadata_filename = use_file + metadata_filename
# store data filename and metadata filename
data_filename_list.append(data_filename)
metadata_filename_list.append(metadata_filename)
for data_filename, metadata_filename, state_abbrev in zip(data_filename_list, metadata_filename_list, state_list):
print("Processing ", state_abbrev, ': ', data_filename, sep='')
#Read metadata
meta_data = pd.read_table(metadata_filename, skiprows=31, names = ['Agency', 'Site Number', 'Site Name', 'Site Type',
'Lat', 'Lon', 'LatLon Accuracy', 'LatLon Datum',
'Altitude', 'Altitude Accuracy', 'Altitude Datum',
'Hydrologic Code'], index_col=1, dtype={'Hydrologic Code': "object"})
meta_data['Data Available'] = int(0)
meta_data['State'] = state_abbrev
full_lines = open(data_filename).read().splitlines()
# Get the line number of the header lines
header_nums = []
for line_num, line in enumerate(full_lines):
if re.match('agency_cd', line):
header_nums.append(line_num)
# temporary storage for combine type
type_dict = OrderedDict()
# Read in all the data based on the header lines
for header_num in header_nums:
# Check to make sure there is valid data
if len(full_lines[header_num].split()) < 5:
print('No median or averages available for', data_filename)
continue
start = header_num+2
end = len(full_lines)
for line_num, line in enumerate(full_lines[start:],start):
if line[0] == '#':
end = line_num
break
# If both median and average present
if len(full_lines[header_num].split()) > 5:
in_data = pd.read_table(StringIO('\n'.join(full_lines[start:end])), header=None,
names=['Agency','Site ID','Date','Mean Depth to Water','Mean Quality',
'Median Depth to Water', 'Median Quality'],
index_col=2, parse_dates=True)
in_data.loc[:,'Mean Comment'] = in_data.loc[:,'Mean Depth to Water'].apply(comment)
in_data.loc[:,'Median Comment'] = in_data.loc[:,'Median Depth to Water'].apply(comment)
in_data.loc[:,'Mean Depth to Water'] = in_data.loc[:,'Mean Depth to Water'].apply(convert_to_float)
in_data.loc[:,'Median Depth to Water'] = in_data.loc[:,'Median Depth to Water'].apply(convert_to_float)
# All the data is either median or mean
else:
if full_lines[header_num].split()[3][-5:] == '00008':
data_name = 'Median Depth to Water'
comment_name = 'Median Comment'
quality_name = 'Median Quality'
elif full_lines[header_num].split()[3][-5:] == '00003':
data_name = 'Mean Depth to Water'
comment_name = 'Mean Comment'
quality_name = 'Mean Quality'
else:
raise ValueError('Data type not understood')
in_data = pd.read_table(StringIO('\n'.join(full_lines[start:end])), header=None,
names=['Agency','Site ID','Date', data_name, quality_name],
index_col=2, parse_dates=True)
in_data.loc[:,comment_name] = in_data.loc[:, data_name].apply(comment)
in_data.loc[:,data_name] = in_data.loc[:, data_name].apply(convert_to_float)
# Data has been read in, now determine
# combine type and store results in
# data_dict and type_dict
site_id = in_data.ix[0,'Site ID']
in_data.drop('Site ID', 1,inplace=True)
data_dict[site_id] = in_data
meta_data.loc[site_id, 'Data Available'] = 1
if not data_dict:
print('No valid wells for', data_filename)
continue
full_meta_data = pd.concat([full_meta_data, meta_data])
store = pd.HDFStore(out_file, complevel=5, complib='blosc')
for site,data in data_dict.items():
store.put('USGS' + str(site), data, format='table')
store.put('meta_data',full_meta_data,format='table')
store.close()
DataFetcher.setDataLocation('groundwater', os.path.abspath(out_file)) | 0.658747 | 0.354377 |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.support import convertToStr
from skdaccess.utilities.image_util import AffineGlobalCoords, convertBinCentersToEdges
# 3rd party imports
import pandas as pd
import numpy as np
import gdal
from pkg_resources import resource_filename
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
from zipfile import ZipFile
import os
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving data from the Shuttle Radar Topography Mission '''
def __init__(self, lat_tile_start, lat_tile_end, lon_tile_start, lon_tile_end,
username, password, arcsecond_sampling = 1, mask_water = True,
store_geolocation_grids=False):
'''
Initialize Data Fetcher
@param lat_tile_start: Latitude of the southwest corner of the starting tile
@param lat_tile_end: Latitude of the southwset corner of the last tile
@param lon_tile_start: Longitude of the southwest corner of the starting tile
@param lon_tile_end: Longitude of the southwest corner of the last tile
@param username: NASA Earth Data username
@param password: NASA Earth Data Password
@param arcsecond_sampling: Sample spacing of the SRTM data, either 1 arc-
second or 3 arc-seconds
@param mask_water: True if the water bodies should be masked, false otherwise
@param store_geolocation_grids: Store grids of latitude and longitude in the metadata
'''
assert arcsecond_sampling == 1 or arcsecond_sampling == 3, "Sampling should be 1 or 3 arc-seconds"
self.lat_tile_start = lat_tile_start
self.lat_tile_end = lat_tile_end
self.lon_tile_start = lon_tile_start
self.lon_tile_end = lon_tile_end
self.username = username
self.password = password
self.arcsecond_sampling = arcsecond_sampling
self.mask_water = mask_water
self.store_geolocation_grids = store_geolocation_grids
self._missing_data_projection = '\n'.join([
'GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.257223563,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.0174532925199433,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]]'
])
super(DataFetcher, self).__init__()
def output(self):
'''
Generate SRTM data wrapper
@return SRTM Image Wrapper
'''
lat_tile_array = np.arange(self.lat_tile_start, self.lat_tile_end+1)
lon_tile_array = np.arange(self.lon_tile_start, self.lon_tile_end+1)
lat_grid,lon_grid = np.meshgrid(lat_tile_array, lon_tile_array)
lat_grid = lat_grid.ravel()
lon_grid = lon_grid.ravel()
filename_root = '.SRTMGL1.'
base_url = 'https://e4ftl01.cr.usgs.gov/MEASURES/'
folder_root = 'SRTMGL1.003/2000.02.11/'
if self.arcsecond_sampling == 3:
filename_root = '.SRTMGL3.'
folder_root = 'SRTMGL3.003/2000.02.11/'
base_url += folder_root
filename_list = []
for lat, lon in zip(lat_grid, lon_grid):
if lat < 0:
lat_label = 'S'
lat = np.abs(lat)
else:
lat_label = 'N'
if lon < 0:
lon_label = 'W'
lon = np.abs(lon)
else:
lon_label = 'E'
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'hgt.zip')
if self.mask_water == True:
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'num.zip')
# Read in list of available data
srtm_list_filename = 'srtm_gl1.txt'
if self.arcsecond_sampling == 3:
srtm_list_filename = 'srtm_gl3.txt'
srtm_support_filename = resource_filename('skdaccess', os.path.join('support',srtm_list_filename))
available_file_list = open(srtm_support_filename).readlines()
available_file_list = [filename.strip() for filename in available_file_list]
requested_files = pd.DataFrame({'Filename' : filename_list})
requested_files['Valid'] = [ '.'.join(filename.split('.')[0:-2]) in available_file_list for filename in filename_list ]
valid_filename_list = requested_files.loc[ requested_files['Valid']==True, 'Filename'].tolist()
url_list = [base_url + filename for filename in valid_filename_list]
downloaded_file_list = self.cacheData('srtm', url_list, self.username, self.password,
'https://urs.earthdata.nasa.gov')
requested_files.loc[ requested_files['Valid']==True, 'Full Path'] = downloaded_file_list
def getCoordinates(filename):
'''
Determine the longitude and latitude of the lowerleft corner of the input filename
@param in_filename: Input SRTM filename
@return Latitude of southwest corner, Longitude of southwest corner
'''
lat_start = int(filename[1:3])
if filename[0] == 'S':
lat_start *= -1
lon_start = int(filename[4:7])
if filename[3] == 'W':
lon_start *= -1
return lat_start, lon_start
data_dict = OrderedDict()
metadata_dict = OrderedDict()
array_shape = (3601,3601)
if self.arcsecond_sampling == 3:
array_shape = (1201,1201)
file_slice = slice(None)
water_value = 0
if self.mask_water == True:
file_slice = slice(0, -1, 2)
water_value = np.nan
for i in requested_files.index[file_slice]:
hgt_full_path = requested_files.at[i, 'Full Path']
hgt_filename = requested_files.at[i, 'Filename']
label = hgt_filename[:7]
lat_start, lon_start = getCoordinates(hgt_filename)
metadata_dict[label] = OrderedDict()
x_res = 1.0 / (array_shape[0]-1)
y_res = 1.0 / (array_shape[1]-1)
extents = [
lon_start - x_res / 2,
lon_start + 1 + x_res / 2,
lat_start - y_res / 2,
lat_start + 1 + y_res / 2
]
if requested_files.at[i, 'Valid']:
masked_dem_data = np.ones(array_shape)
if self.mask_water == True and requested_files.at[i + 1, 'Valid']:
num_full_path = requested_files.at[i + 1, 'Full Path']
num_filename = requested_files.at[i + 1, 'Full Path']
zipped_num_data = ZipFile(num_full_path)
zipped_num_full_path = zipped_num_data.infolist()[0].filename
num_data = np.frombuffer(zipped_num_data.open(zipped_num_full_path).read(),
np.dtype('uint8')).reshape(array_shape)
masked_dem_data[(num_data == 1) | (num_data == 2)] = water_value
i += 1
zipped_hgt_data = ZipFile(hgt_full_path)
dem_dataset = gdal.Open(hgt_full_path, gdal.GA_ReadOnly)
dem_data = dem_dataset.ReadAsArray()
masked_dem_data *= dem_data
metadata_dict[label]['WKT'] = dem_dataset.GetProjection()
metadata_dict[label]['GeoTransform'] = dem_dataset.GetGeoTransform()
else:
geo_transform = []
geo_transform.append(extents[0])
geo_transform.append(x_res)
geo_transform.append(0)
geo_transform.append(extents[-1])
geo_transform.append(0)
geo_transform.append(-y_res)
metadata_dict[label]['WKT'] = self._missing_data_projection
metadata_dict[label]['GeoTransform'] = geo_transform
masked_dem_data = np.full(shape=array_shape, fill_value=water_value)
i += 1
data_dict[label] = masked_dem_data
metadata_dict[label]['Geolocation'] = AffineGlobalCoords(metadata_dict[label]['GeoTransform'], center_pixels=True)
metadata_dict[label]['extents'] = extents
if self.store_geolocation_grids:
lat_coords, lon_coords = np.meshgrid(np.linspace(lat_start+1, lat_start, array_shape[0]),
np.linspace(lon_start, lon_start+1, array_shape[1]),
indexing = 'ij')
metadata_dict[label]['Latitude'] = lat_coords
metadata_dict[label]['Longitude'] = lon_coords
return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/srtm/cache/data_fetcher.py | data_fetcher.py |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.support import convertToStr
from skdaccess.utilities.image_util import AffineGlobalCoords, convertBinCentersToEdges
# 3rd party imports
import pandas as pd
import numpy as np
import gdal
from pkg_resources import resource_filename
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
from zipfile import ZipFile
import os
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving data from the Shuttle Radar Topography Mission '''
def __init__(self, lat_tile_start, lat_tile_end, lon_tile_start, lon_tile_end,
username, password, arcsecond_sampling = 1, mask_water = True,
store_geolocation_grids=False):
'''
Initialize Data Fetcher
@param lat_tile_start: Latitude of the southwest corner of the starting tile
@param lat_tile_end: Latitude of the southwset corner of the last tile
@param lon_tile_start: Longitude of the southwest corner of the starting tile
@param lon_tile_end: Longitude of the southwest corner of the last tile
@param username: NASA Earth Data username
@param password: NASA Earth Data Password
@param arcsecond_sampling: Sample spacing of the SRTM data, either 1 arc-
second or 3 arc-seconds
@param mask_water: True if the water bodies should be masked, false otherwise
@param store_geolocation_grids: Store grids of latitude and longitude in the metadata
'''
assert arcsecond_sampling == 1 or arcsecond_sampling == 3, "Sampling should be 1 or 3 arc-seconds"
self.lat_tile_start = lat_tile_start
self.lat_tile_end = lat_tile_end
self.lon_tile_start = lon_tile_start
self.lon_tile_end = lon_tile_end
self.username = username
self.password = password
self.arcsecond_sampling = arcsecond_sampling
self.mask_water = mask_water
self.store_geolocation_grids = store_geolocation_grids
self._missing_data_projection = '\n'.join([
'GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.257223563,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.0174532925199433,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]]'
])
super(DataFetcher, self).__init__()
def output(self):
'''
Generate SRTM data wrapper
@return SRTM Image Wrapper
'''
lat_tile_array = np.arange(self.lat_tile_start, self.lat_tile_end+1)
lon_tile_array = np.arange(self.lon_tile_start, self.lon_tile_end+1)
lat_grid,lon_grid = np.meshgrid(lat_tile_array, lon_tile_array)
lat_grid = lat_grid.ravel()
lon_grid = lon_grid.ravel()
filename_root = '.SRTMGL1.'
base_url = 'https://e4ftl01.cr.usgs.gov/MEASURES/'
folder_root = 'SRTMGL1.003/2000.02.11/'
if self.arcsecond_sampling == 3:
filename_root = '.SRTMGL3.'
folder_root = 'SRTMGL3.003/2000.02.11/'
base_url += folder_root
filename_list = []
for lat, lon in zip(lat_grid, lon_grid):
if lat < 0:
lat_label = 'S'
lat = np.abs(lat)
else:
lat_label = 'N'
if lon < 0:
lon_label = 'W'
lon = np.abs(lon)
else:
lon_label = 'E'
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'hgt.zip')
if self.mask_water == True:
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'num.zip')
# Read in list of available data
srtm_list_filename = 'srtm_gl1.txt'
if self.arcsecond_sampling == 3:
srtm_list_filename = 'srtm_gl3.txt'
srtm_support_filename = resource_filename('skdaccess', os.path.join('support',srtm_list_filename))
available_file_list = open(srtm_support_filename).readlines()
available_file_list = [filename.strip() for filename in available_file_list]
requested_files = pd.DataFrame({'Filename' : filename_list})
requested_files['Valid'] = [ '.'.join(filename.split('.')[0:-2]) in available_file_list for filename in filename_list ]
valid_filename_list = requested_files.loc[ requested_files['Valid']==True, 'Filename'].tolist()
url_list = [base_url + filename for filename in valid_filename_list]
downloaded_file_list = self.cacheData('srtm', url_list, self.username, self.password,
'https://urs.earthdata.nasa.gov')
requested_files.loc[ requested_files['Valid']==True, 'Full Path'] = downloaded_file_list
def getCoordinates(filename):
'''
Determine the longitude and latitude of the lowerleft corner of the input filename
@param in_filename: Input SRTM filename
@return Latitude of southwest corner, Longitude of southwest corner
'''
lat_start = int(filename[1:3])
if filename[0] == 'S':
lat_start *= -1
lon_start = int(filename[4:7])
if filename[3] == 'W':
lon_start *= -1
return lat_start, lon_start
data_dict = OrderedDict()
metadata_dict = OrderedDict()
array_shape = (3601,3601)
if self.arcsecond_sampling == 3:
array_shape = (1201,1201)
file_slice = slice(None)
water_value = 0
if self.mask_water == True:
file_slice = slice(0, -1, 2)
water_value = np.nan
for i in requested_files.index[file_slice]:
hgt_full_path = requested_files.at[i, 'Full Path']
hgt_filename = requested_files.at[i, 'Filename']
label = hgt_filename[:7]
lat_start, lon_start = getCoordinates(hgt_filename)
metadata_dict[label] = OrderedDict()
x_res = 1.0 / (array_shape[0]-1)
y_res = 1.0 / (array_shape[1]-1)
extents = [
lon_start - x_res / 2,
lon_start + 1 + x_res / 2,
lat_start - y_res / 2,
lat_start + 1 + y_res / 2
]
if requested_files.at[i, 'Valid']:
masked_dem_data = np.ones(array_shape)
if self.mask_water == True and requested_files.at[i + 1, 'Valid']:
num_full_path = requested_files.at[i + 1, 'Full Path']
num_filename = requested_files.at[i + 1, 'Full Path']
zipped_num_data = ZipFile(num_full_path)
zipped_num_full_path = zipped_num_data.infolist()[0].filename
num_data = np.frombuffer(zipped_num_data.open(zipped_num_full_path).read(),
np.dtype('uint8')).reshape(array_shape)
masked_dem_data[(num_data == 1) | (num_data == 2)] = water_value
i += 1
zipped_hgt_data = ZipFile(hgt_full_path)
dem_dataset = gdal.Open(hgt_full_path, gdal.GA_ReadOnly)
dem_data = dem_dataset.ReadAsArray()
masked_dem_data *= dem_data
metadata_dict[label]['WKT'] = dem_dataset.GetProjection()
metadata_dict[label]['GeoTransform'] = dem_dataset.GetGeoTransform()
else:
geo_transform = []
geo_transform.append(extents[0])
geo_transform.append(x_res)
geo_transform.append(0)
geo_transform.append(extents[-1])
geo_transform.append(0)
geo_transform.append(-y_res)
metadata_dict[label]['WKT'] = self._missing_data_projection
metadata_dict[label]['GeoTransform'] = geo_transform
masked_dem_data = np.full(shape=array_shape, fill_value=water_value)
i += 1
data_dict[label] = masked_dem_data
metadata_dict[label]['Geolocation'] = AffineGlobalCoords(metadata_dict[label]['GeoTransform'], center_pixels=True)
metadata_dict[label]['extents'] = extents
if self.store_geolocation_grids:
lat_coords, lon_coords = np.meshgrid(np.linspace(lat_start+1, lat_start, array_shape[0]),
np.linspace(lon_start, lon_start+1, array_shape[1]),
indexing = 'ij')
metadata_dict[label]['Latitude'] = lat_coords
metadata_dict[label]['Longitude'] = lon_coords
return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict) | 0.724675 | 0.480052 |
# """@package MODIS data
# Provides classes for accessing MODIS data.
# """
# Standard library imports
from collections import OrderedDict
from pathlib import Path
from shutil import copyfileobj
import os
import re
# 3rd party package imports
import pandas as pd
from six.moves.urllib.request import urlopen
import numpy as np
# mithagi imports
from skdaccess.framework.data_class import DataFetcherStream, ImageWrapper
from skdaccess.utilities.modis_util import getImageType, createGrid, getFileURLs, readMODISData, getFileURLs, getFileIDs
from tqdm import tqdm
class DataFetcher(DataFetcherStream):
''' Data Fetcher for MODIS data '''
def __init__(self, ap_paramList, modis_platform, modis_id, variable_list, start_date, end_date,
daynightboth = 'D', grid=None, grid_fill = np.nan, use_long_name=False):
'''
Construct Data Fetcher object
@param ap_paramList[lat]: Search latitude
@param ap_paramList[lon]: Search longitude
@param modis_platform: Platform (Either "Terra" or "Aqua")
@param modis_id: Product string (e.g. '06_L2')
@param variable_list: List of variables to fetch
@param start_date: Starting date
@param end_date: Ending date
@param daynightboth: Use daytime data ('D'), nighttime data ('N') or both ('B')
@param grid: Further divide each image into a multiple grids of size (y,x)
@param grid_fill: Fill value to use when creating gridded data
@param use_long_name: Use long names for metadata instead of variable name
'''
self.modis_id = modis_id
self.variable_list = variable_list
self.start_date = start_date
self.end_date = end_date
self.daynightboth = daynightboth
self.grid = grid
self.grid_fill = grid_fill
self.use_long_name = use_long_name
if modis_platform.lower() == 'terra':
self.modis_platform = 'MOD'
elif modis_platform.lower() == 'aqua':
self.modis_platform = 'MYD'
else:
raise ValueError('Did not understand modis platform')
self.modis_identifier = self.modis_platform + modis_id
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Generate data wrapper
@return data wrapper of MODIS data
'''
# Determine latitude and longitude for
# output
lat = self.ap_paramList[0]()
lon = self.ap_paramList[1]()
start_date = self.start_date
end_date = self.end_date
time = self.daynightboth
file_ids = getFileIDs(self.modis_identifier, start_date, end_date, lat, lon, time)
file_urls = getFileURLs(file_ids)
# For streaming, need to use opendap urls
url_header = 'http://ladsweb.modaps.eosdis.nasa.gov/opendap/'
opendap_urls = [ url_header + re.search('allData.*$',url).group(0) for url in file_urls ]
# This function reads data and returns a wrapper
return readMODISData(opendap_urls, self.variable_list, grid=self.grid, grid_fill = self.grid_fill,
use_long_name = self.use_long_name, platform = self.modis_platform,
product_id = self.modis_id) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/modis/stream/data_fetcher.py | data_fetcher.py |
# """@package MODIS data
# Provides classes for accessing MODIS data.
# """
# Standard library imports
from collections import OrderedDict
from pathlib import Path
from shutil import copyfileobj
import os
import re
# 3rd party package imports
import pandas as pd
from six.moves.urllib.request import urlopen
import numpy as np
# mithagi imports
from skdaccess.framework.data_class import DataFetcherStream, ImageWrapper
from skdaccess.utilities.modis_util import getImageType, createGrid, getFileURLs, readMODISData, getFileURLs, getFileIDs
from tqdm import tqdm
class DataFetcher(DataFetcherStream):
''' Data Fetcher for MODIS data '''
def __init__(self, ap_paramList, modis_platform, modis_id, variable_list, start_date, end_date,
daynightboth = 'D', grid=None, grid_fill = np.nan, use_long_name=False):
'''
Construct Data Fetcher object
@param ap_paramList[lat]: Search latitude
@param ap_paramList[lon]: Search longitude
@param modis_platform: Platform (Either "Terra" or "Aqua")
@param modis_id: Product string (e.g. '06_L2')
@param variable_list: List of variables to fetch
@param start_date: Starting date
@param end_date: Ending date
@param daynightboth: Use daytime data ('D'), nighttime data ('N') or both ('B')
@param grid: Further divide each image into a multiple grids of size (y,x)
@param grid_fill: Fill value to use when creating gridded data
@param use_long_name: Use long names for metadata instead of variable name
'''
self.modis_id = modis_id
self.variable_list = variable_list
self.start_date = start_date
self.end_date = end_date
self.daynightboth = daynightboth
self.grid = grid
self.grid_fill = grid_fill
self.use_long_name = use_long_name
if modis_platform.lower() == 'terra':
self.modis_platform = 'MOD'
elif modis_platform.lower() == 'aqua':
self.modis_platform = 'MYD'
else:
raise ValueError('Did not understand modis platform')
self.modis_identifier = self.modis_platform + modis_id
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Generate data wrapper
@return data wrapper of MODIS data
'''
# Determine latitude and longitude for
# output
lat = self.ap_paramList[0]()
lon = self.ap_paramList[1]()
start_date = self.start_date
end_date = self.end_date
time = self.daynightboth
file_ids = getFileIDs(self.modis_identifier, start_date, end_date, lat, lon, time)
file_urls = getFileURLs(file_ids)
# For streaming, need to use opendap urls
url_header = 'http://ladsweb.modaps.eosdis.nasa.gov/opendap/'
opendap_urls = [ url_header + re.search('allData.*$',url).group(0) for url in file_urls ]
# This function reads data and returns a wrapper
return readMODISData(opendap_urls, self.variable_list, grid=self.grid, grid_fill = self.grid_fill,
use_long_name = self.use_long_name, platform = self.modis_platform,
product_id = self.modis_id) | 0.767864 | 0.331052 |
# """@package MODIS data
# Provides classes for accessing MODIS data.
# """
# Standard library imports
from collections import OrderedDict
from pathlib import Path
from shutil import copyfileobj
import os
import re
import fcntl
# 3rd party package imports
import numpy as np
import pandas as pd
from six.moves.urllib.request import urlopen
# mithagi imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.modis_util import getImageType, createGrid, getFileURLs, readMODISData, getFileURLs, getFileIDs
from tqdm import tqdm
class DataFetcher(DataFetcherCache):
''' Data Fetcher for MODIS data '''
def __init__(self, ap_paramList, modis_platform, modis_id, variable_list, start_date, end_date,
daynightboth = 'D', grid=None, grid_fill = np.nan, use_long_name=False):
'''
Construct Data Fetcher object
@param ap_paramList[lat]: Search latitude
@param ap_paramList[lon]: Search longitude
@param modis_platform: Platform (Either "Terra" or "Aqua")
@param modis_id: Product string (e.g. '06_L2')
@param variable_list: List of variables to fetch
@param start_date: Starting date
@param end_date: Ending date
@param daynightboth: Use daytime data ('D'), nighttime data ('N') or both ('B')
@param grid: Further divide each image into a multiple grids of size (y,x)
@param grid_fill: Fill value to use when creating gridded data
@param use_long_name: Use long names for metadata instead of variable name
'''
self.modis_id = modis_id
self.variable_list = variable_list
self.start_date = start_date
self.end_date = end_date
self.daynightboth = daynightboth
self.grid = grid
self.grid_fill = grid_fill
self.use_long_name = use_long_name
if modis_platform.lower() == 'terra':
self.modis_platform = 'MOD'
elif modis_platform.lower() == 'aqua':
self.modis_platform = 'MYD'
else:
raise ValueError('Did not understand modis platform')
self.modis_identifier = self.modis_platform + modis_id
super(DataFetcher, self).__init__(ap_paramList)
def find_data(self, fileid_list, file_object):
'''
Finds files previously downloaded files associated with fileids
@param fileid_list: List of file id's
@param file_object: File object to read from
@return Pandas series of file locaitons indexed by file id
'''
file_locations = []
try:
metadata = pd.read_csv(file_object, index_col=0)
for fileid in fileid_list:
if fileid in metadata.index:
file_locations.append(metadata.loc[fileid,'filename'])
else:
file_locations.append(None)
except pd.errors.EmptyDataError:
file_locations = [ None for i in range(len(fileid_list)) ]
return pd.Series(file_locations, index=fileid_list)
def cacheData(self, data_specification):
'''
Download MODIS data
@param data_specification: List of file IDs to cache
'''
file_ids = data_specification
def download_data(missing_metadata, file_object):
try:
metadata = pd.read_csv(file_object, index_col=0)
except pd.errors.EmptyDataError:
metadata = pd.DataFrame(columns=["filename"])
metadata.index.name = 'fileid'
fileid_list = list(missing_metadata.index)
file_urls = getFileURLs(fileid_list)
filename_list = []
for fileid, fileurl in tqdm(zip(fileid_list, file_urls), total=len(fileid_list)):
filename = re.search('[^/]*$', fileurl).group()
data_file = open(os.path.join(data_location,filename), 'wb')
copyfileobj(urlopen(fileurl), data_file)
data_file.close()
metadata.loc[fileid] = filename
filename_list.append(filename)
file_object.seek(0)
file_object.truncate()
metadata.to_csv(file_object)
for fileid, filename in zip(fileid_list, filename_list):
missing_metadata.loc[fileid] = filename
return missing_metadata
data_location = DataFetcher.getDataLocation('modis')
metadata_location = os.path.join(data_location, 'metadata.csv')
with open(metadata_location, 'a+') as metadata_file:
fcntl.lockf(metadata_file, fcntl.LOCK_EX)
metadata_file.seek(0)
file_names = self.find_data(file_ids, metadata_file)
metadata_file.seek(0)
missing = file_names[pd.isnull(file_names)]
if len(missing) > 0:
downloaded = download_data(missing, metadata_file)
def output(self):
'''
Generate data wrapper
@return data wrapper of MODIS data
'''
# Determine latitude and longitude for
# output
lat = self.ap_paramList[0]()
lon = self.ap_paramList[1]()
start_date = self.start_date
end_date = self.end_date
time = self.daynightboth
file_ids = getFileIDs(self.modis_identifier, start_date, end_date, lat, lon, time)
self.cacheData(file_ids)
data_location = DataFetcher.getDataLocation('modis')
with open(os.path.join(data_location,'metadata.csv'), 'a+') as file_object:
fcntl.lockf(file_object, fcntl.LOCK_SH)
file_object.seek(0)
file_list = self.find_data(file_ids, file_object)
# Location of data files
data_location = DataFetcher.getDataLocation('modis')
# Generate list containing full paths to data files
file_locations = []
for filename in file_list:
file_locations.append(os.path.join(data_location, filename))
# This function reads data and returns a wrapper
return readMODISData(file_locations, self.variable_list, grid=self.grid, grid_fill = self.grid_fill,
use_long_name = self.use_long_name, platform = self.modis_platform,
product_id = self.modis_id) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/modis/cache/data_fetcher.py | data_fetcher.py |
# """@package MODIS data
# Provides classes for accessing MODIS data.
# """
# Standard library imports
from collections import OrderedDict
from pathlib import Path
from shutil import copyfileobj
import os
import re
import fcntl
# 3rd party package imports
import numpy as np
import pandas as pd
from six.moves.urllib.request import urlopen
# mithagi imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.modis_util import getImageType, createGrid, getFileURLs, readMODISData, getFileURLs, getFileIDs
from tqdm import tqdm
class DataFetcher(DataFetcherCache):
''' Data Fetcher for MODIS data '''
def __init__(self, ap_paramList, modis_platform, modis_id, variable_list, start_date, end_date,
daynightboth = 'D', grid=None, grid_fill = np.nan, use_long_name=False):
'''
Construct Data Fetcher object
@param ap_paramList[lat]: Search latitude
@param ap_paramList[lon]: Search longitude
@param modis_platform: Platform (Either "Terra" or "Aqua")
@param modis_id: Product string (e.g. '06_L2')
@param variable_list: List of variables to fetch
@param start_date: Starting date
@param end_date: Ending date
@param daynightboth: Use daytime data ('D'), nighttime data ('N') or both ('B')
@param grid: Further divide each image into a multiple grids of size (y,x)
@param grid_fill: Fill value to use when creating gridded data
@param use_long_name: Use long names for metadata instead of variable name
'''
self.modis_id = modis_id
self.variable_list = variable_list
self.start_date = start_date
self.end_date = end_date
self.daynightboth = daynightboth
self.grid = grid
self.grid_fill = grid_fill
self.use_long_name = use_long_name
if modis_platform.lower() == 'terra':
self.modis_platform = 'MOD'
elif modis_platform.lower() == 'aqua':
self.modis_platform = 'MYD'
else:
raise ValueError('Did not understand modis platform')
self.modis_identifier = self.modis_platform + modis_id
super(DataFetcher, self).__init__(ap_paramList)
def find_data(self, fileid_list, file_object):
'''
Finds files previously downloaded files associated with fileids
@param fileid_list: List of file id's
@param file_object: File object to read from
@return Pandas series of file locaitons indexed by file id
'''
file_locations = []
try:
metadata = pd.read_csv(file_object, index_col=0)
for fileid in fileid_list:
if fileid in metadata.index:
file_locations.append(metadata.loc[fileid,'filename'])
else:
file_locations.append(None)
except pd.errors.EmptyDataError:
file_locations = [ None for i in range(len(fileid_list)) ]
return pd.Series(file_locations, index=fileid_list)
def cacheData(self, data_specification):
'''
Download MODIS data
@param data_specification: List of file IDs to cache
'''
file_ids = data_specification
def download_data(missing_metadata, file_object):
try:
metadata = pd.read_csv(file_object, index_col=0)
except pd.errors.EmptyDataError:
metadata = pd.DataFrame(columns=["filename"])
metadata.index.name = 'fileid'
fileid_list = list(missing_metadata.index)
file_urls = getFileURLs(fileid_list)
filename_list = []
for fileid, fileurl in tqdm(zip(fileid_list, file_urls), total=len(fileid_list)):
filename = re.search('[^/]*$', fileurl).group()
data_file = open(os.path.join(data_location,filename), 'wb')
copyfileobj(urlopen(fileurl), data_file)
data_file.close()
metadata.loc[fileid] = filename
filename_list.append(filename)
file_object.seek(0)
file_object.truncate()
metadata.to_csv(file_object)
for fileid, filename in zip(fileid_list, filename_list):
missing_metadata.loc[fileid] = filename
return missing_metadata
data_location = DataFetcher.getDataLocation('modis')
metadata_location = os.path.join(data_location, 'metadata.csv')
with open(metadata_location, 'a+') as metadata_file:
fcntl.lockf(metadata_file, fcntl.LOCK_EX)
metadata_file.seek(0)
file_names = self.find_data(file_ids, metadata_file)
metadata_file.seek(0)
missing = file_names[pd.isnull(file_names)]
if len(missing) > 0:
downloaded = download_data(missing, metadata_file)
def output(self):
'''
Generate data wrapper
@return data wrapper of MODIS data
'''
# Determine latitude and longitude for
# output
lat = self.ap_paramList[0]()
lon = self.ap_paramList[1]()
start_date = self.start_date
end_date = self.end_date
time = self.daynightboth
file_ids = getFileIDs(self.modis_identifier, start_date, end_date, lat, lon, time)
self.cacheData(file_ids)
data_location = DataFetcher.getDataLocation('modis')
with open(os.path.join(data_location,'metadata.csv'), 'a+') as file_object:
fcntl.lockf(file_object, fcntl.LOCK_SH)
file_object.seek(0)
file_list = self.find_data(file_ids, file_object)
# Location of data files
data_location = DataFetcher.getDataLocation('modis')
# Generate list containing full paths to data files
file_locations = []
for filename in file_list:
file_locations.append(os.path.join(data_location, filename))
# This function reads data and returns a wrapper
return readMODISData(file_locations, self.variable_list, grid=self.grid, grid_fill = self.grid_fill,
use_long_name = self.use_long_name, platform = self.modis_platform,
product_id = self.modis_id) | 0.635222 | 0.297948 |
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from collections import OrderedDict
import pandas as pd
import numpy as np
import pyproj
class DataFetcher(DataFetcherStorage):
'''
Fetches data for the Interactive Multisensor Snow and Ice Mapping System Daily Northern Hemisphere Snow and Ice Analysis
'''
def __init__(self, coordinate_dict, start_date, end_date):
'''
Intializes the Data Fetcher
@param coordinate_dict: Dictionary of locations where the names are the keys and the items are
lists containing the latitude and longitude are the values
@param start_date: Starting date
@param end_date: Ending date
'''
super(DataFetcher, self).__init__([])
self.coordinate_dict = coordinate_dict
self.start_date = start_date
self.end_date = end_date
def output(self):
'''
Fetch snow coverage data for coordinates
@return Data wrapper for snow coverage
'''
data_file = DataFetcher.getDataLocation('imsdnhs')
if data_file is None:
print("No data available")
return None
store = pd.HDFStore(data_file)
# Projection information
x_start = -12288000.0
x_end = 12288000.0
y_start = 12288000.0
y_end = -12288000.0
x_dim = 6144
y_dim = 6144
x_inc = (x_end - x_start) / x_dim
y_inc = (y_end - y_start) / y_dim
proj = pyproj.Proj('+proj=stere +lat_0=90 +lat_ts=60 +lon_0=-80 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs')
# Function that determines the x,y image coordinate for a
# given (latitude, longitude) pair
def convertToXY(lat, lon):
ux, uy = proj(lon,lat)
x = np.round(((ux - x_start) / x_inc) - 0.5).astype(np.int)
y = np.round(((uy - y_start) / y_inc) - 0.5).astype(np.int)
return (x,y)
label_list = []
lat_array = np.zeros(len(self.coordinate_dict),dtype=np.float)
lon_array = np.zeros(len(self.coordinate_dict),dtype=np.float)
for i, (label, coordinates) in enumerate(self.coordinate_dict.items()):
label_list.append(label)
lat_array[i] = coordinates[0]
lon_array[i] = coordinates[1]
x_array,y_array = convertToXY(lat_array, lon_array)
# # Forming a complex number to remove duplicate
# # coordinates
# complex_array = np.unique(x_array * 1j * y_array)
# x_array = complex_array.real
# y_array = complex_array.imag
data_dict = OrderedDict()
for label,x,y in zip(label_list, x_array,y_array):
data_dict[label] = pd.DataFrame({'Snow': store['y_' + str(y).zfill(4)].loc[:,x].reindex(pd.date_range(pd.to_datetime(self.start_date),
pd.to_datetime(self.end_date)),fill_value=-1)})
return TableWrapper(data_dict, default_columns = ['Snow']) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/imsdnhs/data_fetcher.py | data_fetcher.py | from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from collections import OrderedDict
import pandas as pd
import numpy as np
import pyproj
class DataFetcher(DataFetcherStorage):
'''
Fetches data for the Interactive Multisensor Snow and Ice Mapping System Daily Northern Hemisphere Snow and Ice Analysis
'''
def __init__(self, coordinate_dict, start_date, end_date):
'''
Intializes the Data Fetcher
@param coordinate_dict: Dictionary of locations where the names are the keys and the items are
lists containing the latitude and longitude are the values
@param start_date: Starting date
@param end_date: Ending date
'''
super(DataFetcher, self).__init__([])
self.coordinate_dict = coordinate_dict
self.start_date = start_date
self.end_date = end_date
def output(self):
'''
Fetch snow coverage data for coordinates
@return Data wrapper for snow coverage
'''
data_file = DataFetcher.getDataLocation('imsdnhs')
if data_file is None:
print("No data available")
return None
store = pd.HDFStore(data_file)
# Projection information
x_start = -12288000.0
x_end = 12288000.0
y_start = 12288000.0
y_end = -12288000.0
x_dim = 6144
y_dim = 6144
x_inc = (x_end - x_start) / x_dim
y_inc = (y_end - y_start) / y_dim
proj = pyproj.Proj('+proj=stere +lat_0=90 +lat_ts=60 +lon_0=-80 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs')
# Function that determines the x,y image coordinate for a
# given (latitude, longitude) pair
def convertToXY(lat, lon):
ux, uy = proj(lon,lat)
x = np.round(((ux - x_start) / x_inc) - 0.5).astype(np.int)
y = np.round(((uy - y_start) / y_inc) - 0.5).astype(np.int)
return (x,y)
label_list = []
lat_array = np.zeros(len(self.coordinate_dict),dtype=np.float)
lon_array = np.zeros(len(self.coordinate_dict),dtype=np.float)
for i, (label, coordinates) in enumerate(self.coordinate_dict.items()):
label_list.append(label)
lat_array[i] = coordinates[0]
lon_array[i] = coordinates[1]
x_array,y_array = convertToXY(lat_array, lon_array)
# # Forming a complex number to remove duplicate
# # coordinates
# complex_array = np.unique(x_array * 1j * y_array)
# x_array = complex_array.real
# y_array = complex_array.imag
data_dict = OrderedDict()
for label,x,y in zip(label_list, x_array,y_array):
data_dict[label] = pd.DataFrame({'Snow': store['y_' + str(y).zfill(4)].loc[:,x].reindex(pd.date_range(pd.to_datetime(self.start_date),
pd.to_datetime(self.end_date)),fill_value=-1)})
return TableWrapper(data_dict, default_columns = ['Snow']) | 0.812904 | 0.499756 |
# skdaccess imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Standard library imports
from collections import OrderedDict
import os
# 3rd party imports
import pandas as pd
from geomagio.edge import EdgeFactory
from obspy.core import UTCDateTime
from pkg_resources import resource_filename
class DataFetcher(DataFetcherStream):
''' Data fetcher for USGS geomagnetic observatories '''
def __init__(self, ap_paramList, start_time, end_time, interval = 'minute',
channels=('X','Y','Z','F'), data_type = 'variation'):
'''
Geomagnetism Data fetcher constructor
@param ap_paramList[AutoList]: AutoList of Observatory names
@param start_time: Starting time
@param end_time: Ending time
@param interval: Time resolution
@param channels: Data channels
@param data_type = Data type
'''
self.start_time = start_time
self.end_time = end_time
self.interval = interval
self.channels = channels
self.data_type = data_type
super(DataFetcher,self).__init__(ap_paramList)
def output(self):
'''
Generate data wrapper for USGS geomagnetic data
@return geomagnetic data wrapper
'''
observatory_list = self.ap_paramList[0]()
# USGS Edge server
base_url = 'cwbpub.cr.usgs.gov'
factory = EdgeFactory(host=base_url, port=2060)
data_dict = OrderedDict()
for observatory in observatory_list:
ret_data = factory.get_timeseries( observatory=observatory,
interval=self.interval,
type=self.data_type,
channels=self.channels,
starttime=UTCDateTime(self.start_time),
endtime=UTCDateTime(self.end_time))
obs_data = OrderedDict()
for label, trace in zip(self.channels, ret_data):
time = pd.to_datetime(trace.stats['starttime'].datetime) + pd.to_timedelta(trace.times(),unit='s')
obs_data[label] = pd.Series(trace.data,time)
data_dict[observatory] = pd.DataFrame(obs_data)
return TableWrapper(data_dict, default_columns=self.channels)
def getDataMetadata():
'''
Get data metadata
@return Pandas dataframe containing station latitude and
longitude coordinates
'''
meta_data_path = resource_filename('skdaccess',os.path.join('support','usgs_geomagnetism_observatories.txt'))
return pd.read_csv(meta_data_path, header=None, names=('Observatory','Lat','Lon')).set_index('Observatory') | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/geo/magnetometer/data_fetcher.py | data_fetcher.py |
# skdaccess imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Standard library imports
from collections import OrderedDict
import os
# 3rd party imports
import pandas as pd
from geomagio.edge import EdgeFactory
from obspy.core import UTCDateTime
from pkg_resources import resource_filename
class DataFetcher(DataFetcherStream):
''' Data fetcher for USGS geomagnetic observatories '''
def __init__(self, ap_paramList, start_time, end_time, interval = 'minute',
channels=('X','Y','Z','F'), data_type = 'variation'):
'''
Geomagnetism Data fetcher constructor
@param ap_paramList[AutoList]: AutoList of Observatory names
@param start_time: Starting time
@param end_time: Ending time
@param interval: Time resolution
@param channels: Data channels
@param data_type = Data type
'''
self.start_time = start_time
self.end_time = end_time
self.interval = interval
self.channels = channels
self.data_type = data_type
super(DataFetcher,self).__init__(ap_paramList)
def output(self):
'''
Generate data wrapper for USGS geomagnetic data
@return geomagnetic data wrapper
'''
observatory_list = self.ap_paramList[0]()
# USGS Edge server
base_url = 'cwbpub.cr.usgs.gov'
factory = EdgeFactory(host=base_url, port=2060)
data_dict = OrderedDict()
for observatory in observatory_list:
ret_data = factory.get_timeseries( observatory=observatory,
interval=self.interval,
type=self.data_type,
channels=self.channels,
starttime=UTCDateTime(self.start_time),
endtime=UTCDateTime(self.end_time))
obs_data = OrderedDict()
for label, trace in zip(self.channels, ret_data):
time = pd.to_datetime(trace.stats['starttime'].datetime) + pd.to_timedelta(trace.times(),unit='s')
obs_data[label] = pd.Series(trace.data,time)
data_dict[observatory] = pd.DataFrame(obs_data)
return TableWrapper(data_dict, default_columns=self.channels)
def getDataMetadata():
'''
Get data metadata
@return Pandas dataframe containing station latitude and
longitude coordinates
'''
meta_data_path = resource_filename('skdaccess',os.path.join('support','usgs_geomagnetism_observatories.txt'))
return pd.read_csv(meta_data_path, header=None, names=('Observatory','Lat','Lon')).set_index('Observatory') | 0.805861 | 0.261693 |
# Standard library imports
from itertools import combinations
from collections import OrderedDict
# Scikit Data Access imports
from .image_util import convertBinCentersToEdges
# 3rd part imports
import pandas as pd
import numpy as np
from netCDF4 import Dataset, num2date
def averageDates(dates, round_nearest_day = False):
'''
Compute the average of a pandas series of timestamps
@param dates: Pandas series of pandas datetime objects
@param round_nearest_day: Round to the nearest day
@return Average of dates
'''
start = dates.min()
newdate = (dates - start).mean() + start
if round_nearest_day:
newdate = newdate.round('D')
return newdate
def dateMismatch(dates, days=10):
'''
Check if dates are not within a certain number of days of each other
@param dates: Iterable container of pandas timestamps
@param days: Number of days
@return true if they are not with 10 days, false otherwise
'''
for combo in combinations(dates,2):
if np.abs(combo[0] - combo[1]) > pd.to_timedelta(days, 'D'):
return True
return False
def computeEWD(grace_data, scale_factor, round_nearest_day=False):
'''
Compute scale corrected equivalent water depth
Equivalent water depth by averaging results from
GFZ, CSR, and JPL, and then applying the scale factor
@param grace_data: Data frame containing grace data
@param scale_factor: Scale factor to apply
@param round_nearest_day: Round dates to nearest day
@return Equivalent water depth determined by applying the scale factor to
the average GFZ, JPL and CSR.
'''
def cutMissingData(in_data, reverse=False):
'''
Removes data from the beginning (or ending if reverse=True) so that
data exists for all 3 sources (GFZ, JPL, and CSR).
This function is necessary as not all sources may get cut when
a starting and ending date is specified.
@param in_data: Input grace data
@param reverse: Remove data from end instead of beginning
@return Tuple containing modified in_data, the last cut date
'''
last_cut_date = None
if reverse==True:
index = in_data.index[::-1]
else:
index = in_data.index
for date in index:
cut = in_data.loc[date-pd.to_timedelta('10D'):date+pd.to_timedelta('10D')]
if min(len(cut['CSR'].dropna()), len(cut['GFZ'].dropna()), len(cut['JPL'].dropna())) == 0:
if reverse:
in_data = in_data.iloc[:-1]
else:
in_data = in_data.iloc[1:]
last_cut_date = date
else:
break
return in_data,last_cut_date
# Check if there is no valid data
if len(grace_data['CSR'].dropna()) + len(grace_data['GFZ'].dropna()) + len(grace_data['JPL'].dropna()) == 0:
if round_nearest_day == True:
return pd.Series(np.nan, index=grace_data.index.round('D'))
else:
return pd.Series(np.nan, index=grace_data.index)
# Find all months that have different dates supplied by GFZ, JPL, and CSR
offsets = grace_data[grace_data.isnull().any(axis=1)]
# Starting and ending months if they don't have valid data for all 3 data sets
offsets,cut_date1 = cutMissingData(offsets)
offsets,cut_date2 = cutMissingData(offsets, reverse=True)
# If beginning data has been cut, update data accordingly
if cut_date1 != None:
index_location = np.argwhere(grace_data.index == cut_date1)[0][0]
new_index = grace_data.index[index_location+1]
grace_data = grace_data.loc[new_index:]
# If ending data has been cut, update data accordingly
if cut_date2 != None:
index_location = np.argwhere(grace_data.index == cut_date2)[0][0]
new_index = grace_data.index[index_location-1]
grace_data = grace_data.loc[:new_index]
# Get all valid data for JPL, GFZ, and CSR
csr = offsets['CSR'].dropna()
gfz = offsets['GFZ'].dropna()
jpl = offsets['JPL'].dropna()
new_index = []
new_measurements = []
# Iterate over all data with offset dates and combine them
for (c_i, c_v), (g_i,g_v), (j_i, j_v) in zip(csr.iteritems(), gfz.iteritems(), jpl.iteritems()):
# Check if the dates are within 10 days of each other
dates = pd.Series([c_i,g_i,j_i])
if dateMismatch(dates):
raise ValueError('Different dates are not within 10 days of each other')
# Determine new index and average value of data
new_index.append(averageDates(dates, round_nearest_day))
new_measurements.append(np.mean([c_v, g_v, j_v]))
# Create series from averaged results
fixed_means = pd.Series(data = new_measurements, index=new_index)
fixed_means.index.name = 'Date'
# Averaging results from non mimsatched days
ewt = grace_data.dropna().mean(axis=1)
# If requested, round dates to nearest day
if round_nearest_day:
ewt_index = ewt.index.round('D')
else:
ewt_index = ewt.index
# Reset ewt index
ewt = pd.Series(ewt.as_matrix(),index = ewt_index)
# Combined data with mismatched days with data
# without mismatched days
ewt = pd.concat([ewt, fixed_means])
ewt.sort_index(inplace=True)
# Apply scale factor
ewt = ewt * scale_factor
# Return results
return ewt
def readTellusData(filename, lat_lon_list, lat_name, lon_name, data_name, data_label=None,
time_name=None, lat_bounds_name=None, lon_bounds_name=None,
uncertainty_name = None, lat_bounds=None, lon_bounds = None):
'''
This function reads in netcdf data provided by GRACE Tellus
@param filename: Name of file to read in
@param lat_lon_list: List of latitude, longitude tuples that are to be read
@param data_label: Label for data
@param lat_name: Name of latitude data
@param lon_name: Name of longitude data
@param data_name: Name of data product
@param time_name: Name of time data
@param lat_bounds_name: Name of latitude boundaries
@param lon_bounds_name: Name of longitude boundaries
@param uncertainty_name: Name of uncertainty in data set
@param lat_bounds: Latitude bounds
@param lon_bounds: Longitude bounds
@return dictionary containing data and dictionary containing latitude and longitude
'''
def findBin(in_value, in_bounds):
search = np.logical_and(in_value >= in_bounds[:,0], in_value < in_bounds[:,1])
if np.sum(search) == 1:
return np.argmax(search)
elif in_value == in_bounds[-1]:
return len(in_bounds)-1
else:
raise RuntimeError("Value not found")
if data_label == None and time_name != None:
raise RuntimeError("Need to specify data label when time data is used")
if lat_bounds is None and lon_bounds is not None or \
lat_bounds is not None and lon_bounds is None:
raise ValueError('Must specify both lat_bounds and lon_bounds, or neither of them')
nc = Dataset(filename, 'r')
lat_data = nc[lat_name][:]
lon_data = nc[lon_name][:]
data = nc[data_name][:]
if lat_bounds is None:
if lat_bounds_name == None and lon_bounds_name == None:
lat_edges = convertBinCentersToEdges(lat_data)
lon_edges = convertBinCentersToEdges(lon_data)
lat_bounds = np.stack([lat_edges[:-1], lat_edges[1:]], axis=1)
lon_bounds = np.stack([lon_edges[:-1], lon_edges[1:]], axis=1)
else:
lat_bounds = nc[lat_bounds_name][:]
lon_bounds = nc[lon_bounds_name][:]
if time_name != None:
time = nc[time_name]
date_index = pd.to_datetime(num2date(time[:],units=time.units,calendar=time.calendar))
if uncertainty_name != None:
uncertainty = nc[uncertainty_name][:]
data_dict = OrderedDict()
meta_dict = OrderedDict()
for lat, lon in lat_lon_list:
# Convert lontitude to 0-360
orig_lon = lon
if lon < 0:
lon += 360.
lat_bin = findBin(lat, lat_bounds)
lon_bin = findBin(lon, lon_bounds)
label = str(lat) + ', ' + str(orig_lon)
if time_name != None and uncertainty_name != None:
frame_data_dict = OrderedDict()
frame_data_dict[data_label] = data[:,lat_bin, lon_bin]
frame_data_dict['Uncertainty'] = uncertainty[:,lat_bin, lon_bin]
data_dict[label] = pd.DataFrame(frame_data_dict, index=date_index)
elif time_name != None and uncertainty_name == None:
data_dict[label] = pd.DataFrame({data_label : data[:, lat_bin, lon_bin]}, index=date_index)
else:
data_dict[label] = data[lat_bin, lon_bin]
meta_dict[label] = OrderedDict()
meta_dict[label]['Lat'] = lat
meta_dict[label]['Lon'] = orig_lon
return data_dict, meta_dict, lat_bounds, lon_bounds
def getStartEndDate(in_data):
label, data = next(in_data.items())
start_date = in_data.index[0]
end_date = in_data.index[-1]
return start_date, end_date | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/grace_util.py | grace_util.py |
# Standard library imports
from itertools import combinations
from collections import OrderedDict
# Scikit Data Access imports
from .image_util import convertBinCentersToEdges
# 3rd part imports
import pandas as pd
import numpy as np
from netCDF4 import Dataset, num2date
def averageDates(dates, round_nearest_day = False):
'''
Compute the average of a pandas series of timestamps
@param dates: Pandas series of pandas datetime objects
@param round_nearest_day: Round to the nearest day
@return Average of dates
'''
start = dates.min()
newdate = (dates - start).mean() + start
if round_nearest_day:
newdate = newdate.round('D')
return newdate
def dateMismatch(dates, days=10):
'''
Check if dates are not within a certain number of days of each other
@param dates: Iterable container of pandas timestamps
@param days: Number of days
@return true if they are not with 10 days, false otherwise
'''
for combo in combinations(dates,2):
if np.abs(combo[0] - combo[1]) > pd.to_timedelta(days, 'D'):
return True
return False
def computeEWD(grace_data, scale_factor, round_nearest_day=False):
'''
Compute scale corrected equivalent water depth
Equivalent water depth by averaging results from
GFZ, CSR, and JPL, and then applying the scale factor
@param grace_data: Data frame containing grace data
@param scale_factor: Scale factor to apply
@param round_nearest_day: Round dates to nearest day
@return Equivalent water depth determined by applying the scale factor to
the average GFZ, JPL and CSR.
'''
def cutMissingData(in_data, reverse=False):
'''
Removes data from the beginning (or ending if reverse=True) so that
data exists for all 3 sources (GFZ, JPL, and CSR).
This function is necessary as not all sources may get cut when
a starting and ending date is specified.
@param in_data: Input grace data
@param reverse: Remove data from end instead of beginning
@return Tuple containing modified in_data, the last cut date
'''
last_cut_date = None
if reverse==True:
index = in_data.index[::-1]
else:
index = in_data.index
for date in index:
cut = in_data.loc[date-pd.to_timedelta('10D'):date+pd.to_timedelta('10D')]
if min(len(cut['CSR'].dropna()), len(cut['GFZ'].dropna()), len(cut['JPL'].dropna())) == 0:
if reverse:
in_data = in_data.iloc[:-1]
else:
in_data = in_data.iloc[1:]
last_cut_date = date
else:
break
return in_data,last_cut_date
# Check if there is no valid data
if len(grace_data['CSR'].dropna()) + len(grace_data['GFZ'].dropna()) + len(grace_data['JPL'].dropna()) == 0:
if round_nearest_day == True:
return pd.Series(np.nan, index=grace_data.index.round('D'))
else:
return pd.Series(np.nan, index=grace_data.index)
# Find all months that have different dates supplied by GFZ, JPL, and CSR
offsets = grace_data[grace_data.isnull().any(axis=1)]
# Starting and ending months if they don't have valid data for all 3 data sets
offsets,cut_date1 = cutMissingData(offsets)
offsets,cut_date2 = cutMissingData(offsets, reverse=True)
# If beginning data has been cut, update data accordingly
if cut_date1 != None:
index_location = np.argwhere(grace_data.index == cut_date1)[0][0]
new_index = grace_data.index[index_location+1]
grace_data = grace_data.loc[new_index:]
# If ending data has been cut, update data accordingly
if cut_date2 != None:
index_location = np.argwhere(grace_data.index == cut_date2)[0][0]
new_index = grace_data.index[index_location-1]
grace_data = grace_data.loc[:new_index]
# Get all valid data for JPL, GFZ, and CSR
csr = offsets['CSR'].dropna()
gfz = offsets['GFZ'].dropna()
jpl = offsets['JPL'].dropna()
new_index = []
new_measurements = []
# Iterate over all data with offset dates and combine them
for (c_i, c_v), (g_i,g_v), (j_i, j_v) in zip(csr.iteritems(), gfz.iteritems(), jpl.iteritems()):
# Check if the dates are within 10 days of each other
dates = pd.Series([c_i,g_i,j_i])
if dateMismatch(dates):
raise ValueError('Different dates are not within 10 days of each other')
# Determine new index and average value of data
new_index.append(averageDates(dates, round_nearest_day))
new_measurements.append(np.mean([c_v, g_v, j_v]))
# Create series from averaged results
fixed_means = pd.Series(data = new_measurements, index=new_index)
fixed_means.index.name = 'Date'
# Averaging results from non mimsatched days
ewt = grace_data.dropna().mean(axis=1)
# If requested, round dates to nearest day
if round_nearest_day:
ewt_index = ewt.index.round('D')
else:
ewt_index = ewt.index
# Reset ewt index
ewt = pd.Series(ewt.as_matrix(),index = ewt_index)
# Combined data with mismatched days with data
# without mismatched days
ewt = pd.concat([ewt, fixed_means])
ewt.sort_index(inplace=True)
# Apply scale factor
ewt = ewt * scale_factor
# Return results
return ewt
def readTellusData(filename, lat_lon_list, lat_name, lon_name, data_name, data_label=None,
time_name=None, lat_bounds_name=None, lon_bounds_name=None,
uncertainty_name = None, lat_bounds=None, lon_bounds = None):
'''
This function reads in netcdf data provided by GRACE Tellus
@param filename: Name of file to read in
@param lat_lon_list: List of latitude, longitude tuples that are to be read
@param data_label: Label for data
@param lat_name: Name of latitude data
@param lon_name: Name of longitude data
@param data_name: Name of data product
@param time_name: Name of time data
@param lat_bounds_name: Name of latitude boundaries
@param lon_bounds_name: Name of longitude boundaries
@param uncertainty_name: Name of uncertainty in data set
@param lat_bounds: Latitude bounds
@param lon_bounds: Longitude bounds
@return dictionary containing data and dictionary containing latitude and longitude
'''
def findBin(in_value, in_bounds):
search = np.logical_and(in_value >= in_bounds[:,0], in_value < in_bounds[:,1])
if np.sum(search) == 1:
return np.argmax(search)
elif in_value == in_bounds[-1]:
return len(in_bounds)-1
else:
raise RuntimeError("Value not found")
if data_label == None and time_name != None:
raise RuntimeError("Need to specify data label when time data is used")
if lat_bounds is None and lon_bounds is not None or \
lat_bounds is not None and lon_bounds is None:
raise ValueError('Must specify both lat_bounds and lon_bounds, or neither of them')
nc = Dataset(filename, 'r')
lat_data = nc[lat_name][:]
lon_data = nc[lon_name][:]
data = nc[data_name][:]
if lat_bounds is None:
if lat_bounds_name == None and lon_bounds_name == None:
lat_edges = convertBinCentersToEdges(lat_data)
lon_edges = convertBinCentersToEdges(lon_data)
lat_bounds = np.stack([lat_edges[:-1], lat_edges[1:]], axis=1)
lon_bounds = np.stack([lon_edges[:-1], lon_edges[1:]], axis=1)
else:
lat_bounds = nc[lat_bounds_name][:]
lon_bounds = nc[lon_bounds_name][:]
if time_name != None:
time = nc[time_name]
date_index = pd.to_datetime(num2date(time[:],units=time.units,calendar=time.calendar))
if uncertainty_name != None:
uncertainty = nc[uncertainty_name][:]
data_dict = OrderedDict()
meta_dict = OrderedDict()
for lat, lon in lat_lon_list:
# Convert lontitude to 0-360
orig_lon = lon
if lon < 0:
lon += 360.
lat_bin = findBin(lat, lat_bounds)
lon_bin = findBin(lon, lon_bounds)
label = str(lat) + ', ' + str(orig_lon)
if time_name != None and uncertainty_name != None:
frame_data_dict = OrderedDict()
frame_data_dict[data_label] = data[:,lat_bin, lon_bin]
frame_data_dict['Uncertainty'] = uncertainty[:,lat_bin, lon_bin]
data_dict[label] = pd.DataFrame(frame_data_dict, index=date_index)
elif time_name != None and uncertainty_name == None:
data_dict[label] = pd.DataFrame({data_label : data[:, lat_bin, lon_bin]}, index=date_index)
else:
data_dict[label] = data[lat_bin, lon_bin]
meta_dict[label] = OrderedDict()
meta_dict[label]['Lat'] = lat
meta_dict[label]['Lon'] = orig_lon
return data_dict, meta_dict, lat_bounds, lon_bounds
def getStartEndDate(in_data):
label, data = next(in_data.items())
start_date = in_data.index[0]
end_date = in_data.index[-1]
return start_date, end_date | 0.750095 | 0.600657 |
# Standard library imports
from collections import OrderedDict
import os
# 3rd party import
import pandas as pd
from pkg_resources import resource_filename
from tqdm import tqdm
def retrieveCommonDatesHDF(support_data_filename, key_list, in_date_list):
'''
Get a list of all dates that have data available
@param support_data_filename: Filename of support data
@param key_list: List of keys in HDF file
@param in_date_list: Input date list to check
@return dictionary of dates with data
'''
valid_dates = OrderedDict()
support_full_path = resource_filename('skdaccess',os.path.join('support',support_data_filename))
for key in key_list:
try:
available_dates = pd.read_hdf(support_full_path, key)
except KeyError:
print('Unknown station:',key)
common_dates = list(set(in_date_list).intersection(set(available_dates)))
common_dates.sort()
valid_dates[key] = common_dates
return valid_dates
def progress_bar(in_iterable, total=None, enabled=True):
'''
Progess bar using tqdm
@param in_iterable: Input iterable
@param total: Total number of elements
@param enabled: Enable progress bar
'''
if enabled==True:
return tqdm(in_iterable, total=total)
else:
return in_iterable
def convertToStr(in_value, zfill=0):
'''
If input is a number, convert to a string
with zero paddding. Otherwise, just return
the string.
@input in_value: Input string or number
@zfill: Amount of zero padding
@return zero padded number as a string, or original string
'''
if isinstance(in_value, str):
return in_value
else:
return str(in_value).zfill(zfill)
def join_string(part1, part2, concatenation_string = 'AND', seperator=' '):
"""
Join two strings together using a concatenation string
Handles the case where either part1 or part2 are an empty string
@param part1: First string
@param part2: Second string
@param concatenation_string: String used to join part1 and part2
@param seperator: Seperator used to between each part and the
concatenation string
@return A single string that consists of the part1 and part2
joined together using a concatenation string
"""
if part1 == '':
return part2
elif part2 == '':
return part1
if part1[-1] == seperator:
sep1 = ''
else:
sep1 = seperator
if part2[0] == seperator:
sep2 = ''
else:
sep2 = ' '
return part1 + sep1 + concatenation_string + sep2 + part2 | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/support.py | support.py |
# Standard library imports
from collections import OrderedDict
import os
# 3rd party import
import pandas as pd
from pkg_resources import resource_filename
from tqdm import tqdm
def retrieveCommonDatesHDF(support_data_filename, key_list, in_date_list):
'''
Get a list of all dates that have data available
@param support_data_filename: Filename of support data
@param key_list: List of keys in HDF file
@param in_date_list: Input date list to check
@return dictionary of dates with data
'''
valid_dates = OrderedDict()
support_full_path = resource_filename('skdaccess',os.path.join('support',support_data_filename))
for key in key_list:
try:
available_dates = pd.read_hdf(support_full_path, key)
except KeyError:
print('Unknown station:',key)
common_dates = list(set(in_date_list).intersection(set(available_dates)))
common_dates.sort()
valid_dates[key] = common_dates
return valid_dates
def progress_bar(in_iterable, total=None, enabled=True):
'''
Progess bar using tqdm
@param in_iterable: Input iterable
@param total: Total number of elements
@param enabled: Enable progress bar
'''
if enabled==True:
return tqdm(in_iterable, total=total)
else:
return in_iterable
def convertToStr(in_value, zfill=0):
'''
If input is a number, convert to a string
with zero paddding. Otherwise, just return
the string.
@input in_value: Input string or number
@zfill: Amount of zero padding
@return zero padded number as a string, or original string
'''
if isinstance(in_value, str):
return in_value
else:
return str(in_value).zfill(zfill)
def join_string(part1, part2, concatenation_string = 'AND', seperator=' '):
"""
Join two strings together using a concatenation string
Handles the case where either part1 or part2 are an empty string
@param part1: First string
@param part2: Second string
@param concatenation_string: String used to join part1 and part2
@param seperator: Seperator used to between each part and the
concatenation string
@return A single string that consists of the part1 and part2
joined together using a concatenation string
"""
if part1 == '':
return part2
elif part2 == '':
return part1
if part1[-1] == seperator:
sep1 = ''
else:
sep1 = seperator
if part2[0] == seperator:
sep2 = ''
else:
sep2 = ' '
return part1 + sep1 + concatenation_string + sep2 + part2 | 0.698638 | 0.334236 |
# 3rd party imports
import numpy as np
from xml.dom import minidom
from six.moves.urllib.request import urlopen
from osgeo import gdal
# Standard library imports
from collections import OrderedDict
import sys
import re
def query_yes_no(question, default = "yes"):
'''
Ask a yes/no question via raw_input() and return the answer
Written by Trent Mick under the MIT license, see:
https://code.activestate.com/recipes/577058-query-yesno/
@param question: A string that is presented to the user
@param default: The presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user)
@return The "answer", i.e., either "yes" or "no"
'''
valid = {"yes":"yes", "y":"yes", "ye":"yes",
"no":"no", "n":"no"}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
def get_query_url(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id,
query_type, output, results,
number_product_limit, result_offset_number):
'''
Build the query URL using ODE REST interface
Adapted from the Orbital Data Explorer (ODE) REST Interface Manual
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product Id to look for, with wildcards (*) allowed
@param query_type: File type to look for, i.e., Product, Browse, Derived, or Referenced
@param output: Return format for product queries or error messages, i.e, XML or JSON
@param results: Type of files to look for, i.e., c: count of products; o: ODE Product ID;
p: PDS product identifies; m: product metadata; f: product files; b: browse image;
t: thumbnail image; l: complete PDS label; x: single product footprint
@param number_product_limit: Maximal number of products to return (100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@return Query URL
'''
ODE_REST_base_url = "http://oderest.rsl.wustl.edu/live2/?"
target = 'target=' + target
mission = '&ihid=' + mission
instrument = '&iid=' + instrument
product_type = '&pt=' + product_type
if western_lon is not None:
western_lon = '&westernlon=' + str(western_lon)
else:
western_lon = ''
if eastern_lon is not None:
eastern_lon = '&easternlon=' + str(eastern_lon)
else:
eastern_lon = ''
if min_lat is not None:
min_lat = '&minlat=' + str(min_lat)
else:
min_lat = ''
if max_lat is not None:
max_lat = '&maxlat=' + str(max_lat)
else:
max_lat = ''
if min_ob_time != '':
min_ob_time = '&mincreationtime=' + min_ob_time
if max_ob_time != '':
max_ob_time = '&maxcreationtime=' + max_ob_time
if product_id != '':
product_id = '&productid=' + product_id
if query_type != '':
query_type = '&query=' + query_type
if results != '':
results = '&results=' + results
if output != '':
output = '&output=' + output
if number_product_limit != '':
number_product_limit = '&limit=' + str(number_product_limit)
if result_offset_number != '':
result_offset_number = '&offset=' + str(result_offset_number)
# Concatenate the REST request
return ODE_REST_base_url + target + mission + instrument + product_type \
+ western_lon + eastern_lon + min_lat + max_lat + min_ob_time \
+ max_ob_time + query_type + results + output + number_product_limit \
+ result_offset_number + product_id
def get_files_urls(query_url, file_name = '*', print_info = False):
'''
Retrieve the files' URLs based on a query from ODE REST interface
Adapted from the Orbital Data Explorer (ODE) REST Interface Manual
@param query_url: URL resulting from the query of ODE
@param file_name: File name to look for, with wildcards (*) allowed
@param print_info: Print the files that will be downloaded
@return List of URLs
'''
url = urlopen(query_url)
query_results = url.read()
xml_results = minidom.parseString(query_results)
url.close()
error = xml_results.getElementsByTagName('Error')
if len(error) > 0:
print('\nError:', error[0].firstChild.data)
return None
limit_file_types = 'Product'
file_name = file_name.replace('*', '.')
products = xml_results.getElementsByTagName('Product')
file_urls = OrderedDict()
for product in products:
product_files = product.getElementsByTagName('Product_file')
product_id = product.getElementsByTagName('pdsid')[0]
if print_info == True:
print('\nProduct ID:', product_id.firstChild.data)
for product_file in product_files:
file_type = product_file.getElementsByTagName('Type')[0]
file_url = product_file.getElementsByTagName('URL')[0]
file_description = product_file.getElementsByTagName('Description')[0]
local_filename = file_url.firstChild.data.split('/')[-1]
local_file_extension = local_filename.split('.')[-1]
if re.search(file_name, local_filename) is not None:
# Restriction on the file type to download
if len(limit_file_types) > 0:
# If match, get the URL
if file_type.firstChild.data == limit_file_types:
file_urls[file_url.firstChild.data] = (product_id.firstChild.data,
file_description.firstChild.data)
if print_info == True:
print('File name:', file_url.firstChild.data.split('/')[-1])
print('Description:', file_description.firstChild.data)
# No restriction on the file type to download
else:
file_urls[file_url.firstChild.data] = (product_id.firstChild.data,
file_description.firstChild.data)
if print_info == True:
print('File name:', file_url.firstChild.data.split('/')[-1])
print('Description:', file_description.firstChild.data)
return file_urls
def query_files_urls(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id, file_name,
number_product_limit, result_offset_number):
'''
Retrieve the URL locations based on a query using ODE REST interface
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product Id to look for, with wildcards (*) allowed
@param file_name: File name to look for, with wildcards (*) allowed
@param number_product_limit: Maximal number of products to return (100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@return List of URL locations
'''
# Returns a list of products with selected product metadata that meet the query parameters
query_type = 'product'
# Controls the return format for product queries or error messages
output = 'XML'
# For each product found return the product files and IDS
results = 'fp'
query_url = get_query_url(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id,
query_type, output, results,
number_product_limit, result_offset_number)
print('Query URL:', query_url)
print('\nFiles that will be downloaded (if not previously downloaded):')
file_urls = get_files_urls(query_url, file_name, print_info = True)
if file_urls is None:
return OrderedDict()
elif len(file_urls) > 0:
should_continue = query_yes_no('\nDo you want to proceed?')
if should_continue == "no":
return OrderedDict()
else:
print('\nNo file found')
return file_urls
def correct_CRISM_label(label_file_location):
'''
Correct CRISM label file and allow GDAL to read it properly.
Necessary for Targeted Reduced Data Record (TRDR) data
Adapted from https://github.com/jlaura/crism/blob/master/csas.py
@param label_file_location: Local address of the current label
@return Local address of the new label
'''
new_label_file_location = label_file_location
if '_fixed' not in new_label_file_location:
new_label_file_location = '.'.join(label_file_location.split('.')[:-1]) \
+ '_fixed.' + label_file_location.split('.')[-1]
new_label_file = open(new_label_file_location, 'w')
for line in open(label_file_location, 'r'):
if "OBJECT = FILE" in line:
line = "/* OBJECT = FILE */\n"
if "LINES" in line:
lines = int(line.split("=")[1])
if "LINE_SAMPLES" in line:
samples = int(line.split("=")[1])
new_label_file.write(line)
new_label_file.close()
return new_label_file_location
def correct_file_name_case_in_label(label_file_location, other_file_locations):
'''
Correct a label file if the case of the related data file(s) is incorrect
and GDAL cannot read it properly
@param label_file_location: Local address of the current label
@param other_file_locations: Other files that were downloaded with the label file
@return Local address of the new label
'''
label_file_name = '_'.join('.'.join(label_file_location.split('/')[-1].split('.')[:-1]).split('_')[:-1])
insensitive_lalels = []
for file_location in other_file_locations:
file_name = '.'.join(file_location.split('/')[-1].split('.')[:-1])
if (file_location != label_file_location
and file_name == label_file_name):
insensitive_lalel = re.compile(re.escape(file_location.split('/')[-1]),
re.IGNORECASE)
insensitive_lalels.append((insensitive_lalel,
file_location.split('/')[-1]))
with open(label_file_location, 'r') as file:
label_file = file.read()
for insensitive_lalel, sensitive_lalel in insensitive_lalels:
label_file = insensitive_lalel.sub(sensitive_lalel, label_file)
new_label_file_location = label_file_location
if '_fixed' not in new_label_file_location:
new_label_file_location = '.'.join(label_file_location.split('.')[:-1]) \
+ '_fixed.' + label_file_location.split('.')[-1]
with open(new_label_file_location, 'w') as file:
file.write(label_file)
return new_label_file_location
def correct_label_file(label_file_location, other_file_locations = []):
'''
Correct a label file if GDAL cannot open the corresponding data file
@param label_file_location: Local address of the current label
@param other_file_locations: Other files that were downloaded with the label file
@return Local address of the new label
'''
# Correction not limited to CRISM data, in case other data had similar issues
new_label_file_location = correct_CRISM_label(label_file_location)
return correct_file_name_case_in_label(new_label_file_location,
other_file_locations)
def get_raster_array(gdal_raster, remove_ndv = True):
'''
Get a NumPy array from a raster opened with GDAL
@param gdal_raster: A raster opened with GDAL
@param remove_ndv: Replace the no-data value as mentionned in the label by np.nan
@return The array
'''
assert gdal_raster is not None, 'No raster available'
number_of_bands = gdal_raster.RasterCount
raster_array = gdal_raster.ReadAsArray().astype(np.float)
for i_band in range(number_of_bands):
raster_band = gdal_raster.GetRasterBand(i_band + 1)
no_data_value = raster_band.GetNoDataValue()
if no_data_value is not None and remove_ndv == True:
if number_of_bands > 1:
raster_array[i_band, :, :][raster_array[i_band, :, :] == no_data_value] = np.nan
else:
raster_array[raster_array == no_data_value] = np.nan
scale = raster_band.GetScale()
if scale is None:
scale = 1.
offset = raster_band.GetOffset()
if offset is None:
offset = 0.
if number_of_bands > 1:
raster_array[i_band, :, :] = raster_array[i_band, :, :]*scale + offset
else:
raster_array = raster_array*scale + offset
return raster_array
def get_raster_extent(gdal_raster):
'''
Get the extent of a raster opened with GDAL
@param gdal_raster: A raster opened with GDAL
@return The raster extent
'''
assert gdal_raster is not None, 'No raster available'
raster_x_size = gdal_raster.RasterXSize
raster_y_size = gdal_raster.RasterYSize
geotransform = gdal_raster.GetGeoTransform()
xmin = geotransform[0]
ymax = geotransform[3]
xmax = xmin + geotransform[1]*raster_x_size
ymin = ymax + geotransform[5]*raster_y_size
return (xmin, xmax, ymin, ymax) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/ode_util.py | ode_util.py |
# 3rd party imports
import numpy as np
from xml.dom import minidom
from six.moves.urllib.request import urlopen
from osgeo import gdal
# Standard library imports
from collections import OrderedDict
import sys
import re
def query_yes_no(question, default = "yes"):
'''
Ask a yes/no question via raw_input() and return the answer
Written by Trent Mick under the MIT license, see:
https://code.activestate.com/recipes/577058-query-yesno/
@param question: A string that is presented to the user
@param default: The presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user)
@return The "answer", i.e., either "yes" or "no"
'''
valid = {"yes":"yes", "y":"yes", "ye":"yes",
"no":"no", "n":"no"}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
def get_query_url(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id,
query_type, output, results,
number_product_limit, result_offset_number):
'''
Build the query URL using ODE REST interface
Adapted from the Orbital Data Explorer (ODE) REST Interface Manual
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product Id to look for, with wildcards (*) allowed
@param query_type: File type to look for, i.e., Product, Browse, Derived, or Referenced
@param output: Return format for product queries or error messages, i.e, XML or JSON
@param results: Type of files to look for, i.e., c: count of products; o: ODE Product ID;
p: PDS product identifies; m: product metadata; f: product files; b: browse image;
t: thumbnail image; l: complete PDS label; x: single product footprint
@param number_product_limit: Maximal number of products to return (100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@return Query URL
'''
ODE_REST_base_url = "http://oderest.rsl.wustl.edu/live2/?"
target = 'target=' + target
mission = '&ihid=' + mission
instrument = '&iid=' + instrument
product_type = '&pt=' + product_type
if western_lon is not None:
western_lon = '&westernlon=' + str(western_lon)
else:
western_lon = ''
if eastern_lon is not None:
eastern_lon = '&easternlon=' + str(eastern_lon)
else:
eastern_lon = ''
if min_lat is not None:
min_lat = '&minlat=' + str(min_lat)
else:
min_lat = ''
if max_lat is not None:
max_lat = '&maxlat=' + str(max_lat)
else:
max_lat = ''
if min_ob_time != '':
min_ob_time = '&mincreationtime=' + min_ob_time
if max_ob_time != '':
max_ob_time = '&maxcreationtime=' + max_ob_time
if product_id != '':
product_id = '&productid=' + product_id
if query_type != '':
query_type = '&query=' + query_type
if results != '':
results = '&results=' + results
if output != '':
output = '&output=' + output
if number_product_limit != '':
number_product_limit = '&limit=' + str(number_product_limit)
if result_offset_number != '':
result_offset_number = '&offset=' + str(result_offset_number)
# Concatenate the REST request
return ODE_REST_base_url + target + mission + instrument + product_type \
+ western_lon + eastern_lon + min_lat + max_lat + min_ob_time \
+ max_ob_time + query_type + results + output + number_product_limit \
+ result_offset_number + product_id
def get_files_urls(query_url, file_name = '*', print_info = False):
'''
Retrieve the files' URLs based on a query from ODE REST interface
Adapted from the Orbital Data Explorer (ODE) REST Interface Manual
@param query_url: URL resulting from the query of ODE
@param file_name: File name to look for, with wildcards (*) allowed
@param print_info: Print the files that will be downloaded
@return List of URLs
'''
url = urlopen(query_url)
query_results = url.read()
xml_results = minidom.parseString(query_results)
url.close()
error = xml_results.getElementsByTagName('Error')
if len(error) > 0:
print('\nError:', error[0].firstChild.data)
return None
limit_file_types = 'Product'
file_name = file_name.replace('*', '.')
products = xml_results.getElementsByTagName('Product')
file_urls = OrderedDict()
for product in products:
product_files = product.getElementsByTagName('Product_file')
product_id = product.getElementsByTagName('pdsid')[0]
if print_info == True:
print('\nProduct ID:', product_id.firstChild.data)
for product_file in product_files:
file_type = product_file.getElementsByTagName('Type')[0]
file_url = product_file.getElementsByTagName('URL')[0]
file_description = product_file.getElementsByTagName('Description')[0]
local_filename = file_url.firstChild.data.split('/')[-1]
local_file_extension = local_filename.split('.')[-1]
if re.search(file_name, local_filename) is not None:
# Restriction on the file type to download
if len(limit_file_types) > 0:
# If match, get the URL
if file_type.firstChild.data == limit_file_types:
file_urls[file_url.firstChild.data] = (product_id.firstChild.data,
file_description.firstChild.data)
if print_info == True:
print('File name:', file_url.firstChild.data.split('/')[-1])
print('Description:', file_description.firstChild.data)
# No restriction on the file type to download
else:
file_urls[file_url.firstChild.data] = (product_id.firstChild.data,
file_description.firstChild.data)
if print_info == True:
print('File name:', file_url.firstChild.data.split('/')[-1])
print('Description:', file_description.firstChild.data)
return file_urls
def query_files_urls(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id, file_name,
number_product_limit, result_offset_number):
'''
Retrieve the URL locations based on a query using ODE REST interface
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product Id to look for, with wildcards (*) allowed
@param file_name: File name to look for, with wildcards (*) allowed
@param number_product_limit: Maximal number of products to return (100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@return List of URL locations
'''
# Returns a list of products with selected product metadata that meet the query parameters
query_type = 'product'
# Controls the return format for product queries or error messages
output = 'XML'
# For each product found return the product files and IDS
results = 'fp'
query_url = get_query_url(target, mission, instrument, product_type,
western_lon, eastern_lon, min_lat, max_lat,
min_ob_time, max_ob_time, product_id,
query_type, output, results,
number_product_limit, result_offset_number)
print('Query URL:', query_url)
print('\nFiles that will be downloaded (if not previously downloaded):')
file_urls = get_files_urls(query_url, file_name, print_info = True)
if file_urls is None:
return OrderedDict()
elif len(file_urls) > 0:
should_continue = query_yes_no('\nDo you want to proceed?')
if should_continue == "no":
return OrderedDict()
else:
print('\nNo file found')
return file_urls
def correct_CRISM_label(label_file_location):
'''
Correct CRISM label file and allow GDAL to read it properly.
Necessary for Targeted Reduced Data Record (TRDR) data
Adapted from https://github.com/jlaura/crism/blob/master/csas.py
@param label_file_location: Local address of the current label
@return Local address of the new label
'''
new_label_file_location = label_file_location
if '_fixed' not in new_label_file_location:
new_label_file_location = '.'.join(label_file_location.split('.')[:-1]) \
+ '_fixed.' + label_file_location.split('.')[-1]
new_label_file = open(new_label_file_location, 'w')
for line in open(label_file_location, 'r'):
if "OBJECT = FILE" in line:
line = "/* OBJECT = FILE */\n"
if "LINES" in line:
lines = int(line.split("=")[1])
if "LINE_SAMPLES" in line:
samples = int(line.split("=")[1])
new_label_file.write(line)
new_label_file.close()
return new_label_file_location
def correct_file_name_case_in_label(label_file_location, other_file_locations):
'''
Correct a label file if the case of the related data file(s) is incorrect
and GDAL cannot read it properly
@param label_file_location: Local address of the current label
@param other_file_locations: Other files that were downloaded with the label file
@return Local address of the new label
'''
label_file_name = '_'.join('.'.join(label_file_location.split('/')[-1].split('.')[:-1]).split('_')[:-1])
insensitive_lalels = []
for file_location in other_file_locations:
file_name = '.'.join(file_location.split('/')[-1].split('.')[:-1])
if (file_location != label_file_location
and file_name == label_file_name):
insensitive_lalel = re.compile(re.escape(file_location.split('/')[-1]),
re.IGNORECASE)
insensitive_lalels.append((insensitive_lalel,
file_location.split('/')[-1]))
with open(label_file_location, 'r') as file:
label_file = file.read()
for insensitive_lalel, sensitive_lalel in insensitive_lalels:
label_file = insensitive_lalel.sub(sensitive_lalel, label_file)
new_label_file_location = label_file_location
if '_fixed' not in new_label_file_location:
new_label_file_location = '.'.join(label_file_location.split('.')[:-1]) \
+ '_fixed.' + label_file_location.split('.')[-1]
with open(new_label_file_location, 'w') as file:
file.write(label_file)
return new_label_file_location
def correct_label_file(label_file_location, other_file_locations = []):
'''
Correct a label file if GDAL cannot open the corresponding data file
@param label_file_location: Local address of the current label
@param other_file_locations: Other files that were downloaded with the label file
@return Local address of the new label
'''
# Correction not limited to CRISM data, in case other data had similar issues
new_label_file_location = correct_CRISM_label(label_file_location)
return correct_file_name_case_in_label(new_label_file_location,
other_file_locations)
def get_raster_array(gdal_raster, remove_ndv = True):
'''
Get a NumPy array from a raster opened with GDAL
@param gdal_raster: A raster opened with GDAL
@param remove_ndv: Replace the no-data value as mentionned in the label by np.nan
@return The array
'''
assert gdal_raster is not None, 'No raster available'
number_of_bands = gdal_raster.RasterCount
raster_array = gdal_raster.ReadAsArray().astype(np.float)
for i_band in range(number_of_bands):
raster_band = gdal_raster.GetRasterBand(i_band + 1)
no_data_value = raster_band.GetNoDataValue()
if no_data_value is not None and remove_ndv == True:
if number_of_bands > 1:
raster_array[i_band, :, :][raster_array[i_band, :, :] == no_data_value] = np.nan
else:
raster_array[raster_array == no_data_value] = np.nan
scale = raster_band.GetScale()
if scale is None:
scale = 1.
offset = raster_band.GetOffset()
if offset is None:
offset = 0.
if number_of_bands > 1:
raster_array[i_band, :, :] = raster_array[i_band, :, :]*scale + offset
else:
raster_array = raster_array*scale + offset
return raster_array
def get_raster_extent(gdal_raster):
'''
Get the extent of a raster opened with GDAL
@param gdal_raster: A raster opened with GDAL
@return The raster extent
'''
assert gdal_raster is not None, 'No raster available'
raster_x_size = gdal_raster.RasterXSize
raster_y_size = gdal_raster.RasterYSize
geotransform = gdal_raster.GetGeoTransform()
xmin = geotransform[0]
ymax = geotransform[3]
xmax = xmin + geotransform[1]*raster_x_size
ymin = ymax + geotransform[5]*raster_y_size
return (xmin, xmax, ymin, ymax) | 0.499512 | 0.29471 |
# Standard library imports
from collections import OrderedDict
from html.parser import HTMLParser
from io import StringIO
import re
from calendar import monthrange
# 3rd party imports
import pandas as pd
from six.moves.urllib.parse import urlencode
# Package imports
from .support import convertToStr
class SoundingParser(HTMLParser):
''' This class parses Wyoming Sounding data '''
def __init__(self):
''' Initialize SoundingParser '''
self.data_dict = OrderedDict()
self.metadata_dict = OrderedDict()
self.label = None
self.in_pre_tag = False
self.in_header = False
self.read_data = True
super(SoundingParser, self).__init__()
def handle_starttag(self, tag, attrs):
'''
Function called everytime a start tag is encountered
@param tag: Starting tag
@param attrs: Tag attributes
'''
if tag == 'pre':
self.in_pre_tag = True
elif re.match('h[0-9]*', tag):
self.in_header = True
def handle_endtag(self, tag):
'''
Function called everytime an end tag is encountered
@param tag: Ending tag
'''
if tag == 'pre':
self.in_pre_tag = False
elif re.match('h[0-9]*', tag):
self.in_header = False
def handle_data(self, data):
'''
Function to parse data between \<pre\> tags
@param data: Input data
'''
if self.in_pre_tag == True and self.read_data == True:
self.data_dict[self.label] = pd.read_fwf(StringIO(data), widths=[7,7,7,7,7,7,7,7,7,7,7],
header=0, skiprows=[0,1,3,4])
split_data = data.split('\n')
headings = split_data[2].split()
units = split_data[3].split()
self.metadata_dict[self.label] = OrderedDict()
self.metadata_dict[self.label]['units'] = [(heading, unit) for heading, unit in zip(headings, units)]
self.read_data = False
self.tmp = data
elif self.in_pre_tag == True and self.read_data == False:
station_metadata_dict = OrderedDict()
for line in data.splitlines():
if line != '':
metadata = line.split(':')
station_metadata_dict[metadata[0].strip()] = metadata[1].strip()
self.metadata_dict[self.label]['metadata'] = station_metadata_dict
self.read_data = True
elif self.read_data == True and self.in_header == True:
self.label = data.strip()
def generateQueries(station_number, year_list, month_list, day_start, day_end, start_hour,
end_hour):
'''
Generate url queries for sounding data
@param station_number: Input station number
@param year_list: Input years as a list
@param month_list: Input month as a list
@param day_start: Starting day
@param day_end: Ending day
@param start_hour: Starting hour
@param end_hour: Ending hour
@return list of urls containing requested data
'''
url_query_list = []
base_url = 'http://weather.uwyo.edu/cgi-bin/sounding?'
for year in year_list:
for month in month_list:
day_start = min(day_start, monthrange(year, month)[1])
day_end = min(day_end, monthrange(year, month)[1])
start_time = convertToStr(day_start,2) + convertToStr(start_hour,2)
end_time = convertToStr(day_end,2) + convertToStr(end_hour,2)
query = OrderedDict()
query['region'] = 'naconf'
query['TYPE'] = 'TEXT:LIST'
query['YEAR'] = convertToStr(year, 0)
query['MONTH'] = convertToStr(month, 2)
query['FROM'] = start_time
query['TO'] = end_time
query['STNM'] = convertToStr(station_number, 5)
url_query_list.append(base_url + urlencode(query))
return url_query_list | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/sounding_util.py | sounding_util.py |
# Standard library imports
from collections import OrderedDict
from html.parser import HTMLParser
from io import StringIO
import re
from calendar import monthrange
# 3rd party imports
import pandas as pd
from six.moves.urllib.parse import urlencode
# Package imports
from .support import convertToStr
class SoundingParser(HTMLParser):
''' This class parses Wyoming Sounding data '''
def __init__(self):
''' Initialize SoundingParser '''
self.data_dict = OrderedDict()
self.metadata_dict = OrderedDict()
self.label = None
self.in_pre_tag = False
self.in_header = False
self.read_data = True
super(SoundingParser, self).__init__()
def handle_starttag(self, tag, attrs):
'''
Function called everytime a start tag is encountered
@param tag: Starting tag
@param attrs: Tag attributes
'''
if tag == 'pre':
self.in_pre_tag = True
elif re.match('h[0-9]*', tag):
self.in_header = True
def handle_endtag(self, tag):
'''
Function called everytime an end tag is encountered
@param tag: Ending tag
'''
if tag == 'pre':
self.in_pre_tag = False
elif re.match('h[0-9]*', tag):
self.in_header = False
def handle_data(self, data):
'''
Function to parse data between \<pre\> tags
@param data: Input data
'''
if self.in_pre_tag == True and self.read_data == True:
self.data_dict[self.label] = pd.read_fwf(StringIO(data), widths=[7,7,7,7,7,7,7,7,7,7,7],
header=0, skiprows=[0,1,3,4])
split_data = data.split('\n')
headings = split_data[2].split()
units = split_data[3].split()
self.metadata_dict[self.label] = OrderedDict()
self.metadata_dict[self.label]['units'] = [(heading, unit) for heading, unit in zip(headings, units)]
self.read_data = False
self.tmp = data
elif self.in_pre_tag == True and self.read_data == False:
station_metadata_dict = OrderedDict()
for line in data.splitlines():
if line != '':
metadata = line.split(':')
station_metadata_dict[metadata[0].strip()] = metadata[1].strip()
self.metadata_dict[self.label]['metadata'] = station_metadata_dict
self.read_data = True
elif self.read_data == True and self.in_header == True:
self.label = data.strip()
def generateQueries(station_number, year_list, month_list, day_start, day_end, start_hour,
end_hour):
'''
Generate url queries for sounding data
@param station_number: Input station number
@param year_list: Input years as a list
@param month_list: Input month as a list
@param day_start: Starting day
@param day_end: Ending day
@param start_hour: Starting hour
@param end_hour: Ending hour
@return list of urls containing requested data
'''
url_query_list = []
base_url = 'http://weather.uwyo.edu/cgi-bin/sounding?'
for year in year_list:
for month in month_list:
day_start = min(day_start, monthrange(year, month)[1])
day_end = min(day_end, monthrange(year, month)[1])
start_time = convertToStr(day_start,2) + convertToStr(start_hour,2)
end_time = convertToStr(day_end,2) + convertToStr(end_hour,2)
query = OrderedDict()
query['region'] = 'naconf'
query['TYPE'] = 'TEXT:LIST'
query['YEAR'] = convertToStr(year, 0)
query['MONTH'] = convertToStr(month, 2)
query['FROM'] = start_time
query['TO'] = end_time
query['STNM'] = convertToStr(station_number, 5)
url_query_list.append(base_url + urlencode(query))
return url_query_list | 0.716318 | 0.270739 |
import numpy as np
from scipy.interpolate import RectBivariateSpline
class SplineLatLon(object):
'''
Holds a 2d spline for interpolating lat/lon grid
'''
def __init__(self, lat_func=None, lon_func=None, lat_grid=None, lon_grid=None,
x_points=None, y_points=None, lat_extents=None, lon_extents=None,
y_num_pixels=None, x_num_pixels=None, x_offset=0, y_offset=0,
interp_type='grid'):
'''
Initialize SplineLatLon with premade lat/lon functions or information about the latitude and longitude
@param lat_func: Latitude spline function
@param lon_func: Longitude spline function
@param lat_grid: Latitude grid
@param lon_grid: Longitude grid
@param x_points: 1d array of x coordinates
@param y_points: 1d array of y coordinates
@param lon_extents: Extent of data in longitude
@param lat_extents: Extent of data in latitude
@param y_num_pixels: Number of y coordinates
@param x_num_pixels: Number of x coordinates
@param x_offset: Offset in the x coordinate
@param y_offset: Offset in the y coordinate
@param interp_type: Interpolate type. Currently only 'grid' type is supported
'''
if lat_extents is not None and lon_extents is not None and \
y_num_pixels is not None and x_num_pixels is not None and \
lat_grid is None and lon_grid is None:
lat_pixel_size = (lat_extents[1] - lat_extents[0]) / y_num_pixels
lon_pixel_size = (lon_extents[1] - lon_extents[0]) / x_num_pixels
lat_coords = np.linspace(lat_extents[0] + 0.5*lat_pixel_size,
lat_extents[1] - 0.5*lat_pixel_size,
num=y_num_pixels, endpoint=True)
lon_coords = np.linspace(lon_extents[0] + 0.5*lon_pixel_size,
lon_extents[1] - 0.5*lon_pixel_size,
num=x_num_pixels, endpoint=True)
lon_grid, lat_grid = np.meshgrid(lon_coords, lat_coords)
if lat_func != None and lon_func != None:
self.lat_func = lat_func
self.lon_func = lon_func
elif lat_grid is not None and lon_grid is not None:
if x_points==None and y_points==None:
if interp_type == 'grid':
x_points = np.arange(lat_grid.shape[1])
y_points = np.arange(lat_grid.shape[0])
elif 'coords':
x_points, y_points = np.meshpoints(np.arange(lat_grid.shape[1]), np.arange(lat_grid.shape[0]))
else:
raise NotImplemented('Only interp_type grid is implemented')
elif (x_points is None and y_points is not None) or (x_points is not None and y_points is not None):
raise RuntimeError('Either both x and y points must be specified or neither of them')
if interp_type=='grid':
self.lat_func = RectBivariateSpline(y_points, x_points, lat_grid)
self.lon_func = RectBivariateSpline(y_points, x_points, lon_grid)
else:
raise NotImplemented('Only interp_type grid is implemented')
self.x_offset = x_offset
self.y_offset = y_offset
def __call__(self, y, x):
'''
Convert pixel coordinates to lat/lon
@param y: y coordinate
@param x: x coordinate
@return (lat, lon)
'''
ret_lat = self.lat_func(y+self.y_offset,x+self.x_offset, grid=False)
ret_lon = self.lon_func(y+self.y_offset,x+self.x_offset, grid=False)
if np.isscalar(y) and np.isscalar(x):
ret_lat = ret_lat.item()
ret_lon = ret_lon.item()
return ret_lat, ret_lon
def SplineGeolocation(object):
'''
This class holds splines to convert between 2d cartesian and geodetic coordinates
'''
def __init__(self, lat_spline, lon_spline, x_spline, y_spline, x_offset=0, y_offset=0):
self.x_offset = x_offset
self.y_offset = y_offset
self.lat_spline = lat_spline
self.lon_spline = lon_spline
self.x_spline = x_spline
self.y_spline = y_spline
def _accessSpline(self, *args, spline_function):
'''
Access values from a spline.
@param *args: Input arguments for spline function
@param spline_function: Spline function used for interpolation
@return interpolated values from the spline
'''
ret_val = spline_function(*args, grid=False)
if np.alltrue([np.isscalar([arg for arg in args])]):
ret_val = ret_val.item()
else:
return ret_val
class LinearGeolocation(object):
'''
This class provides functions to convert between pixel and geodetic coordinates
Assumes a linear relationship between pixel and geodetic coordinates
'''
def __init__(self, data, extents, x_offset=0, y_offset=0, flip_y=False):
'''
Initialize Linear Geolocation object
@param data: Numpy 2d data
@param extents: Latitude and longitude extents
@param x_offset: Pixel offset in x
@param y_offset: Pixel offset in y
@param flip_y: The y axis has been flipped so that increasing
y values are decreasing in latitude
'''
self.flip_y = flip_y
self.lon_extents = extents[:2]
self.lat_extents = extents[2:]
self.lat_pixel_size = (self.lat_extents[1] - self.lat_extents[0]) / data.shape[0]
self.lon_pixel_size = (self.lon_extents[1] - self.lon_extents[0]) / data.shape[1]
self.start_lat = self.lat_pixel_size / 2 + self.lat_extents[0]
self.start_lon = self.lon_pixel_size / 2 + self.lon_extents[0]
self.x_offset = x_offset
self.y_offset = y_offset
self.len_x = data.shape[1]
self.len_y = data.shape[0]
def getLatLon(self, y, x):
'''
Retrive the latitude and longitude from pixel coordinates
@param y: The y pixel
@param x: The x pixel
@return (latitude, longitude) of the pixel coordinate
'''
if self.flip_y:
y_coord = (self.len_y - y - 1) + self.y_offset
else:
y_coord = y + self.y_offset
lat = self.start_lat + y_coord * self.lat_pixel_size
lon = self.start_lon + (x + self.x_offset) * self.lon_pixel_size
return lat, lon
def getYX(self, lat, lon):
'''
Retrive the pixel coordinates from the latitude and longitude
@param lat: The Latitude
@param lon: The Longitude
@return (y, x) pixel coordinates of the input latitude and longitude
'''
y = (lat - self.start_lat) / self.lat_pixel_size - self.y_offset
x = (lon - self.start_lon) / self.lon_pixel_size - self.x_offset
if self.flip_y:
y = self.len_y - y - 1
return y, x
def getExtents(self):
'''
Retrieve the extents of the data
@return (minimum_longitude, maximum_longitude, minimum_latitude, maximum_latitude)
'''
return self.lon_extents + self.lat_extents
def getExtentsFromCentersPlateCarree(westmost_pixel_lon, eastmost_pixel_lon,
southmost_pixel_lat, northmost_pixel_lat,
lon_grid_spacing, lat_grid_spacing):
'''
Given the centers and grid spacing, return the extents of data
using assuming Plate Caree
@param westmost_pixel_lon: West most pixel coordinate
@param eastmost_pixel_lon: East most pixel coordinate
@param southmost_pixel_lat: South most pixel coordinate
@param northmost_pixel_lon: North most pixel coordinate
@return The starting longitude, ending longitude, starting latitude, and ending latitude
'''
start_lon = westmost_pixel_lon - lon_grid_spacing/2
end_lon = eastmost_pixel_lon + lon_grid_spacing/2
start_lat = southmost_pixel_lat - lat_grid_spacing/2
end_lat = northmost_pixel_lat + lat_grid_spacing/2
return (start_lon, end_lon, start_lat, end_lat)
def convertBinCentersToEdges(bin_centers, dtype = None):
'''
Calculate edges of a set of bins from their centers
@param bin_centers: Array of bin centers
@param dtype: Data type of array used to store bin edges
@return bin_edges
'''
if dtype is None:
dtype == bin_centers.dtype
centers_length = len(bin_centers)
edges = np.zeros(centers_length + 1, dtype=dtype)
edges[1:centers_length] = (bin_centers[:-1] + bin_centers[1:]) / 2
edges[0] = 2*bin_centers[0] - edges[1]
edges[-1] = 2*bin_centers[-1] - edges[-2]
return edges
class AffineGlobalCoords(object):
'''
Convert between projected and pixel coordinates using an affine transformation
'''
def __init__(self, aff_coeffs, center_pixels=False):
'''
Initialize Global Coords Object
@param aff_coeffs: Affine coefficients
@param center_pixels: Apply offsets so that integer values refer to the
center of the pixel and not the edge
'''
self._aff_coeffs = aff_coeffs
if center_pixels:
self._x_offset = 0.5
self._y_offset = 0.5
else:
self._x_offset = 0.0
self._y_offset = 0.0
def getProjectedYX(self, y_array, x_array):
'''
Convert pixel coordinates to projected coordinates
@param y_array: Input y pixel coordinates
@param x_array: Input x pixel coordinates
@return projected y coordinates, projected x coordinates
'''
y = y_array + self._y_offset
x = x_array + self._x_offset
return (self._aff_coeffs[3] + self._aff_coeffs[4]*x + self._aff_coeffs[5]*y,
self._aff_coeffs[0] + self._aff_coeffs[1]*x + self._aff_coeffs[2]*y)
def getPixelYX(self, y_proj, x_proj):
'''
Convert from projected coordinates to pixel coordinates
@param y_proj: Input projected y coordinates
@param x_proj: Input projected x coordinates
@return y pixel coordinates, x pixel coordinates
'''
c0 = self._aff_coeffs[0]
c1 = self._aff_coeffs[1]
c2 = self._aff_coeffs[2]
c3 = self._aff_coeffs[3]
c4 = self._aff_coeffs[4]
c5 = self._aff_coeffs[5]
y = (c4*(c0-x_proj) + c1*y_proj - c1*c3) / (c1*c5 - c2*c4)
x = -(c5 * (c0 - x_proj) + c2*y_proj - c2*c3) / (c1*c5 - c2*c4)
return y - self._y_offset, x - self._x_offset
def getGeoTransform(extents, x_size, y_size, y_flipped=True):
"""
Get 6 geotransform coefficients from the extents of an image and its shape
Assumes origin is in the upper left and the x pixel coordinate does not depend on
y projected coordinate, and the y pixl coordinate doesn't depend on the x projected
coordinate
@param extents: Image extents (x_min, x_max, y_min, y_max)
@param x_size: Number of x pixels
@param y_size: Number of y pixels
@param y_flipped: The y pixel coordinates are flipped relative
to the projected coordinates
@return list containing the 6 affine transformation coordinates
"""
x_res = (extents[1] - extents[0]) / x_size
y_res = (extents[3] - extents[2]) / y_size
geo_transform = []
geo_transform.append(extents[0])
geo_transform.append(x_res)
geo_transform.append(0)
geo_transform.append(extents[-1])
geo_transform.append(0)
geo_transform.append(y_res)
if y_flipped == True:
geo_transform[-1] *= -1
return geo_transform | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/image_util.py | image_util.py |
import numpy as np
from scipy.interpolate import RectBivariateSpline
class SplineLatLon(object):
'''
Holds a 2d spline for interpolating lat/lon grid
'''
def __init__(self, lat_func=None, lon_func=None, lat_grid=None, lon_grid=None,
x_points=None, y_points=None, lat_extents=None, lon_extents=None,
y_num_pixels=None, x_num_pixels=None, x_offset=0, y_offset=0,
interp_type='grid'):
'''
Initialize SplineLatLon with premade lat/lon functions or information about the latitude and longitude
@param lat_func: Latitude spline function
@param lon_func: Longitude spline function
@param lat_grid: Latitude grid
@param lon_grid: Longitude grid
@param x_points: 1d array of x coordinates
@param y_points: 1d array of y coordinates
@param lon_extents: Extent of data in longitude
@param lat_extents: Extent of data in latitude
@param y_num_pixels: Number of y coordinates
@param x_num_pixels: Number of x coordinates
@param x_offset: Offset in the x coordinate
@param y_offset: Offset in the y coordinate
@param interp_type: Interpolate type. Currently only 'grid' type is supported
'''
if lat_extents is not None and lon_extents is not None and \
y_num_pixels is not None and x_num_pixels is not None and \
lat_grid is None and lon_grid is None:
lat_pixel_size = (lat_extents[1] - lat_extents[0]) / y_num_pixels
lon_pixel_size = (lon_extents[1] - lon_extents[0]) / x_num_pixels
lat_coords = np.linspace(lat_extents[0] + 0.5*lat_pixel_size,
lat_extents[1] - 0.5*lat_pixel_size,
num=y_num_pixels, endpoint=True)
lon_coords = np.linspace(lon_extents[0] + 0.5*lon_pixel_size,
lon_extents[1] - 0.5*lon_pixel_size,
num=x_num_pixels, endpoint=True)
lon_grid, lat_grid = np.meshgrid(lon_coords, lat_coords)
if lat_func != None and lon_func != None:
self.lat_func = lat_func
self.lon_func = lon_func
elif lat_grid is not None and lon_grid is not None:
if x_points==None and y_points==None:
if interp_type == 'grid':
x_points = np.arange(lat_grid.shape[1])
y_points = np.arange(lat_grid.shape[0])
elif 'coords':
x_points, y_points = np.meshpoints(np.arange(lat_grid.shape[1]), np.arange(lat_grid.shape[0]))
else:
raise NotImplemented('Only interp_type grid is implemented')
elif (x_points is None and y_points is not None) or (x_points is not None and y_points is not None):
raise RuntimeError('Either both x and y points must be specified or neither of them')
if interp_type=='grid':
self.lat_func = RectBivariateSpline(y_points, x_points, lat_grid)
self.lon_func = RectBivariateSpline(y_points, x_points, lon_grid)
else:
raise NotImplemented('Only interp_type grid is implemented')
self.x_offset = x_offset
self.y_offset = y_offset
def __call__(self, y, x):
'''
Convert pixel coordinates to lat/lon
@param y: y coordinate
@param x: x coordinate
@return (lat, lon)
'''
ret_lat = self.lat_func(y+self.y_offset,x+self.x_offset, grid=False)
ret_lon = self.lon_func(y+self.y_offset,x+self.x_offset, grid=False)
if np.isscalar(y) and np.isscalar(x):
ret_lat = ret_lat.item()
ret_lon = ret_lon.item()
return ret_lat, ret_lon
def SplineGeolocation(object):
'''
This class holds splines to convert between 2d cartesian and geodetic coordinates
'''
def __init__(self, lat_spline, lon_spline, x_spline, y_spline, x_offset=0, y_offset=0):
self.x_offset = x_offset
self.y_offset = y_offset
self.lat_spline = lat_spline
self.lon_spline = lon_spline
self.x_spline = x_spline
self.y_spline = y_spline
def _accessSpline(self, *args, spline_function):
'''
Access values from a spline.
@param *args: Input arguments for spline function
@param spline_function: Spline function used for interpolation
@return interpolated values from the spline
'''
ret_val = spline_function(*args, grid=False)
if np.alltrue([np.isscalar([arg for arg in args])]):
ret_val = ret_val.item()
else:
return ret_val
class LinearGeolocation(object):
'''
This class provides functions to convert between pixel and geodetic coordinates
Assumes a linear relationship between pixel and geodetic coordinates
'''
def __init__(self, data, extents, x_offset=0, y_offset=0, flip_y=False):
'''
Initialize Linear Geolocation object
@param data: Numpy 2d data
@param extents: Latitude and longitude extents
@param x_offset: Pixel offset in x
@param y_offset: Pixel offset in y
@param flip_y: The y axis has been flipped so that increasing
y values are decreasing in latitude
'''
self.flip_y = flip_y
self.lon_extents = extents[:2]
self.lat_extents = extents[2:]
self.lat_pixel_size = (self.lat_extents[1] - self.lat_extents[0]) / data.shape[0]
self.lon_pixel_size = (self.lon_extents[1] - self.lon_extents[0]) / data.shape[1]
self.start_lat = self.lat_pixel_size / 2 + self.lat_extents[0]
self.start_lon = self.lon_pixel_size / 2 + self.lon_extents[0]
self.x_offset = x_offset
self.y_offset = y_offset
self.len_x = data.shape[1]
self.len_y = data.shape[0]
def getLatLon(self, y, x):
'''
Retrive the latitude and longitude from pixel coordinates
@param y: The y pixel
@param x: The x pixel
@return (latitude, longitude) of the pixel coordinate
'''
if self.flip_y:
y_coord = (self.len_y - y - 1) + self.y_offset
else:
y_coord = y + self.y_offset
lat = self.start_lat + y_coord * self.lat_pixel_size
lon = self.start_lon + (x + self.x_offset) * self.lon_pixel_size
return lat, lon
def getYX(self, lat, lon):
'''
Retrive the pixel coordinates from the latitude and longitude
@param lat: The Latitude
@param lon: The Longitude
@return (y, x) pixel coordinates of the input latitude and longitude
'''
y = (lat - self.start_lat) / self.lat_pixel_size - self.y_offset
x = (lon - self.start_lon) / self.lon_pixel_size - self.x_offset
if self.flip_y:
y = self.len_y - y - 1
return y, x
def getExtents(self):
'''
Retrieve the extents of the data
@return (minimum_longitude, maximum_longitude, minimum_latitude, maximum_latitude)
'''
return self.lon_extents + self.lat_extents
def getExtentsFromCentersPlateCarree(westmost_pixel_lon, eastmost_pixel_lon,
southmost_pixel_lat, northmost_pixel_lat,
lon_grid_spacing, lat_grid_spacing):
'''
Given the centers and grid spacing, return the extents of data
using assuming Plate Caree
@param westmost_pixel_lon: West most pixel coordinate
@param eastmost_pixel_lon: East most pixel coordinate
@param southmost_pixel_lat: South most pixel coordinate
@param northmost_pixel_lon: North most pixel coordinate
@return The starting longitude, ending longitude, starting latitude, and ending latitude
'''
start_lon = westmost_pixel_lon - lon_grid_spacing/2
end_lon = eastmost_pixel_lon + lon_grid_spacing/2
start_lat = southmost_pixel_lat - lat_grid_spacing/2
end_lat = northmost_pixel_lat + lat_grid_spacing/2
return (start_lon, end_lon, start_lat, end_lat)
def convertBinCentersToEdges(bin_centers, dtype = None):
'''
Calculate edges of a set of bins from their centers
@param bin_centers: Array of bin centers
@param dtype: Data type of array used to store bin edges
@return bin_edges
'''
if dtype is None:
dtype == bin_centers.dtype
centers_length = len(bin_centers)
edges = np.zeros(centers_length + 1, dtype=dtype)
edges[1:centers_length] = (bin_centers[:-1] + bin_centers[1:]) / 2
edges[0] = 2*bin_centers[0] - edges[1]
edges[-1] = 2*bin_centers[-1] - edges[-2]
return edges
class AffineGlobalCoords(object):
'''
Convert between projected and pixel coordinates using an affine transformation
'''
def __init__(self, aff_coeffs, center_pixels=False):
'''
Initialize Global Coords Object
@param aff_coeffs: Affine coefficients
@param center_pixels: Apply offsets so that integer values refer to the
center of the pixel and not the edge
'''
self._aff_coeffs = aff_coeffs
if center_pixels:
self._x_offset = 0.5
self._y_offset = 0.5
else:
self._x_offset = 0.0
self._y_offset = 0.0
def getProjectedYX(self, y_array, x_array):
'''
Convert pixel coordinates to projected coordinates
@param y_array: Input y pixel coordinates
@param x_array: Input x pixel coordinates
@return projected y coordinates, projected x coordinates
'''
y = y_array + self._y_offset
x = x_array + self._x_offset
return (self._aff_coeffs[3] + self._aff_coeffs[4]*x + self._aff_coeffs[5]*y,
self._aff_coeffs[0] + self._aff_coeffs[1]*x + self._aff_coeffs[2]*y)
def getPixelYX(self, y_proj, x_proj):
'''
Convert from projected coordinates to pixel coordinates
@param y_proj: Input projected y coordinates
@param x_proj: Input projected x coordinates
@return y pixel coordinates, x pixel coordinates
'''
c0 = self._aff_coeffs[0]
c1 = self._aff_coeffs[1]
c2 = self._aff_coeffs[2]
c3 = self._aff_coeffs[3]
c4 = self._aff_coeffs[4]
c5 = self._aff_coeffs[5]
y = (c4*(c0-x_proj) + c1*y_proj - c1*c3) / (c1*c5 - c2*c4)
x = -(c5 * (c0 - x_proj) + c2*y_proj - c2*c3) / (c1*c5 - c2*c4)
return y - self._y_offset, x - self._x_offset
def getGeoTransform(extents, x_size, y_size, y_flipped=True):
"""
Get 6 geotransform coefficients from the extents of an image and its shape
Assumes origin is in the upper left and the x pixel coordinate does not depend on
y projected coordinate, and the y pixl coordinate doesn't depend on the x projected
coordinate
@param extents: Image extents (x_min, x_max, y_min, y_max)
@param x_size: Number of x pixels
@param y_size: Number of y pixels
@param y_flipped: The y pixel coordinates are flipped relative
to the projected coordinates
@return list containing the 6 affine transformation coordinates
"""
x_res = (extents[1] - extents[0]) / x_size
y_res = (extents[3] - extents[2]) / y_size
geo_transform = []
geo_transform.append(extents[0])
geo_transform.append(x_res)
geo_transform.append(0)
geo_transform.append(extents[-1])
geo_transform.append(0)
geo_transform.append(y_res)
if y_flipped == True:
geo_transform[-1] *= -1
return geo_transform | 0.808294 | 0.504822 |
# """@package pbo_util
# Tools for working with PBO GPS data, including reference frame stabilization code
# """
import numpy as np
import pandas as pd
import warnings
from datetime import datetime
from .support import progress_bar
def getStationCoords( pbo_info, station_list):
'''
Get the station coordinates for a list of stations
@param pbo_info: PBO Metadata
@param station_list: List of stations
@return list of tuples containing lat, lon coordinates of stations
'''
coord_list = []
for station in station_list:
lat = pbo_info[station]['refNEU'][0]
lon = pbo_info[station]['refNEU'][1]-360
coord_list.append( (lat,lon))
return coord_list
def getLatLonRange(pbo_info, station_list):
'''
Retrive the range of latitude and longitude occupied by a set of stations
@param pbo_info: PBO Metadata
@param station_list: List of stations
@return list containg two tuples, lat_range and lon_range
'''
coord_list = getStationCoords(pbo_info, station_list)
lat_list = []
lon_list = []
for coord in coord_list:
lat_list.append(coord[0])
lon_list.append(coord[1])
lat_range = (np.min(lat_list), np.max(lat_list))
lon_range = (np.min(lon_list), np.max(lon_list))
return [lat_range, lon_range]
def getROIstations(geo_point,radiusParam,data,header):
'''
This function returns the 4ID station codes for the stations in a region
The region of interest is defined by the geographic coordinate and a window size
@param geo_point: The geographic (lat,lon) coordinate of interest
@param radiusParam: An overloaded radius of interest [km] or latitude and longitude window [deg] around the geo_point
@param data: Stabilized (or unstabilized) data generated from the data fetcher or out of stab_sys
@param header: Header dictionary with stations metadata keyed by their 4ID code. This is output with the data.
@return station_list, list of site 4ID codes in the specified geographic region
'''
ccPos = (geo_point[0]*np.pi/180, geo_point[1]*np.pi/180)
if np.isscalar(radiusParam):
station_list = []
for ii in header.keys():
coord = (header[ii]['refNEU'][0]*np.pi/180,(header[ii]['refNEU'][1]-360)*np.pi/180)
dist = 6371*2*np.arcsin(np.sqrt(np.sin((ccPos[0]-coord[0])/2)**2+np.cos(ccPos[0])*np.cos(coord[0])*np.sin((ccPos[1]-coord[1])/2)**2))
if np.abs(dist) < radiusParam:
station_list.append(header[ii]['4ID'])
else:
# overloaded radiusParam term to be radius or lat/lon window size
latWin = radiusParam[0]/2
lonWin = radiusParam[1]/2
station_list = []
try:
for ii in header.keys():
coord = (header[ii]['refNEU'][0],(header[ii]['refNEU'][1]-360))
if (geo_point[0]-latWin)<=coord[0]<=(geo_point[0]+latWin) and (geo_point[1]-lonWin)<=coord[1]<=(geo_point[1]+lonWin):
station_list.append(header[ii]['4ID'])
except:
station_list = None
return station_list
def stab_sys(data_iterator,metadata,stab_min_NE=.0005,stab_min_U=.005,sigsc=2,
errProp=1):
'''
Stabilize GPS data to a region
The stab_sys function is a Python implemention of the
Helmhert 7-parameter transformation, used to correct for common
mode error. This builds on Prof Herring's stab_sys function in his
tscon Fortran code. It uses a SVD approach to estimating the
rotation matrix gathered from 'Computing Helmert Transformations'
by G.A. Watson as well as its references. Note that units should
be in meters, that is in the format from the level 2 processed
UNAVCO pos files
@param data_iterator: Expects an iterator that returns label, pandas dataframe
@param metadata: Metadata that contains 'refXYZ' and 'refNEU'
@param stab_min_NE: Optional minimum horizontal covariance parameter
@param stab_min_U: Optional minimum vertical covariance parameter
@param sigsc: Optional scaling factor for determining cutoff bounds for non stable sites
@param errProp: Propagate errors through the transformation
@return smSet, a reduced size dictionary of the data (in mm) for the sites in the specified geographic region,
smHdr, a reduced size dictionary of the headers for the sites in the region
'''
# grabs all of the relevant data into labeled matrices
smTestFlag = 0; numSites = 0; smSet = []; smHdr = [];
smNEUcov = [];
#grab specified sites from the given list of data, or defaults to using all of the sites
for ii, pddata in data_iterator:
# requires the minimum amount of data to be present
# resamples these stations to daily
if smTestFlag == 0:
# grabbing position changes and the NEU change uncertainty
# instead of positions ([2,3,4] and [11,12,13])
# --> had to put the factor of 1000 back in from raw stab processing
smXYZ = pddata.loc[:,['X','Y','Z']] - metadata[ii]['refXYZ']
smNEU = pddata.loc[:,['dN','dE','dU']]
smNEcov = np.sqrt(pddata.loc[:,'Sn']**2 + pddata.loc[:,'Se']**2)
smUcov = pddata.loc[:,'Su']**2
smTestFlag = 1
else:
smXYZ = np.concatenate((smXYZ.T,(pddata.loc[:,['X','Y','Z']] - metadata[ii]['refXYZ']).T)).T
smNEU = np.concatenate((smNEU.T,pddata.loc[:,['dN','dE','dU']].T)).T
smNEcov = np.vstack((smNEcov,np.sqrt(pddata.loc[:,'Sn']**2 + pddata.loc[:,'Se']**2)))
smUcov = np.vstack((smUcov,pddata.loc[:,'Su']**2))
if errProp==1:
smNEUcov.append(np.array(pddata.loc[:,['Sn','Se','Su','Rne','Rnu','Reu']]))
# also keep the headers
numSites += 1
smSet.append(pddata)
smHdr.append(metadata[ii])
# grab the datelen from the last data chunk
datelen = len(pddata)
if numSites <= 1:
# no or only 1 stations
return dict(), dict()
else:
# do stabilization
smNEcov = smNEcov.T
smUcov = smUcov.T
smNEUcov = np.array(smNEUcov)
# minimum tolerances, number of sigma cutoff defined in input
sNEtol = np.nanmax(np.vstack(((np.nanmedian(smNEcov,axis=1)-np.nanmin(smNEcov,axis=1)).T,np.ones((datelen,))*stab_min_NE)),axis=0)
sUtol = np.nanmax(np.vstack(((np.nanmedian(smUcov,axis=1)-np.nanmin(smUcov,axis=1)).T,np.ones((datelen,))*stab_min_U)),axis=0)
stable_site_idx = (np.nan_to_num(smNEcov-np.tile(np.nanmin(smNEcov,axis=1),(numSites,1)).T)<(sigsc*np.tile(sNEtol,(numSites,1)).T))
stable_site_idx *= (np.nan_to_num(smUcov-np.tile(np.nanmin(smUcov,axis=1),(numSites,1)).T)<(sigsc*np.tile(sUtol,(numSites,1)).T))
if np.min(np.sum(stable_site_idx,axis=1)<3):
warnings.warn('Fewer than 3 stabilization sites in part of this interval')
# compute the parameters for each time step
stable_site_idx = np.repeat(stable_site_idx,3,axis=1)
stable_site_idx[pd.isnull(smXYZ)] = False
for ii in range(datelen):
# cut out the nans for stable sites
xyz = smXYZ[ii,stable_site_idx[ii,:]]
xyz = np.reshape(xyz,[int(len(xyz)/3),3])
neu = smNEU[ii,stable_site_idx[ii,:]]
neu = np.reshape(neu,[int(len(neu)/3),3])
# find mean and also remove it from the data
xyzm = np.mean(xyz,axis=0)
xyz = xyz - xyzm
neum = np.mean(neu,axis=0)
neu = neu - neum
# using an SVD method instead
U,s,V = np.linalg.linalg.svd(np.dot(xyz.T,neu))
R=np.dot(U,V)
sc = (np.sum(np.diag(np.dot(neu,np.dot(R.T,xyz.T)))))/(np.sum(np.diag(np.dot(xyz,xyz.T))))
t = neum - sc*np.dot(xyzm,R)
# looping over all sites to apply stabilization, including "stable" sites
# no need to remove nans as transformed nans still nan
xyz = smXYZ[ii,pd.isnull(smXYZ[ii,:])==False]
xyz = np.reshape(xyz,[int(len(xyz)/3),3])
smNEU[ii,pd.isnull(smXYZ[ii,:])==False] = np.reshape(np.dot(xyz,R)*sc + t,[len(xyz)*3,])
# do error propagation
if errProp==1:
propagateErrors(R,sc,smNEUcov[:,ii,:])
# fit back into the panda format overall data set, replaces original NEU, changes to mm units
for jj in range(len(smSet)):
smSet[jj].loc[:,['dN','dE','dU']] = smNEU[:,jj*3:(jj+1)*3]*1000
# the "covariances" put back in also now in mm units
if errProp==1:
smSet[jj].loc[:,['Sn','Se','Su','Rne','Rnu','Reu']] = smNEUcov[jj,:,:]
# returns the corrected data and the relevant headers as dictionaries, and the transformation's 7-parameters
smSet_dict = dict(); smHdr_dict = dict()
for ii in range(len(smHdr)):
smSet_dict[smHdr[ii]['4ID']] = smSet[ii]
smHdr_dict[smHdr[ii]['4ID']] = smHdr[ii]
return smSet_dict, smHdr_dict
def propagateErrors(R,sc,stationCovs):
'''
Propagate GPS errors
By writing out the R*E*R.T equations... to calculate the new covariance matrix
without needing to form the matrix first as an intermediate step. Modifies
covariance matrix in place
@param R: Rotation matrix
@param sc: Scaling value
@param stationCovs: Station Covariances
'''
oldCs = stationCovs.copy()
# need to make a copy to get the std & correlations to covariances
oldCs[:,3] *= oldCs[:,0]*oldCs[:,1]
oldCs[:,4] *= oldCs[:,0]*oldCs[:,2]
oldCs[:,5] *= oldCs[:,1]*oldCs[:,2]
oldCs[:,0] = oldCs[:,0]**2
oldCs[:,1] = oldCs[:,1]**2
oldCs[:,2] = oldCs[:,2]**2
# calculate the modified covariances and reformat back to std and correlations
stationCovs[:,0] = np.sqrt((sc**2)*np.dot(oldCs,[R[0,0]**2,R[0,1]**2,R[0,2]**2,
2*R[0,0]*R[0,1],2*R[0,0]*R[0,2],2*R[0,1]*R[0,2]]))
stationCovs[:,1] = np.sqrt((sc**2)*np.dot(oldCs,[R[0,1]**2,R[1,1]**2,R[1,2]**2,
2*R[0,1]*R[1,1],2*R[0,1]*R[1,2],2*R[1,1]*R[1,2]]))
stationCovs[:,2] = np.sqrt((sc**2)*np.dot(oldCs,[R[0,2]**2,R[1,2]**2,R[2,2]**2,
2*R[0,2]*R[1,2],2*R[0,2]*R[2,2],2*R[1,2]*R[2,2]]))
stationCovs[:,3] = (sc**2)*np.dot(oldCs,[R[0,0]*R[0,1],R[0,1]*R[1,1],R[0,2]*R[1,2],
R[0,1]**2+R[0,0]*R[1,1],R[0,1]*R[0,2]+R[0,0]*R[1,2],
R[0,2]*R[1,1]+R[0,1]*R[1,2]])/(stationCovs[:,0]*stationCovs[:,1])
stationCovs[:,4] = (sc**2)*np.dot(oldCs,[R[0,0]*R[0,2],R[0,1]*R[1,2],R[0,2]*R[2,2],
R[0,0]*R[1,2]+R[0,1]*R[0,2],R[0,0]*R[2,2]+R[0,2]**2,
R[0,1]*R[2,2]+R[0,2]*R[1,2]])/(stationCovs[:,0]*stationCovs[:,2])
stationCovs[:,5] = (sc**2)*np.dot(oldCs,[R[0,2]*R[0,1],R[1,2]*R[1,1],R[1,2]*R[2,2],
R[0,1]*R[1,2]+R[0,2]*R[1,1],R[0,1]*R[2,2]+R[0,2]*R[1,2],
R[2,2]*R[1,1]+R[1,2]**2])/(stationCovs[:,1]*stationCovs[:,2])
oldCs[:,0:3] *= 1000
def nostab_sys(allH,allD,timerng,indx=1,mdyratio=.7, use_progress_bar = True, index_date_only=False):
'''
Do not apply stabilization and simply returns stations after checking for sufficient amount of data
@param allH: a dictionary of all of the headers of all sites loaded from the data directory
@param allD: a dictionary of all of the panda format data of all of the corresponding sites
@param timerng: an array with two string elements, describing the starting and ending dates
@param indx: a list of site 4ID's indicating stations in the relevant geographic location, or 1 for all sites
@param mdyratio: optional parameter for the minimum required ratio of data to determine if a sitef is kept for further analysis
@param use_progress_bar: Display a progress bar
@param index_date_only: When creating an index for the data, use date (not the time) only
@return smSet, a reduced size dictionary of the data (in meters) for the sites in the specified geographic region and
smHdr, a reduced size dictionary of the headers for the sites in the region
'''
# grabs all of the relevant data into labeled matrices
numSites = 0; smSet = []; smHdr = [];
datelen = pd.date_range(start=timerng[0],end=timerng[1],freq='D').shape[0]
# needs the specified ratio of data to be present for further use. or number of days
if mdyratio > 1:
mindays = mdyratio
else:
mindays = ((pd.to_datetime(timerng[1]) - pd.to_datetime(timerng[0]))/pd.to_timedelta(1,'D'))*mdyratio
#grab specified sites from the given list of data, or defaults to using all of the sites
if indx == 1:
indx = list(allH.keys())
for ii in progress_bar(indx,enabled = use_progress_bar):
if index_date_only:
pddata = allD['data_' + ii][timerng[0]:timerng[1]]
else:
pddata = allD['data_' + ii]
jd_conversion = 2400000.5
pddata[pddata.index.name] = pddata.index
pddata = pddata[[pddata.index.name] + pddata.columns.tolist()[:-1]]
pddata.index = pd.to_datetime(pddata['JJJJJ.JJJJ'] + jd_conversion, unit='D', origin='julian')
pddata.index.name = 'Date'
pddata = pddata[timerng[0]:timerng[1]]
dCheck = pddata[timerng[0]:timerng[1]].shape[0]
if dCheck>mindays:
# requires the minimum amount of data to be present
# resamples these stations to daily
if pddata.shape[0] < datelen:
pddata = pddata.reindex(pd.date_range(start=timerng[0],end=timerng[1],freq='D'))
else:
pddata = pddata.reindex(pd.date_range(start=pddata.index[0],end=pddata.index[-1],freq='D'))
# also keep the headers
numSites += 1
smSet.append(pddata)
smHdr.append(allH[ii])
# returns the data and the relevant headers as dictionaries, and the transformation's 7-parameters
smSet_dict = dict(); smHdr_dict = dict()
for ii in range(len(smHdr)):
smSet_dict[smHdr[ii]['4ID']] = smSet[ii]
smHdr_dict[smHdr[ii]['4ID']] = smHdr[ii]
return smSet_dict, smHdr_dict
def removeAntennaOffset(antenna_offsets, data, window_start = pd.to_timedelta('4D'), window_end=pd.to_timedelta('4D'),min_diff=0.005, debug=False):
'''
Remove offsets caused by changes in antennas
@param antenna_offsets: Pandas series of dates describing when the antenna changes were made
@param data: Input GPS data
@param window_start: Starting time before and after event to use for calculating offset
@param window_end: Ending time before and after event to use before calculating offset
@param min_diff: Minimum difference before and after offset to for applying correction
@param debug: Enable debug output
@return GPS data with the offsets removed
'''
if antenna_offsets is None:
return data
data_copy = data.copy()
for full_offset in antenna_offsets:
# truncate date
offset = pd.to_datetime(pd.datetime(full_offset.year, full_offset.month, full_offset.day))
if offset > (data.index[0] + window_end):
before = data_copy.loc[(offset - window_end) - window_start : offset-window_start]
after = data_copy.loc[offset + window_start : (offset + window_end) + window_start]
if min(len(after.dropna()),len(before.dropna())) > 0:
if np.abs(np.nanmedian(before) - np.nanmedian(after)) >= min_diff:
if debug == True:
print('fixing',offset, end=': ')
print(np.nanmedian(before)*1e3, np.nanmedian(after)*1e3)
data_copy.loc[offset:] = data_copy.loc[offset:] + (np.nanmedian(before) - np.nanmedian(after))
if not pd.isnull(data_copy.loc[offset]):
data_copy.loc[offset] = np.nanmedian(pd.concat([before,
data_copy.loc[offset + window_start : (offset + window_end) + window_start]]))
return data_copy | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/pbo_util.py | pbo_util.py |
# """@package pbo_util
# Tools for working with PBO GPS data, including reference frame stabilization code
# """
import numpy as np
import pandas as pd
import warnings
from datetime import datetime
from .support import progress_bar
def getStationCoords( pbo_info, station_list):
'''
Get the station coordinates for a list of stations
@param pbo_info: PBO Metadata
@param station_list: List of stations
@return list of tuples containing lat, lon coordinates of stations
'''
coord_list = []
for station in station_list:
lat = pbo_info[station]['refNEU'][0]
lon = pbo_info[station]['refNEU'][1]-360
coord_list.append( (lat,lon))
return coord_list
def getLatLonRange(pbo_info, station_list):
'''
Retrive the range of latitude and longitude occupied by a set of stations
@param pbo_info: PBO Metadata
@param station_list: List of stations
@return list containg two tuples, lat_range and lon_range
'''
coord_list = getStationCoords(pbo_info, station_list)
lat_list = []
lon_list = []
for coord in coord_list:
lat_list.append(coord[0])
lon_list.append(coord[1])
lat_range = (np.min(lat_list), np.max(lat_list))
lon_range = (np.min(lon_list), np.max(lon_list))
return [lat_range, lon_range]
def getROIstations(geo_point,radiusParam,data,header):
'''
This function returns the 4ID station codes for the stations in a region
The region of interest is defined by the geographic coordinate and a window size
@param geo_point: The geographic (lat,lon) coordinate of interest
@param radiusParam: An overloaded radius of interest [km] or latitude and longitude window [deg] around the geo_point
@param data: Stabilized (or unstabilized) data generated from the data fetcher or out of stab_sys
@param header: Header dictionary with stations metadata keyed by their 4ID code. This is output with the data.
@return station_list, list of site 4ID codes in the specified geographic region
'''
ccPos = (geo_point[0]*np.pi/180, geo_point[1]*np.pi/180)
if np.isscalar(radiusParam):
station_list = []
for ii in header.keys():
coord = (header[ii]['refNEU'][0]*np.pi/180,(header[ii]['refNEU'][1]-360)*np.pi/180)
dist = 6371*2*np.arcsin(np.sqrt(np.sin((ccPos[0]-coord[0])/2)**2+np.cos(ccPos[0])*np.cos(coord[0])*np.sin((ccPos[1]-coord[1])/2)**2))
if np.abs(dist) < radiusParam:
station_list.append(header[ii]['4ID'])
else:
# overloaded radiusParam term to be radius or lat/lon window size
latWin = radiusParam[0]/2
lonWin = radiusParam[1]/2
station_list = []
try:
for ii in header.keys():
coord = (header[ii]['refNEU'][0],(header[ii]['refNEU'][1]-360))
if (geo_point[0]-latWin)<=coord[0]<=(geo_point[0]+latWin) and (geo_point[1]-lonWin)<=coord[1]<=(geo_point[1]+lonWin):
station_list.append(header[ii]['4ID'])
except:
station_list = None
return station_list
def stab_sys(data_iterator,metadata,stab_min_NE=.0005,stab_min_U=.005,sigsc=2,
errProp=1):
'''
Stabilize GPS data to a region
The stab_sys function is a Python implemention of the
Helmhert 7-parameter transformation, used to correct for common
mode error. This builds on Prof Herring's stab_sys function in his
tscon Fortran code. It uses a SVD approach to estimating the
rotation matrix gathered from 'Computing Helmert Transformations'
by G.A. Watson as well as its references. Note that units should
be in meters, that is in the format from the level 2 processed
UNAVCO pos files
@param data_iterator: Expects an iterator that returns label, pandas dataframe
@param metadata: Metadata that contains 'refXYZ' and 'refNEU'
@param stab_min_NE: Optional minimum horizontal covariance parameter
@param stab_min_U: Optional minimum vertical covariance parameter
@param sigsc: Optional scaling factor for determining cutoff bounds for non stable sites
@param errProp: Propagate errors through the transformation
@return smSet, a reduced size dictionary of the data (in mm) for the sites in the specified geographic region,
smHdr, a reduced size dictionary of the headers for the sites in the region
'''
# grabs all of the relevant data into labeled matrices
smTestFlag = 0; numSites = 0; smSet = []; smHdr = [];
smNEUcov = [];
#grab specified sites from the given list of data, or defaults to using all of the sites
for ii, pddata in data_iterator:
# requires the minimum amount of data to be present
# resamples these stations to daily
if smTestFlag == 0:
# grabbing position changes and the NEU change uncertainty
# instead of positions ([2,3,4] and [11,12,13])
# --> had to put the factor of 1000 back in from raw stab processing
smXYZ = pddata.loc[:,['X','Y','Z']] - metadata[ii]['refXYZ']
smNEU = pddata.loc[:,['dN','dE','dU']]
smNEcov = np.sqrt(pddata.loc[:,'Sn']**2 + pddata.loc[:,'Se']**2)
smUcov = pddata.loc[:,'Su']**2
smTestFlag = 1
else:
smXYZ = np.concatenate((smXYZ.T,(pddata.loc[:,['X','Y','Z']] - metadata[ii]['refXYZ']).T)).T
smNEU = np.concatenate((smNEU.T,pddata.loc[:,['dN','dE','dU']].T)).T
smNEcov = np.vstack((smNEcov,np.sqrt(pddata.loc[:,'Sn']**2 + pddata.loc[:,'Se']**2)))
smUcov = np.vstack((smUcov,pddata.loc[:,'Su']**2))
if errProp==1:
smNEUcov.append(np.array(pddata.loc[:,['Sn','Se','Su','Rne','Rnu','Reu']]))
# also keep the headers
numSites += 1
smSet.append(pddata)
smHdr.append(metadata[ii])
# grab the datelen from the last data chunk
datelen = len(pddata)
if numSites <= 1:
# no or only 1 stations
return dict(), dict()
else:
# do stabilization
smNEcov = smNEcov.T
smUcov = smUcov.T
smNEUcov = np.array(smNEUcov)
# minimum tolerances, number of sigma cutoff defined in input
sNEtol = np.nanmax(np.vstack(((np.nanmedian(smNEcov,axis=1)-np.nanmin(smNEcov,axis=1)).T,np.ones((datelen,))*stab_min_NE)),axis=0)
sUtol = np.nanmax(np.vstack(((np.nanmedian(smUcov,axis=1)-np.nanmin(smUcov,axis=1)).T,np.ones((datelen,))*stab_min_U)),axis=0)
stable_site_idx = (np.nan_to_num(smNEcov-np.tile(np.nanmin(smNEcov,axis=1),(numSites,1)).T)<(sigsc*np.tile(sNEtol,(numSites,1)).T))
stable_site_idx *= (np.nan_to_num(smUcov-np.tile(np.nanmin(smUcov,axis=1),(numSites,1)).T)<(sigsc*np.tile(sUtol,(numSites,1)).T))
if np.min(np.sum(stable_site_idx,axis=1)<3):
warnings.warn('Fewer than 3 stabilization sites in part of this interval')
# compute the parameters for each time step
stable_site_idx = np.repeat(stable_site_idx,3,axis=1)
stable_site_idx[pd.isnull(smXYZ)] = False
for ii in range(datelen):
# cut out the nans for stable sites
xyz = smXYZ[ii,stable_site_idx[ii,:]]
xyz = np.reshape(xyz,[int(len(xyz)/3),3])
neu = smNEU[ii,stable_site_idx[ii,:]]
neu = np.reshape(neu,[int(len(neu)/3),3])
# find mean and also remove it from the data
xyzm = np.mean(xyz,axis=0)
xyz = xyz - xyzm
neum = np.mean(neu,axis=0)
neu = neu - neum
# using an SVD method instead
U,s,V = np.linalg.linalg.svd(np.dot(xyz.T,neu))
R=np.dot(U,V)
sc = (np.sum(np.diag(np.dot(neu,np.dot(R.T,xyz.T)))))/(np.sum(np.diag(np.dot(xyz,xyz.T))))
t = neum - sc*np.dot(xyzm,R)
# looping over all sites to apply stabilization, including "stable" sites
# no need to remove nans as transformed nans still nan
xyz = smXYZ[ii,pd.isnull(smXYZ[ii,:])==False]
xyz = np.reshape(xyz,[int(len(xyz)/3),3])
smNEU[ii,pd.isnull(smXYZ[ii,:])==False] = np.reshape(np.dot(xyz,R)*sc + t,[len(xyz)*3,])
# do error propagation
if errProp==1:
propagateErrors(R,sc,smNEUcov[:,ii,:])
# fit back into the panda format overall data set, replaces original NEU, changes to mm units
for jj in range(len(smSet)):
smSet[jj].loc[:,['dN','dE','dU']] = smNEU[:,jj*3:(jj+1)*3]*1000
# the "covariances" put back in also now in mm units
if errProp==1:
smSet[jj].loc[:,['Sn','Se','Su','Rne','Rnu','Reu']] = smNEUcov[jj,:,:]
# returns the corrected data and the relevant headers as dictionaries, and the transformation's 7-parameters
smSet_dict = dict(); smHdr_dict = dict()
for ii in range(len(smHdr)):
smSet_dict[smHdr[ii]['4ID']] = smSet[ii]
smHdr_dict[smHdr[ii]['4ID']] = smHdr[ii]
return smSet_dict, smHdr_dict
def propagateErrors(R,sc,stationCovs):
'''
Propagate GPS errors
By writing out the R*E*R.T equations... to calculate the new covariance matrix
without needing to form the matrix first as an intermediate step. Modifies
covariance matrix in place
@param R: Rotation matrix
@param sc: Scaling value
@param stationCovs: Station Covariances
'''
oldCs = stationCovs.copy()
# need to make a copy to get the std & correlations to covariances
oldCs[:,3] *= oldCs[:,0]*oldCs[:,1]
oldCs[:,4] *= oldCs[:,0]*oldCs[:,2]
oldCs[:,5] *= oldCs[:,1]*oldCs[:,2]
oldCs[:,0] = oldCs[:,0]**2
oldCs[:,1] = oldCs[:,1]**2
oldCs[:,2] = oldCs[:,2]**2
# calculate the modified covariances and reformat back to std and correlations
stationCovs[:,0] = np.sqrt((sc**2)*np.dot(oldCs,[R[0,0]**2,R[0,1]**2,R[0,2]**2,
2*R[0,0]*R[0,1],2*R[0,0]*R[0,2],2*R[0,1]*R[0,2]]))
stationCovs[:,1] = np.sqrt((sc**2)*np.dot(oldCs,[R[0,1]**2,R[1,1]**2,R[1,2]**2,
2*R[0,1]*R[1,1],2*R[0,1]*R[1,2],2*R[1,1]*R[1,2]]))
stationCovs[:,2] = np.sqrt((sc**2)*np.dot(oldCs,[R[0,2]**2,R[1,2]**2,R[2,2]**2,
2*R[0,2]*R[1,2],2*R[0,2]*R[2,2],2*R[1,2]*R[2,2]]))
stationCovs[:,3] = (sc**2)*np.dot(oldCs,[R[0,0]*R[0,1],R[0,1]*R[1,1],R[0,2]*R[1,2],
R[0,1]**2+R[0,0]*R[1,1],R[0,1]*R[0,2]+R[0,0]*R[1,2],
R[0,2]*R[1,1]+R[0,1]*R[1,2]])/(stationCovs[:,0]*stationCovs[:,1])
stationCovs[:,4] = (sc**2)*np.dot(oldCs,[R[0,0]*R[0,2],R[0,1]*R[1,2],R[0,2]*R[2,2],
R[0,0]*R[1,2]+R[0,1]*R[0,2],R[0,0]*R[2,2]+R[0,2]**2,
R[0,1]*R[2,2]+R[0,2]*R[1,2]])/(stationCovs[:,0]*stationCovs[:,2])
stationCovs[:,5] = (sc**2)*np.dot(oldCs,[R[0,2]*R[0,1],R[1,2]*R[1,1],R[1,2]*R[2,2],
R[0,1]*R[1,2]+R[0,2]*R[1,1],R[0,1]*R[2,2]+R[0,2]*R[1,2],
R[2,2]*R[1,1]+R[1,2]**2])/(stationCovs[:,1]*stationCovs[:,2])
oldCs[:,0:3] *= 1000
def nostab_sys(allH,allD,timerng,indx=1,mdyratio=.7, use_progress_bar = True, index_date_only=False):
'''
Do not apply stabilization and simply returns stations after checking for sufficient amount of data
@param allH: a dictionary of all of the headers of all sites loaded from the data directory
@param allD: a dictionary of all of the panda format data of all of the corresponding sites
@param timerng: an array with two string elements, describing the starting and ending dates
@param indx: a list of site 4ID's indicating stations in the relevant geographic location, or 1 for all sites
@param mdyratio: optional parameter for the minimum required ratio of data to determine if a sitef is kept for further analysis
@param use_progress_bar: Display a progress bar
@param index_date_only: When creating an index for the data, use date (not the time) only
@return smSet, a reduced size dictionary of the data (in meters) for the sites in the specified geographic region and
smHdr, a reduced size dictionary of the headers for the sites in the region
'''
# grabs all of the relevant data into labeled matrices
numSites = 0; smSet = []; smHdr = [];
datelen = pd.date_range(start=timerng[0],end=timerng[1],freq='D').shape[0]
# needs the specified ratio of data to be present for further use. or number of days
if mdyratio > 1:
mindays = mdyratio
else:
mindays = ((pd.to_datetime(timerng[1]) - pd.to_datetime(timerng[0]))/pd.to_timedelta(1,'D'))*mdyratio
#grab specified sites from the given list of data, or defaults to using all of the sites
if indx == 1:
indx = list(allH.keys())
for ii in progress_bar(indx,enabled = use_progress_bar):
if index_date_only:
pddata = allD['data_' + ii][timerng[0]:timerng[1]]
else:
pddata = allD['data_' + ii]
jd_conversion = 2400000.5
pddata[pddata.index.name] = pddata.index
pddata = pddata[[pddata.index.name] + pddata.columns.tolist()[:-1]]
pddata.index = pd.to_datetime(pddata['JJJJJ.JJJJ'] + jd_conversion, unit='D', origin='julian')
pddata.index.name = 'Date'
pddata = pddata[timerng[0]:timerng[1]]
dCheck = pddata[timerng[0]:timerng[1]].shape[0]
if dCheck>mindays:
# requires the minimum amount of data to be present
# resamples these stations to daily
if pddata.shape[0] < datelen:
pddata = pddata.reindex(pd.date_range(start=timerng[0],end=timerng[1],freq='D'))
else:
pddata = pddata.reindex(pd.date_range(start=pddata.index[0],end=pddata.index[-1],freq='D'))
# also keep the headers
numSites += 1
smSet.append(pddata)
smHdr.append(allH[ii])
# returns the data and the relevant headers as dictionaries, and the transformation's 7-parameters
smSet_dict = dict(); smHdr_dict = dict()
for ii in range(len(smHdr)):
smSet_dict[smHdr[ii]['4ID']] = smSet[ii]
smHdr_dict[smHdr[ii]['4ID']] = smHdr[ii]
return smSet_dict, smHdr_dict
def removeAntennaOffset(antenna_offsets, data, window_start = pd.to_timedelta('4D'), window_end=pd.to_timedelta('4D'),min_diff=0.005, debug=False):
'''
Remove offsets caused by changes in antennas
@param antenna_offsets: Pandas series of dates describing when the antenna changes were made
@param data: Input GPS data
@param window_start: Starting time before and after event to use for calculating offset
@param window_end: Ending time before and after event to use before calculating offset
@param min_diff: Minimum difference before and after offset to for applying correction
@param debug: Enable debug output
@return GPS data with the offsets removed
'''
if antenna_offsets is None:
return data
data_copy = data.copy()
for full_offset in antenna_offsets:
# truncate date
offset = pd.to_datetime(pd.datetime(full_offset.year, full_offset.month, full_offset.day))
if offset > (data.index[0] + window_end):
before = data_copy.loc[(offset - window_end) - window_start : offset-window_start]
after = data_copy.loc[offset + window_start : (offset + window_end) + window_start]
if min(len(after.dropna()),len(before.dropna())) > 0:
if np.abs(np.nanmedian(before) - np.nanmedian(after)) >= min_diff:
if debug == True:
print('fixing',offset, end=': ')
print(np.nanmedian(before)*1e3, np.nanmedian(after)*1e3)
data_copy.loc[offset:] = data_copy.loc[offset:] + (np.nanmedian(before) - np.nanmedian(after))
if not pd.isnull(data_copy.loc[offset]):
data_copy.loc[offset] = np.nanmedian(pd.concat([before,
data_copy.loc[offset + window_start : (offset + window_end) + window_start]]))
return data_copy | 0.550849 | 0.622631 |
# Scikit Data Access imports
from .image_util import AffineGlobalCoords, getGeoTransform
# 3rd party imports
import numpy as np
def merge_srtm_tiles(srtm_tiles, lon_min, lon_max, lat_min, lat_max):
'''
Merge the tiles retrieved from the Shuttle Radar Topography Mission data
using a DataFetcher
@param srtm_tiles: The tiles to merge, contained in an ImageWrapper
@param lon_min: Minimal longitude used in the DataFectcher
@param lon_max: Maximal longitude used in the DataFectcher
@param lat_min: Minimal latitude used in the DataFectcher
@param lon_min: Maximal latitude used in the DataFectcher
@return A NumPy array with the merged tiles and its extent in longitude and
latitude
'''
tile = list(srtm_tiles.data.keys())[0]
tile_width = srtm_tiles.data[tile].shape[1]
tile_height = srtm_tiles.data[tile].shape[0]
number_tile_y = abs(lat_max - lat_min)
number_tile_x = abs(lon_max - lon_min)
topography = np.empty((tile_height*number_tile_y - (number_tile_y - 1),
tile_width*number_tile_x - (number_tile_x - 1)))
tile_index = 0
i_factor = 0
j_factor = number_tile_y - 1
for i in range(0, number_tile_x):
for j in range(number_tile_y, 0, -1):
tile = list(srtm_tiles.data.keys())[tile_index]
topography[(j - 1)*tile_height - j_factor:j*tile_height - j_factor,
i*tile_width - i_factor:(i + 1)*tile_width - i_factor] = srtm_tiles.data[tile]
tile_index += 1
j_factor -= 1
i_factor += 1
j_factor = number_tile_y - 1
pixel_lon_size = (lon_max - lon_min)/(topography.shape[1] - 1)
pixel_lat_size = (lat_max - lat_min)/(topography.shape[0] - 1)
topography_extent = (lon_min - 0.5*pixel_lon_size,
lon_max + 0.5*pixel_lon_size,
lat_min - 0.5*pixel_lat_size,
lat_max + 0.5*pixel_lat_size)
return topography, topography_extent
def getSRTMLatLon(lat_min, lat_max, lon_min, lon_max):
'''
Retrieve parameters that encompass area when creating SRTM data fetcher.
@param lat_min: Minimum latitude
@param lat_max: Maximum latitude
@param lon_min: Minimum longitude
@param lon_max: Maximum longitude
@return (starting_latitude, ending_latitude,
starting_longitude, ending_longitude)
'''
start_lat = int(np.floor(lat_min))
start_lon = int(np.floor(lon_min))
end_lat = int(np.floor(lat_max))
end_lon = int(np.floor(lon_max))
return start_lat, end_lat, start_lon, end_lon
def getSRTMData(srtmdw, lat_start,lat_end, lon_start,lon_end):
'''
Select SRTM data in a latitude/longitude box
@param srtmdw: SRTM data wrapper
@param lat_start: Starting latiude
@param lat_end: Ending latiude
@param lon_start: Starting longitude
@param lon_end: Ending longitude
@param flip_y: Flip the y axis so that increasing y pixels are increasing in latitude
@return Tuple containing the cut data, new extents, and a affine geotransform coefficients
'''
tiles = getSRTMLatLon(lat_start, lat_end, lon_start, lon_end)
srtm_data, srtm_extents = merge_srtm_tiles(srtmdw, tiles[2],tiles[3]+1, tiles[0], tiles[1]+1)
full_geotransform = getGeoTransform(srtm_extents, srtm_data.shape[1], srtm_data.shape[0])
full_geo = AffineGlobalCoords(full_geotransform)
start_y, start_x = np.floor(full_geo.getPixelYX(lat_end,lon_start)).astype(np.int)
end_y, end_x = np.ceil(full_geo.getPixelYX(lat_start,lon_end)).astype(np.int)
cut_data = srtm_data[start_y:end_y, start_x:end_x]
cut_proj_y_start, cut_proj_x_start = full_geo.getProjectedYX(end_y, start_x)
cut_proj_y_end, cut_proj_x_end = full_geo.getProjectedYX(start_y, end_x)
cut_extents = [
cut_proj_x_start,
cut_proj_x_end,
cut_proj_y_start,
cut_proj_y_end
]
cut_geotransform = full_geotransform.copy()
cut_geotransform[0] = cut_extents[0]
cut_geotransform[3] = cut_extents[-1]
return cut_data, cut_extents, cut_geotransform | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/srtm_util.py | srtm_util.py |
# Scikit Data Access imports
from .image_util import AffineGlobalCoords, getGeoTransform
# 3rd party imports
import numpy as np
def merge_srtm_tiles(srtm_tiles, lon_min, lon_max, lat_min, lat_max):
'''
Merge the tiles retrieved from the Shuttle Radar Topography Mission data
using a DataFetcher
@param srtm_tiles: The tiles to merge, contained in an ImageWrapper
@param lon_min: Minimal longitude used in the DataFectcher
@param lon_max: Maximal longitude used in the DataFectcher
@param lat_min: Minimal latitude used in the DataFectcher
@param lon_min: Maximal latitude used in the DataFectcher
@return A NumPy array with the merged tiles and its extent in longitude and
latitude
'''
tile = list(srtm_tiles.data.keys())[0]
tile_width = srtm_tiles.data[tile].shape[1]
tile_height = srtm_tiles.data[tile].shape[0]
number_tile_y = abs(lat_max - lat_min)
number_tile_x = abs(lon_max - lon_min)
topography = np.empty((tile_height*number_tile_y - (number_tile_y - 1),
tile_width*number_tile_x - (number_tile_x - 1)))
tile_index = 0
i_factor = 0
j_factor = number_tile_y - 1
for i in range(0, number_tile_x):
for j in range(number_tile_y, 0, -1):
tile = list(srtm_tiles.data.keys())[tile_index]
topography[(j - 1)*tile_height - j_factor:j*tile_height - j_factor,
i*tile_width - i_factor:(i + 1)*tile_width - i_factor] = srtm_tiles.data[tile]
tile_index += 1
j_factor -= 1
i_factor += 1
j_factor = number_tile_y - 1
pixel_lon_size = (lon_max - lon_min)/(topography.shape[1] - 1)
pixel_lat_size = (lat_max - lat_min)/(topography.shape[0] - 1)
topography_extent = (lon_min - 0.5*pixel_lon_size,
lon_max + 0.5*pixel_lon_size,
lat_min - 0.5*pixel_lat_size,
lat_max + 0.5*pixel_lat_size)
return topography, topography_extent
def getSRTMLatLon(lat_min, lat_max, lon_min, lon_max):
'''
Retrieve parameters that encompass area when creating SRTM data fetcher.
@param lat_min: Minimum latitude
@param lat_max: Maximum latitude
@param lon_min: Minimum longitude
@param lon_max: Maximum longitude
@return (starting_latitude, ending_latitude,
starting_longitude, ending_longitude)
'''
start_lat = int(np.floor(lat_min))
start_lon = int(np.floor(lon_min))
end_lat = int(np.floor(lat_max))
end_lon = int(np.floor(lon_max))
return start_lat, end_lat, start_lon, end_lon
def getSRTMData(srtmdw, lat_start,lat_end, lon_start,lon_end):
'''
Select SRTM data in a latitude/longitude box
@param srtmdw: SRTM data wrapper
@param lat_start: Starting latiude
@param lat_end: Ending latiude
@param lon_start: Starting longitude
@param lon_end: Ending longitude
@param flip_y: Flip the y axis so that increasing y pixels are increasing in latitude
@return Tuple containing the cut data, new extents, and a affine geotransform coefficients
'''
tiles = getSRTMLatLon(lat_start, lat_end, lon_start, lon_end)
srtm_data, srtm_extents = merge_srtm_tiles(srtmdw, tiles[2],tiles[3]+1, tiles[0], tiles[1]+1)
full_geotransform = getGeoTransform(srtm_extents, srtm_data.shape[1], srtm_data.shape[0])
full_geo = AffineGlobalCoords(full_geotransform)
start_y, start_x = np.floor(full_geo.getPixelYX(lat_end,lon_start)).astype(np.int)
end_y, end_x = np.ceil(full_geo.getPixelYX(lat_start,lon_end)).astype(np.int)
cut_data = srtm_data[start_y:end_y, start_x:end_x]
cut_proj_y_start, cut_proj_x_start = full_geo.getProjectedYX(end_y, start_x)
cut_proj_y_end, cut_proj_x_end = full_geo.getProjectedYX(start_y, end_x)
cut_extents = [
cut_proj_x_start,
cut_proj_x_end,
cut_proj_y_start,
cut_proj_y_end
]
cut_geotransform = full_geotransform.copy()
cut_geotransform[0] = cut_extents[0]
cut_geotransform[3] = cut_extents[-1]
return cut_data, cut_extents, cut_geotransform | 0.747339 | 0.696604 |
import re
from collections import OrderedDict
def readUAVSARMetadata(in_file):
'''
Parse UAVSAR metadata
@param in_file: String of Metadata filename or file object (file should end in .ann)
@return OrderedDict of metadata
'''
if isinstance(in_file, str):
with open(in_file, 'r') as info_file:
data_info = info_file.readlines()
else:
data_info = [line.decode() for line in in_file.readlines()]
data_info = [line.strip() for line in data_info]
# Function to convert string to a number
def str_to_number(in_string):
try:
return int(in_string)
except:
return float(in_string)
data_name = data_info[0][31:]
meta_data_dict = OrderedDict()
for line in data_info:
# Only work on lines that aren't commented out
if re.match('^[^;]',line) != None:
# Get the data type ('&' is text)
data_type = re.search('\s+\((.*)\)\s+=', line).group(1)
# Remove data type from line
tmp = re.sub('\s+\(.*\)\s+=', ' =', line)
# Split line into key,value
split_list = tmp.split('=',maxsplit=1)
# remove any trailing comments and strip whitespace
split_list[1] = re.search('[^;]*',split_list[1]).group().strip()
split_list[0] = split_list[0].strip()
#If data type is not a string, parse it as a float or int
if data_type != '&':
# Check if value is N/A
if split_list[1] == 'N/A':
split_list[1] = float('nan')
# Check for Raskew Doppler Near Mid Far as this
# entry should be three seperate entries
elif split_list[0] == 'Reskew Doppler Near Mid Far':
split_list[0] = 'Reskew Doppler Near'
second_split = split_list[1].split()
split_list[1] = str_to_number(second_split[0])
meta_data_dict['Reskew Doppler Mid'] = str_to_number(second_split[1])
meta_data_dict['Reskew Doppler Far'] = str_to_number(second_split[2])
# Parse value to an int or float
else:
split_list[1] = str_to_number(split_list[1])
# Add key, value pair to dictionary
meta_data_dict[split_list[0]] = split_list[1]
return meta_data_dict | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/utilities/uavsar_util.py | uavsar_util.py |
import re
from collections import OrderedDict
def readUAVSARMetadata(in_file):
'''
Parse UAVSAR metadata
@param in_file: String of Metadata filename or file object (file should end in .ann)
@return OrderedDict of metadata
'''
if isinstance(in_file, str):
with open(in_file, 'r') as info_file:
data_info = info_file.readlines()
else:
data_info = [line.decode() for line in in_file.readlines()]
data_info = [line.strip() for line in data_info]
# Function to convert string to a number
def str_to_number(in_string):
try:
return int(in_string)
except:
return float(in_string)
data_name = data_info[0][31:]
meta_data_dict = OrderedDict()
for line in data_info:
# Only work on lines that aren't commented out
if re.match('^[^;]',line) != None:
# Get the data type ('&' is text)
data_type = re.search('\s+\((.*)\)\s+=', line).group(1)
# Remove data type from line
tmp = re.sub('\s+\(.*\)\s+=', ' =', line)
# Split line into key,value
split_list = tmp.split('=',maxsplit=1)
# remove any trailing comments and strip whitespace
split_list[1] = re.search('[^;]*',split_list[1]).group().strip()
split_list[0] = split_list[0].strip()
#If data type is not a string, parse it as a float or int
if data_type != '&':
# Check if value is N/A
if split_list[1] == 'N/A':
split_list[1] = float('nan')
# Check for Raskew Doppler Near Mid Far as this
# entry should be three seperate entries
elif split_list[0] == 'Reskew Doppler Near Mid Far':
split_list[0] = 'Reskew Doppler Near'
second_split = split_list[1].split()
split_list[1] = str_to_number(second_split[0])
meta_data_dict['Reskew Doppler Mid'] = str_to_number(second_split[1])
meta_data_dict['Reskew Doppler Far'] = str_to_number(second_split[2])
# Parse value to an int or float
else:
split_list[1] = str_to_number(split_list[1])
# Add key, value pair to dictionary
meta_data_dict[split_list[0]] = split_list[1]
return meta_data_dict | 0.39257 | 0.328583 |
# Standard library imports
from collections import OrderedDict
from io import StringIO
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party imports
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
import pandas as pd
class DataFetcher(DataFetcherStream):
"""
Class for handling data requests to data.lacity.org
"""
def __init__(self, endpoint, parameters, label, verbose=False, app_token = None, **pandas_kwargs):
"""
Initialize Data Fetcher for accessing data.lacity.org
@param endpoint: Data endpoint string
@param parameters: Parameters to use when retrieving dta
@param label: Label of pandas dataframe
@param verbose: Print out extra information
@param app_token: Application token to use to avoid throttling issues
@param date_columns
@param pandas_kwargs: Any additional key word arguments are passed to pandas.read_csv
"""
self.base_url = 'https://data.lacity.org/resource/'
self.base_url_and_endpoint = self.base_url + endpoint + '.csv?'
self.parameters = parameters
self.label = label
self.app_token = app_token
self.pandas_kwargs = pandas_kwargs
if '$$app_token' in parameters:
raise RuntimeError("Use app_token option in constructor instead of manually " +
"adding it into the the parameters")
if app_token != None:
self.parameters['$$app_token'] = app_token
super(DataFetcher, self).__init__([], verbose)
def output(self):
"""
Retrieve data from data.lacity.org
@return Table wrapper of containing specified data
"""
data_dict = OrderedDict()
url_query = self.base_url_and_endpoint + urlencode(self.parameters)
with urlopen(url_query) as remote_resource:
raw_string = remote_resource.read().decode()
string_data = StringIO(raw_string)
data_dict[self.label] = pd.read_csv(string_data, **self.pandas_kwargs)
return TableWrapper(data_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/engineering/la/generic/stream.py | stream.py |
# Standard library imports
from collections import OrderedDict
from io import StringIO
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party imports
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
import pandas as pd
class DataFetcher(DataFetcherStream):
"""
Class for handling data requests to data.lacity.org
"""
def __init__(self, endpoint, parameters, label, verbose=False, app_token = None, **pandas_kwargs):
"""
Initialize Data Fetcher for accessing data.lacity.org
@param endpoint: Data endpoint string
@param parameters: Parameters to use when retrieving dta
@param label: Label of pandas dataframe
@param verbose: Print out extra information
@param app_token: Application token to use to avoid throttling issues
@param date_columns
@param pandas_kwargs: Any additional key word arguments are passed to pandas.read_csv
"""
self.base_url = 'https://data.lacity.org/resource/'
self.base_url_and_endpoint = self.base_url + endpoint + '.csv?'
self.parameters = parameters
self.label = label
self.app_token = app_token
self.pandas_kwargs = pandas_kwargs
if '$$app_token' in parameters:
raise RuntimeError("Use app_token option in constructor instead of manually " +
"adding it into the the parameters")
if app_token != None:
self.parameters['$$app_token'] = app_token
super(DataFetcher, self).__init__([], verbose)
def output(self):
"""
Retrieve data from data.lacity.org
@return Table wrapper of containing specified data
"""
data_dict = OrderedDict()
url_query = self.base_url_and_endpoint + urlencode(self.parameters)
with urlopen(url_query) as remote_resource:
raw_string = remote_resource.read().decode()
string_data = StringIO(raw_string)
data_dict[self.label] = pd.read_csv(string_data, **self.pandas_kwargs)
return TableWrapper(data_dict) | 0.868297 | 0.274864 |
# Standard library imports
from collections import OrderedDict
from getpass import getpass
# Scikit Data Access
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party packages
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
class DataFetcher(DataFetcherStream):
""" Data Fetcher for retrieving stock data """
def __init__(self, ap_paramList, data_type, start_date=None, end_date=None, interval=None):
"""
@param ap_paramList[stock_symbol_list]: AutoList of stock symbols
@param data_type: Type of data to retrieve (daily, daily_adjusted, intraday, monthly, monthly_adjusted, weekly, weekly_adjusted)
@param start_date: Starting date
@param end_date: Ending date
@param interval: Interval for intraday (1min, 5min, 15min, 30min, 60min)
@return: Table data wrapper of stock data
"""
self.data_type = data_type
self.start_date = start_date
self.end_date = end_date
self.interval = interval
self.possible_intervals = ('1min', '5min', '15min', '30min', '60min')
self.possible_data_types = ("daily", "daily_adjusted", "intraday", "monthly", "monthly_adjusted", "weekly", "weekly_adjusted")
if interval not in self.possible_intervals and data_type == 'intraday':
raise RuntimeError('Did not understand interval: "' + str(interval) + '" to use with intraday data type')
elif interval is not None and data_type != 'intraday':
raise RuntimeError('interval is only used with data type intraday')
api_key = DataFetcher.getConfigItem('stocks', 'api_key')
write_key = False
while api_key is None or api_key == "":
api_key = getpass(prompt='Alpha Vantage API key')
write_key = True
if write_key:
DataFetcher.writeConfigItem('stocks','api_key', api_key)
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
"""
Retrieve stock data
@return TableWrapper of stock data
"""
stock_symbols = self.ap_paramList[0]()
timeseries_retriever = TimeSeries(key=DataFetcher.getConfigItem('stocks','api_key'),
output_format='pandas',
indexing_type = 'date')
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for symbol in stock_symbols:
# Extract data
if self.data_type == 'daily':
data, metadata = timeseries_retriever.get_daily(symbol, outputsize='full')
elif self.data_type == 'daily_adjusted':
data, metadata = timeseries_retriever.get_daily_adjusted(symbol, outputsize='full')
elif self.data_type == 'monthly':
data, metadata = timeseries_retriever.get_monthly(symbol)
elif self.data_type == 'monthly_adjusted':
data, metadata = timeseries_retriever.get_monthly_adjusted(symbol)
elif self.data_type == 'weekly':
data, metadata = timeseries_retriever.get_weekly(symbol)
elif self.data_type == 'weekly_adjusted':
data, metadata = timeseries_retriever.get_weekly_adjusted(symbol)
elif self.data_type == 'intraday':
data, metadata = timeseries_retriever.get_weekly_adjusted(symbol, self.interval, outputsize='full')
# Convert index to pandas datetime
if self.data_type == 'intraday':
data.index = pd.to_datetime(data.index).tz_localize(metadata['6. Time Zone'])
else:
data.index = pd.to_datetime(data.index)
data_dict[symbol] = data[self.start_date:self.end_date]
metadata_dict[symbol] = metadata
return TableWrapper(data_dict, meta_data = metadata_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/finance/timeseries/stream.py | stream.py |
# Standard library imports
from collections import OrderedDict
from getpass import getpass
# Scikit Data Access
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
# Third party packages
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
class DataFetcher(DataFetcherStream):
""" Data Fetcher for retrieving stock data """
def __init__(self, ap_paramList, data_type, start_date=None, end_date=None, interval=None):
"""
@param ap_paramList[stock_symbol_list]: AutoList of stock symbols
@param data_type: Type of data to retrieve (daily, daily_adjusted, intraday, monthly, monthly_adjusted, weekly, weekly_adjusted)
@param start_date: Starting date
@param end_date: Ending date
@param interval: Interval for intraday (1min, 5min, 15min, 30min, 60min)
@return: Table data wrapper of stock data
"""
self.data_type = data_type
self.start_date = start_date
self.end_date = end_date
self.interval = interval
self.possible_intervals = ('1min', '5min', '15min', '30min', '60min')
self.possible_data_types = ("daily", "daily_adjusted", "intraday", "monthly", "monthly_adjusted", "weekly", "weekly_adjusted")
if interval not in self.possible_intervals and data_type == 'intraday':
raise RuntimeError('Did not understand interval: "' + str(interval) + '" to use with intraday data type')
elif interval is not None and data_type != 'intraday':
raise RuntimeError('interval is only used with data type intraday')
api_key = DataFetcher.getConfigItem('stocks', 'api_key')
write_key = False
while api_key is None or api_key == "":
api_key = getpass(prompt='Alpha Vantage API key')
write_key = True
if write_key:
DataFetcher.writeConfigItem('stocks','api_key', api_key)
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
"""
Retrieve stock data
@return TableWrapper of stock data
"""
stock_symbols = self.ap_paramList[0]()
timeseries_retriever = TimeSeries(key=DataFetcher.getConfigItem('stocks','api_key'),
output_format='pandas',
indexing_type = 'date')
data_dict = OrderedDict()
metadata_dict = OrderedDict()
for symbol in stock_symbols:
# Extract data
if self.data_type == 'daily':
data, metadata = timeseries_retriever.get_daily(symbol, outputsize='full')
elif self.data_type == 'daily_adjusted':
data, metadata = timeseries_retriever.get_daily_adjusted(symbol, outputsize='full')
elif self.data_type == 'monthly':
data, metadata = timeseries_retriever.get_monthly(symbol)
elif self.data_type == 'monthly_adjusted':
data, metadata = timeseries_retriever.get_monthly_adjusted(symbol)
elif self.data_type == 'weekly':
data, metadata = timeseries_retriever.get_weekly(symbol)
elif self.data_type == 'weekly_adjusted':
data, metadata = timeseries_retriever.get_weekly_adjusted(symbol)
elif self.data_type == 'intraday':
data, metadata = timeseries_retriever.get_weekly_adjusted(symbol, self.interval, outputsize='full')
# Convert index to pandas datetime
if self.data_type == 'intraday':
data.index = pd.to_datetime(data.index).tz_localize(metadata['6. Time Zone'])
else:
data.index = pd.to_datetime(data.index)
data_dict[symbol] = data[self.start_date:self.end_date]
metadata_dict[symbol] = metadata
return TableWrapper(data_dict, meta_data = metadata_dict) | 0.795022 | 0.336658 |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.ode_util import *
# 3rd party imports
import pandas as pd
import numpy as np
from osgeo import gdal
# Standard library imports
from collections import OrderedDict
import os
class DataFetcher(DataFetcherCache):
''' Data Fetcher from the Orbital Data Explorer (ODE) '''
def __init__(self, target, mission, instrument, product_type,
western_lon = None, eastern_lon = None, min_lat = None, max_lat = None,
min_ob_time = '', max_ob_time = '', product_id = '', file_name = '*',
number_product_limit = 10, result_offset_number = 0, remove_ndv = True):
'''
Construct Data Fetcher object
For more information about the different fields and the possible values,
see the manual of ODE REST interface at http://oderest.rsl.wustl.edu
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product ID to look for, with wildcards (*) allowed
@param file_name: File name to look for, with wildcards (*) allowed
@param number_product_limit: Maximal number of products to return (ODE allows 100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@param remove_ndv: Replace the no-data value as mentionned in the label by np.nan
'''
assert western_lon is None or 0. <= western_lon <= 360., 'Western longitude is not between 0 and 360 degrees'
assert eastern_lon is None or 0. <= eastern_lon <= 360., 'Eastern longitude is not between 0 and 360 degrees'
assert min_lat is None or -90. <= min_lat <= 90., 'Minimal latitude is not between -90 and 90 degrees'
assert max_lat is None or -90. <= max_lat <= 90., 'Maximal latitude is not between -90 and 90 degrees'
assert 1 <= number_product_limit <= 100, 'Number of product limit must be between 1 and 100'
self.target = target
self.mission = mission
self.instrument = instrument
self.product_type = product_type
self.western_lon = western_lon
self.eastern_lon = eastern_lon
self.min_lat = min_lat
self.max_lat = max_lat
self.min_ob_time = min_ob_time
self.max_ob_time = max_ob_time
self.product_id = product_id
self.file_name = file_name
self.number_product_limit = number_product_limit
self.result_offset_number = result_offset_number
self.remove_ndv = remove_ndv
def output(self):
'''
Generate data wrapper from ODE data
'''
file_urls = query_files_urls(self.target, self.mission, self.instrument, self.product_type,
self.western_lon, self.eastern_lon, self.min_lat, self.max_lat,
self.min_ob_time, self.max_ob_time, self.product_id, self.file_name,
self.number_product_limit, self.result_offset_number)
downloaded_files = self.cacheData('ode', file_urls.keys())
# Gather the data and meta-data
data_dict = OrderedDict()
metadata_dict = OrderedDict()
unopened_files = []
opened_files = []
unlabeled_files = []
for file, key in zip(downloaded_files, file_urls.keys()):
file_description = file_urls.get(key)[1]
if 'LABEL' in file_description or 'IMG' in file_description:
label = file.split('/')[-1]
product = file_urls.get(key)[0]
if metadata_dict.get(product, None) is None:
data_dict[product] = OrderedDict()
metadata_dict[product] = OrderedDict()
metadata_dict[product]['Unopened files'] = []
raster = gdal.Open(file)
# Try to correct the label file
if raster is None:
new_label_file = correct_label_file(file, downloaded_files)
raster = gdal.Open(new_label_file)
if raster is not None:
print('File', label, 'has been corrected')
# If the file still cannot be opened, deal with it later
if raster is None:
unopened_files.append((file, product))
# Otherwise, put the data in a NumPy array and get the meta-data
else:
opened_files.append((file, product))
raster_array = get_raster_array(raster, remove_ndv = self.remove_ndv)
data_dict[product][label] = raster_array
metadata_dict[product][label] = OrderedDict()
metadata_dict[product][label]['Geotransform'] = raster.GetGeoTransform()
metadata_dict[product][label]['Projection'] = raster.GetProjection()
metadata_dict[product][label]['Pixel sizes'] = (raster.GetGeoTransform()[1],
raster.GetGeoTransform()[5])
metadata_dict[product][label]['Extent'] = get_raster_extent(raster)
# Close the data
raster = None
else:
label = file.split('/')[-1]
product = file_urls.get(key)[0]
unlabeled_files.append((file, product))
# Put the unopened files' local address with the meta-data, so that the
# user can decide what to do with them. It implies to look for the
# companion files of the label files that could not be opened.
for file, product in unopened_files:
companion_files = [file]
print('File', file.split('/')[-1], 'could not be opened')
for file_2, product_2 in unlabeled_files:
if (product_2 == product
and '.'.join(file_2.split('/')[-1].split('.')[:-1]) == '.'.join(file.split('/')[-1].split('.')[:-1])):
companion_files.append(file_2)
print('File', file_2.split('/')[-1], 'could not be opened')
metadata_dict[product]['Unopened files'].append(companion_files)
for file, product in unlabeled_files:
companion_files = []
for file_2, product_2 in opened_files + unopened_files:
if (product_2 == product
and '.'.join(file_2.split('/')[-1].split('.')[:-1]) == '.'.join(file.split('/')[-1].split('.')[:-1])):
companion_files.append(file_2)
if len(companion_files) == 0:
print('File', file.split('/')[-1], 'could not be opened')
metadata_dict[product]['Unopened files'].append([file])
return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict) | scikit-dataaccess | /scikit-dataaccess-1.9.17.tar.gz/scikit-dataaccess-1.9.17/skdaccess/planetary/ode/cache/data_fetcher.py | data_fetcher.py |
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.ode_util import *
# 3rd party imports
import pandas as pd
import numpy as np
from osgeo import gdal
# Standard library imports
from collections import OrderedDict
import os
class DataFetcher(DataFetcherCache):
''' Data Fetcher from the Orbital Data Explorer (ODE) '''
def __init__(self, target, mission, instrument, product_type,
western_lon = None, eastern_lon = None, min_lat = None, max_lat = None,
min_ob_time = '', max_ob_time = '', product_id = '', file_name = '*',
number_product_limit = 10, result_offset_number = 0, remove_ndv = True):
'''
Construct Data Fetcher object
For more information about the different fields and the possible values,
see the manual of ODE REST interface at http://oderest.rsl.wustl.edu
@param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus
@param mission: Aimed mission, e.g., MGS or MRO
@param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM
@param product_type: Type of product to look for, e.g., DTM or RDRV11
@param western_lon: Western longitude to look for the data, from 0 to 360
@param eastern_lon: Eastern longitude to look for the data, from 0 to 360
@param min_lat: Minimal latitude to look for the data, from -90 to 90
@param max_lat: Maximal latitude to look for the data, from -90 to 90
@param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'
@param product_id: PDS Product ID to look for, with wildcards (*) allowed
@param file_name: File name to look for, with wildcards (*) allowed
@param number_product_limit: Maximal number of products to return (ODE allows 100 at most)
@param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products
@param remove_ndv: Replace the no-data value as mentionned in the label by np.nan
'''
assert western_lon is None or 0. <= western_lon <= 360., 'Western longitude is not between 0 and 360 degrees'
assert eastern_lon is None or 0. <= eastern_lon <= 360., 'Eastern longitude is not between 0 and 360 degrees'
assert min_lat is None or -90. <= min_lat <= 90., 'Minimal latitude is not between -90 and 90 degrees'
assert max_lat is None or -90. <= max_lat <= 90., 'Maximal latitude is not between -90 and 90 degrees'
assert 1 <= number_product_limit <= 100, 'Number of product limit must be between 1 and 100'
self.target = target
self.mission = mission
self.instrument = instrument
self.product_type = product_type
self.western_lon = western_lon
self.eastern_lon = eastern_lon
self.min_lat = min_lat
self.max_lat = max_lat
self.min_ob_time = min_ob_time
self.max_ob_time = max_ob_time
self.product_id = product_id
self.file_name = file_name
self.number_product_limit = number_product_limit
self.result_offset_number = result_offset_number
self.remove_ndv = remove_ndv
def output(self):
'''
Generate data wrapper from ODE data
'''
file_urls = query_files_urls(self.target, self.mission, self.instrument, self.product_type,
self.western_lon, self.eastern_lon, self.min_lat, self.max_lat,
self.min_ob_time, self.max_ob_time, self.product_id, self.file_name,
self.number_product_limit, self.result_offset_number)
downloaded_files = self.cacheData('ode', file_urls.keys())
# Gather the data and meta-data
data_dict = OrderedDict()
metadata_dict = OrderedDict()
unopened_files = []
opened_files = []
unlabeled_files = []
for file, key in zip(downloaded_files, file_urls.keys()):
file_description = file_urls.get(key)[1]
if 'LABEL' in file_description or 'IMG' in file_description:
label = file.split('/')[-1]
product = file_urls.get(key)[0]
if metadata_dict.get(product, None) is None:
data_dict[product] = OrderedDict()
metadata_dict[product] = OrderedDict()
metadata_dict[product]['Unopened files'] = []
raster = gdal.Open(file)
# Try to correct the label file
if raster is None:
new_label_file = correct_label_file(file, downloaded_files)
raster = gdal.Open(new_label_file)
if raster is not None:
print('File', label, 'has been corrected')
# If the file still cannot be opened, deal with it later
if raster is None:
unopened_files.append((file, product))
# Otherwise, put the data in a NumPy array and get the meta-data
else:
opened_files.append((file, product))
raster_array = get_raster_array(raster, remove_ndv = self.remove_ndv)
data_dict[product][label] = raster_array
metadata_dict[product][label] = OrderedDict()
metadata_dict[product][label]['Geotransform'] = raster.GetGeoTransform()
metadata_dict[product][label]['Projection'] = raster.GetProjection()
metadata_dict[product][label]['Pixel sizes'] = (raster.GetGeoTransform()[1],
raster.GetGeoTransform()[5])
metadata_dict[product][label]['Extent'] = get_raster_extent(raster)
# Close the data
raster = None
else:
label = file.split('/')[-1]
product = file_urls.get(key)[0]
unlabeled_files.append((file, product))
# Put the unopened files' local address with the meta-data, so that the
# user can decide what to do with them. It implies to look for the
# companion files of the label files that could not be opened.
for file, product in unopened_files:
companion_files = [file]
print('File', file.split('/')[-1], 'could not be opened')
for file_2, product_2 in unlabeled_files:
if (product_2 == product
and '.'.join(file_2.split('/')[-1].split('.')[:-1]) == '.'.join(file.split('/')[-1].split('.')[:-1])):
companion_files.append(file_2)
print('File', file_2.split('/')[-1], 'could not be opened')
metadata_dict[product]['Unopened files'].append(companion_files)
for file, product in unlabeled_files:
companion_files = []
for file_2, product_2 in opened_files + unopened_files:
if (product_2 == product
and '.'.join(file_2.split('/')[-1].split('.')[:-1]) == '.'.join(file.split('/')[-1].split('.')[:-1])):
companion_files.append(file_2)
if len(companion_files) == 0:
print('File', file.split('/')[-1], 'could not be opened')
metadata_dict[product]['Unopened files'].append([file])
return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict) | 0.670285 | 0.476641 |
from __future__ import annotations
from typing import Any, Literal, Tuple, overload
import numpy as np
from sklearn.utils import Bunch
from tensorflow.keras.datasets import (
boston_housing,
cifar10,
cifar100,
fashion_mnist,
imdb,
mnist,
reuters,
)
DATASETS = {
"boston_housing": boston_housing.load_data,
"cifar10": cifar10.load_data,
"cifar100": cifar100.load_data,
"fashion_mnist": fashion_mnist.load_data,
"imdb": imdb.load_data,
"mnist": mnist.load_data,
"reuters": reuters.load_data,
}
@overload
def fetch(
name: str,
*,
return_X_y: Literal[False] = False,
**kwargs: Any,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
return_X_y: Literal[True],
**kwargs: Any,
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[int]]:
pass
def fetch(
name: str,
*,
return_X_y: bool = False,
**kwargs: Any,
) -> Bunch | Tuple[np.typing.NDArray[float], np.typing.NDArray[int]]:
"""
Fetch Keras dataset.
Fetch a Keras dataset by name. More info at https://keras.io/datasets.
Parameters
----------
name : string
Dataset name.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
**kwargs : dict
Optional key-value arguments. See https://keras.io/datasets.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
(X_train, y_train), (X_test, y_test) = DATASETS[name](**kwargs)
if len(X_train.shape) > 2:
name = name + " " + str(X_train.shape[1:]) + " shaped"
X_max = np.iinfo(X_train[0][0].dtype).max
n_features = np.prod(X_train.shape[1:])
X_train = X_train.reshape([X_train.shape[0], n_features]) / X_max
X_test = X_test.reshape([X_test.shape[0], n_features]) / X_max
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=list(range(len(X_train))),
validation_indices=[],
test_indices=list(range(len(X_train), len(X))),
inner_cv=None,
outer_cv=None,
DESCR=name,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/keras.py | keras.py | from __future__ import annotations
from typing import Any, Literal, Tuple, overload
import numpy as np
from sklearn.utils import Bunch
from tensorflow.keras.datasets import (
boston_housing,
cifar10,
cifar100,
fashion_mnist,
imdb,
mnist,
reuters,
)
DATASETS = {
"boston_housing": boston_housing.load_data,
"cifar10": cifar10.load_data,
"cifar100": cifar100.load_data,
"fashion_mnist": fashion_mnist.load_data,
"imdb": imdb.load_data,
"mnist": mnist.load_data,
"reuters": reuters.load_data,
}
@overload
def fetch(
name: str,
*,
return_X_y: Literal[False] = False,
**kwargs: Any,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
return_X_y: Literal[True],
**kwargs: Any,
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[int]]:
pass
def fetch(
name: str,
*,
return_X_y: bool = False,
**kwargs: Any,
) -> Bunch | Tuple[np.typing.NDArray[float], np.typing.NDArray[int]]:
"""
Fetch Keras dataset.
Fetch a Keras dataset by name. More info at https://keras.io/datasets.
Parameters
----------
name : string
Dataset name.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
**kwargs : dict
Optional key-value arguments. See https://keras.io/datasets.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
(X_train, y_train), (X_test, y_test) = DATASETS[name](**kwargs)
if len(X_train.shape) > 2:
name = name + " " + str(X_train.shape[1:]) + " shaped"
X_max = np.iinfo(X_train[0][0].dtype).max
n_features = np.prod(X_train.shape[1:])
X_train = X_train.reshape([X_train.shape[0], n_features]) / X_max
X_test = X_test.reshape([X_test.shape[0], n_features]) / X_max
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=list(range(len(X_train))),
validation_indices=[],
test_indices=list(range(len(X_train), len(X))),
inner_cv=None,
outer_cv=None,
DESCR=name,
) | 0.914582 | 0.451689 |
from __future__ import annotations
import os
from typing import Final, Literal, Sequence, Tuple, overload
import numpy as np
import scipy as sp
from sklearn.datasets import load_svmlight_file, load_svmlight_files
from sklearn.model_selection import PredefinedSplit
from sklearn.utils import Bunch
from .base import DatasetNotFoundError, fetch_file
BASE_URL: Final = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets"
COLLECTIONS: Final = frozenset(
(
"binary",
"multiclass",
"regression",
"string",
)
)
def _fetch_partition(
collection: str,
name: str,
partition: str,
data_home: str | None = None,
) -> str | None:
"""Fetch dataset partition."""
subfolder = os.path.join("libsvm", collection)
dataname = name.replace("/", "-")
url = f"{BASE_URL}/{collection}/{name}{partition}"
for data_url in (f"{url}.bz2", url):
try:
return os.fspath(
fetch_file(
dataname,
urlname=data_url,
subfolder=subfolder,
data_home=data_home,
),
)
except DatasetNotFoundError:
pass
return None
def _load(
collection: str,
name: str,
data_home: str | None = None,
) -> Tuple[
np.typing.NDArray[float],
np.typing.NDArray[int | float],
Sequence[int],
Sequence[int],
Sequence[int],
PredefinedSplit,
]:
"""Load dataset."""
filename = _fetch_partition(collection, name, "", data_home)
filename_tr = _fetch_partition(collection, name, ".tr", data_home)
filename_val = _fetch_partition(collection, name, ".val", data_home)
filename_t = _fetch_partition(collection, name, ".t", data_home)
filename_r = _fetch_partition(collection, name, ".r", data_home)
if (
(filename_tr is not None)
and (filename_val is not None)
and (filename_t is not None)
):
_, _, X_tr, y_tr, X_val, y_val, X_test, y_test = load_svmlight_files(
[
filename,
filename_tr,
filename_val,
filename_t,
]
)
cv = PredefinedSplit([-1] * X_tr.shape[0] + [0] * X_val.shape[0])
X = sp.sparse.vstack((X_tr, X_val, X_test))
y = np.hstack((y_tr, y_val, y_test))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = list(
range(
X_tr.shape[0],
X_tr.shape[0] + X_val.shape[0],
)
)
test_indices = list(range(X_tr.shape[0] + X_val.shape[0], X.shape[0]))
elif (filename_tr is not None) and (filename_val is not None):
_, _, X_tr, y_tr, X_val, y_val = load_svmlight_files(
[
filename,
filename_tr,
filename_val,
]
)
cv = PredefinedSplit([-1] * X_tr.shape[0] + [0] * X_val.shape[0])
X = sp.sparse.vstack((X_tr, X_val))
y = np.hstack((y_tr, y_val))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = list(range(X_tr.shape[0], X.shape[0]))
test_indices = []
elif (filename_t is not None) and (filename_r is not None):
X_tr, y_tr, X_test, y_test, X_remaining, y_remaining = load_svmlight_files(
[
filename,
filename_t,
filename_r,
]
)
X = sp.sparse.vstack((X_tr, X_test, X_remaining))
y = np.hstack((y_tr, y_test, y_remaining))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = []
test_indices = list(
range(
X_tr.shape[0],
X_tr.shape[0] + X_test.shape[0],
),
)
cv = None
elif filename_t is not None:
X_tr, y_tr, X_test, y_test = load_svmlight_files(
[
filename,
filename_t,
]
)
X = sp.sparse.vstack((X_tr, X_test))
y = np.hstack((y_tr, y_test))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = []
test_indices = list(range(X_tr.shape[0], X.shape[0]))
cv = None
else:
X, y = load_svmlight_file(filename)
# Compute indices
train_indices = []
validation_indices = []
test_indices = []
cv = None
return X, y, train_indices, validation_indices, test_indices, cv
@overload
def fetch(
collection: str,
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
collection: str,
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[int | float]]:
pass
def fetch(
collection: str,
name: str,
*,
data_home: str | None = None,
return_X_y: bool = False,
) -> Bunch | Tuple[np.typing.NDArray[float], np.typing.NDArray[int | float]]:
"""
Fetch LIBSVM dataset.
Fetch a LIBSVM dataset by collection and name. More info at
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets.
Parameters
----------
collection : string
Collection name.
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if collection not in COLLECTIONS:
raise Exception("Avaliable collections are " + str(list(COLLECTIONS)))
X, y, train_indices, validation_indices, test_indices, cv = _load(
collection,
name,
data_home=data_home,
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=train_indices,
validation_indices=validation_indices,
test_indices=test_indices,
inner_cv=cv,
outer_cv=None,
DESCR=name,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/libsvm.py | libsvm.py | from __future__ import annotations
import os
from typing import Final, Literal, Sequence, Tuple, overload
import numpy as np
import scipy as sp
from sklearn.datasets import load_svmlight_file, load_svmlight_files
from sklearn.model_selection import PredefinedSplit
from sklearn.utils import Bunch
from .base import DatasetNotFoundError, fetch_file
BASE_URL: Final = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets"
COLLECTIONS: Final = frozenset(
(
"binary",
"multiclass",
"regression",
"string",
)
)
def _fetch_partition(
collection: str,
name: str,
partition: str,
data_home: str | None = None,
) -> str | None:
"""Fetch dataset partition."""
subfolder = os.path.join("libsvm", collection)
dataname = name.replace("/", "-")
url = f"{BASE_URL}/{collection}/{name}{partition}"
for data_url in (f"{url}.bz2", url):
try:
return os.fspath(
fetch_file(
dataname,
urlname=data_url,
subfolder=subfolder,
data_home=data_home,
),
)
except DatasetNotFoundError:
pass
return None
def _load(
collection: str,
name: str,
data_home: str | None = None,
) -> Tuple[
np.typing.NDArray[float],
np.typing.NDArray[int | float],
Sequence[int],
Sequence[int],
Sequence[int],
PredefinedSplit,
]:
"""Load dataset."""
filename = _fetch_partition(collection, name, "", data_home)
filename_tr = _fetch_partition(collection, name, ".tr", data_home)
filename_val = _fetch_partition(collection, name, ".val", data_home)
filename_t = _fetch_partition(collection, name, ".t", data_home)
filename_r = _fetch_partition(collection, name, ".r", data_home)
if (
(filename_tr is not None)
and (filename_val is not None)
and (filename_t is not None)
):
_, _, X_tr, y_tr, X_val, y_val, X_test, y_test = load_svmlight_files(
[
filename,
filename_tr,
filename_val,
filename_t,
]
)
cv = PredefinedSplit([-1] * X_tr.shape[0] + [0] * X_val.shape[0])
X = sp.sparse.vstack((X_tr, X_val, X_test))
y = np.hstack((y_tr, y_val, y_test))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = list(
range(
X_tr.shape[0],
X_tr.shape[0] + X_val.shape[0],
)
)
test_indices = list(range(X_tr.shape[0] + X_val.shape[0], X.shape[0]))
elif (filename_tr is not None) and (filename_val is not None):
_, _, X_tr, y_tr, X_val, y_val = load_svmlight_files(
[
filename,
filename_tr,
filename_val,
]
)
cv = PredefinedSplit([-1] * X_tr.shape[0] + [0] * X_val.shape[0])
X = sp.sparse.vstack((X_tr, X_val))
y = np.hstack((y_tr, y_val))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = list(range(X_tr.shape[0], X.shape[0]))
test_indices = []
elif (filename_t is not None) and (filename_r is not None):
X_tr, y_tr, X_test, y_test, X_remaining, y_remaining = load_svmlight_files(
[
filename,
filename_t,
filename_r,
]
)
X = sp.sparse.vstack((X_tr, X_test, X_remaining))
y = np.hstack((y_tr, y_test, y_remaining))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = []
test_indices = list(
range(
X_tr.shape[0],
X_tr.shape[0] + X_test.shape[0],
),
)
cv = None
elif filename_t is not None:
X_tr, y_tr, X_test, y_test = load_svmlight_files(
[
filename,
filename_t,
]
)
X = sp.sparse.vstack((X_tr, X_test))
y = np.hstack((y_tr, y_test))
# Compute indices
train_indices = list(range(X_tr.shape[0]))
validation_indices = []
test_indices = list(range(X_tr.shape[0], X.shape[0]))
cv = None
else:
X, y = load_svmlight_file(filename)
# Compute indices
train_indices = []
validation_indices = []
test_indices = []
cv = None
return X, y, train_indices, validation_indices, test_indices, cv
@overload
def fetch(
collection: str,
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
collection: str,
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[int | float]]:
pass
def fetch(
collection: str,
name: str,
*,
data_home: str | None = None,
return_X_y: bool = False,
) -> Bunch | Tuple[np.typing.NDArray[float], np.typing.NDArray[int | float]]:
"""
Fetch LIBSVM dataset.
Fetch a LIBSVM dataset by collection and name. More info at
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets.
Parameters
----------
collection : string
Collection name.
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if collection not in COLLECTIONS:
raise Exception("Avaliable collections are " + str(list(COLLECTIONS)))
X, y, train_indices, validation_indices, test_indices, cv = _load(
collection,
name,
data_home=data_home,
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=train_indices,
validation_indices=validation_indices,
test_indices=test_indices,
inner_cv=cv,
outer_cv=None,
DESCR=name,
) | 0.873134 | 0.375392 |
from __future__ import annotations
import io
import os
from pathlib import Path
from types import MappingProxyType
from typing import (
AbstractSet,
Any,
Final,
Iterator,
Literal,
Optional,
Sequence,
Tuple,
Union,
overload,
)
from zipfile import ZipFile
import numpy as np
import pandas as pd
from sklearn.utils import Bunch
from .base import fetch_file
BASE_URL = "http://sci2s.ugr.es/keel"
COLLECTIONS: Final = frozenset(
(
"classification",
"missing",
"imbalanced",
"multiInstance",
"multilabel",
"textClassification",
"classNoise",
"attributeNoise",
"semisupervised",
"regression",
"timeseries",
"unsupervised",
"lowQuality",
)
)
# WTFs
IMBALANCED_URLS: Final = (
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p1",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p2",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p3",
"dataset/data/imbalanced",
"keel-dataset/datasets/imbalanced/imb_noisyBordExamples",
"keel-dataset/datasets/imbalanced/preprocessed",
)
IRREGULAR_DESCR_IMBALANCED_URLS: Final = (
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p1",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p2",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p3",
)
INCORRECT_DESCR_IMBALANCED_URLS: Final = MappingProxyType(
{"semisupervised": "classification"},
)
class KeelOuterCV(object):
"""Iterable over already separated CV partitions of the dataset."""
def __init__(
self,
Xs: Sequence[np.typing.NDArray[float]],
ys: Sequence[np.typing.NDArray[Union[int, float]]],
Xs_test: Sequence[np.typing.NDArray[float]],
ys_test: Sequence[np.typing.NDArray[Union[int, float]]],
) -> None:
self.Xs = Xs
self.ys = ys
self.Xs_test = Xs_test
self.ys_test = ys_test
def __iter__(
self,
) -> Iterator[
Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
]
]:
return zip(self.Xs, self.ys, self.Xs_test, self.ys_test)
def _load_Xy(
zipfile: Path,
csvfile: str,
sep: str = ",",
header: Optional[int] = None,
engine: str = "python",
na_values: AbstractSet[str] = frozenset(("?")),
**kwargs: Any,
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]]:
"""Load a zipped csv file with target in the last column."""
with ZipFile(zipfile) as z:
with z.open(csvfile) as c:
s = io.StringIO(c.read().decode(encoding="utf8"))
data = pd.read_csv(
s,
sep=sep,
header=header,
engine=engine,
na_values=na_values,
**kwargs,
)
data.columns = data.columns.astype(str)
X = pd.get_dummies(data.iloc[:, :-1])
y = pd.factorize(data.iloc[:, -1].tolist(), sort=True)[0]
return X, y
def _load_descr(
collection: str,
name: str,
data_home: Optional[str] = None,
) -> Tuple[int, str]:
"""Load a dataset description."""
subfolder = os.path.join("keel", collection)
filename = name + "-names.txt"
if collection == "imbalanced":
for url in IMBALANCED_URLS:
if url in IRREGULAR_DESCR_IMBALANCED_URLS:
url = BASE_URL + "/" + url + "/" + "names" + "/" + filename
else:
url = BASE_URL + "/" + url + "/" + filename
try:
f = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
break
except Exception:
pass
else:
collection = (
INCORRECT_DESCR_IMBALANCED_URLS[collection]
if collection in INCORRECT_DESCR_IMBALANCED_URLS
else collection
)
url = f"{BASE_URL}/dataset/data/{collection}/{filename}"
f = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
with open(f) as rst_file:
fdescr = rst_file.read()
nattrs = fdescr.count("@attribute")
return nattrs, fdescr
def _fetch_keel_zip(
collection: str,
name: str,
filename: str,
data_home: Optional[str] = None,
) -> Path:
"""Fetch Keel dataset zip file."""
subfolder = os.path.join("keel", collection)
if collection == "imbalanced":
for url in IMBALANCED_URLS:
url = BASE_URL + "/" + url + "/" + filename
try:
return fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
except Exception:
pass
else:
url = f"{BASE_URL}/dataset/data/{collection}/{filename}"
return fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
raise ValueError("Dataset not found")
def _load_folds(
collection: str,
name: str,
nfolds: Literal[None, 1, 5, 10],
dobscv: bool,
nattrs: int,
data_home: Optional[str] = None,
) -> Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
Optional[KeelOuterCV],
]:
"""Load a dataset folds."""
filename = name + ".zip"
f = _fetch_keel_zip(collection, name, filename, data_home=data_home)
X, y = _load_Xy(f, name + ".dat", skiprows=nattrs + 4)
cv = None
if nfolds in (5, 10):
fold = "dobscv" if dobscv else "fold"
filename = name + "-" + str(nfolds) + "-" + fold + ".zip"
f = _fetch_keel_zip(collection, name, filename, data_home=data_home)
Xs = []
ys = []
Xs_test = []
ys_test = []
for i in range(nfolds):
if dobscv:
# Zipfiles always use fordward slashes, even in Windows.
_name = f"{name}/{name}-{nfolds}dobscv-{i + 1}"
else:
_name = f"{name}-{nfolds}-{i + 1}"
X_fold, y_fold = _load_Xy(f, _name + "tra.dat", skiprows=nattrs + 4)
X_test_fold, y_test_fold = _load_Xy(
f,
_name + "tst.dat",
skiprows=nattrs + 4,
)
Xs.append(X_fold)
ys.append(y_fold)
Xs_test.append(X_test_fold)
ys_test.append(y_test_fold)
cv = KeelOuterCV(Xs, ys, Xs_test, ys_test)
return X, y, cv
@overload
def fetch(
collection: str,
name: str,
data_home: Optional[str] = None,
nfolds: Literal[None, 1, 5, 10] = None,
dobscv: bool = False,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
collection: str,
name: str,
data_home: Optional[str] = None,
nfolds: Literal[None, 1, 5, 10] = None,
dobscv: bool = False,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]]:
pass
def fetch(
collection: str,
name: str,
data_home: Optional[str] = None,
nfolds: Literal[None, 1, 5, 10] = None,
dobscv: bool = False,
*,
return_X_y: bool = False,
) -> Union[
Bunch,
Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]],
]:
"""
Fetch Keel dataset.
Fetch a Keel dataset by collection and name. More info at
http://sci2s.ugr.es/keel.
Parameters
----------
collection : string
Collection name.
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
nfolds : int, default=None
Number of folds. Depending on the dataset, valid values are
{None, 1, 5, 10}.
dobscv : bool, default=False
If folds are in {5, 10}, indicates that the cv folds are distribution
optimally balanced stratified. Only available for some datasets.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
kwargs : dict
Optional key-value arguments
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if collection not in COLLECTIONS:
raise ValueError("Avaliable collections are " + str(list(COLLECTIONS)))
nattrs, DESCR = _load_descr(collection, name, data_home=data_home)
X, y, cv = _load_folds(
collection,
name,
nfolds,
dobscv,
nattrs,
data_home=data_home,
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=cv,
DESCR=DESCR,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/keel.py | keel.py | from __future__ import annotations
import io
import os
from pathlib import Path
from types import MappingProxyType
from typing import (
AbstractSet,
Any,
Final,
Iterator,
Literal,
Optional,
Sequence,
Tuple,
Union,
overload,
)
from zipfile import ZipFile
import numpy as np
import pandas as pd
from sklearn.utils import Bunch
from .base import fetch_file
BASE_URL = "http://sci2s.ugr.es/keel"
COLLECTIONS: Final = frozenset(
(
"classification",
"missing",
"imbalanced",
"multiInstance",
"multilabel",
"textClassification",
"classNoise",
"attributeNoise",
"semisupervised",
"regression",
"timeseries",
"unsupervised",
"lowQuality",
)
)
# WTFs
IMBALANCED_URLS: Final = (
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p1",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p2",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p3",
"dataset/data/imbalanced",
"keel-dataset/datasets/imbalanced/imb_noisyBordExamples",
"keel-dataset/datasets/imbalanced/preprocessed",
)
IRREGULAR_DESCR_IMBALANCED_URLS: Final = (
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p1",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p2",
"keel-dataset/datasets/imbalanced/imb_IRhigherThan9p3",
)
INCORRECT_DESCR_IMBALANCED_URLS: Final = MappingProxyType(
{"semisupervised": "classification"},
)
class KeelOuterCV(object):
"""Iterable over already separated CV partitions of the dataset."""
def __init__(
self,
Xs: Sequence[np.typing.NDArray[float]],
ys: Sequence[np.typing.NDArray[Union[int, float]]],
Xs_test: Sequence[np.typing.NDArray[float]],
ys_test: Sequence[np.typing.NDArray[Union[int, float]]],
) -> None:
self.Xs = Xs
self.ys = ys
self.Xs_test = Xs_test
self.ys_test = ys_test
def __iter__(
self,
) -> Iterator[
Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
]
]:
return zip(self.Xs, self.ys, self.Xs_test, self.ys_test)
def _load_Xy(
zipfile: Path,
csvfile: str,
sep: str = ",",
header: Optional[int] = None,
engine: str = "python",
na_values: AbstractSet[str] = frozenset(("?")),
**kwargs: Any,
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]]:
"""Load a zipped csv file with target in the last column."""
with ZipFile(zipfile) as z:
with z.open(csvfile) as c:
s = io.StringIO(c.read().decode(encoding="utf8"))
data = pd.read_csv(
s,
sep=sep,
header=header,
engine=engine,
na_values=na_values,
**kwargs,
)
data.columns = data.columns.astype(str)
X = pd.get_dummies(data.iloc[:, :-1])
y = pd.factorize(data.iloc[:, -1].tolist(), sort=True)[0]
return X, y
def _load_descr(
collection: str,
name: str,
data_home: Optional[str] = None,
) -> Tuple[int, str]:
"""Load a dataset description."""
subfolder = os.path.join("keel", collection)
filename = name + "-names.txt"
if collection == "imbalanced":
for url in IMBALANCED_URLS:
if url in IRREGULAR_DESCR_IMBALANCED_URLS:
url = BASE_URL + "/" + url + "/" + "names" + "/" + filename
else:
url = BASE_URL + "/" + url + "/" + filename
try:
f = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
break
except Exception:
pass
else:
collection = (
INCORRECT_DESCR_IMBALANCED_URLS[collection]
if collection in INCORRECT_DESCR_IMBALANCED_URLS
else collection
)
url = f"{BASE_URL}/dataset/data/{collection}/{filename}"
f = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
with open(f) as rst_file:
fdescr = rst_file.read()
nattrs = fdescr.count("@attribute")
return nattrs, fdescr
def _fetch_keel_zip(
collection: str,
name: str,
filename: str,
data_home: Optional[str] = None,
) -> Path:
"""Fetch Keel dataset zip file."""
subfolder = os.path.join("keel", collection)
if collection == "imbalanced":
for url in IMBALANCED_URLS:
url = BASE_URL + "/" + url + "/" + filename
try:
return fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
except Exception:
pass
else:
url = f"{BASE_URL}/dataset/data/{collection}/{filename}"
return fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
raise ValueError("Dataset not found")
def _load_folds(
collection: str,
name: str,
nfolds: Literal[None, 1, 5, 10],
dobscv: bool,
nattrs: int,
data_home: Optional[str] = None,
) -> Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
Optional[KeelOuterCV],
]:
"""Load a dataset folds."""
filename = name + ".zip"
f = _fetch_keel_zip(collection, name, filename, data_home=data_home)
X, y = _load_Xy(f, name + ".dat", skiprows=nattrs + 4)
cv = None
if nfolds in (5, 10):
fold = "dobscv" if dobscv else "fold"
filename = name + "-" + str(nfolds) + "-" + fold + ".zip"
f = _fetch_keel_zip(collection, name, filename, data_home=data_home)
Xs = []
ys = []
Xs_test = []
ys_test = []
for i in range(nfolds):
if dobscv:
# Zipfiles always use fordward slashes, even in Windows.
_name = f"{name}/{name}-{nfolds}dobscv-{i + 1}"
else:
_name = f"{name}-{nfolds}-{i + 1}"
X_fold, y_fold = _load_Xy(f, _name + "tra.dat", skiprows=nattrs + 4)
X_test_fold, y_test_fold = _load_Xy(
f,
_name + "tst.dat",
skiprows=nattrs + 4,
)
Xs.append(X_fold)
ys.append(y_fold)
Xs_test.append(X_test_fold)
ys_test.append(y_test_fold)
cv = KeelOuterCV(Xs, ys, Xs_test, ys_test)
return X, y, cv
@overload
def fetch(
collection: str,
name: str,
data_home: Optional[str] = None,
nfolds: Literal[None, 1, 5, 10] = None,
dobscv: bool = False,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
collection: str,
name: str,
data_home: Optional[str] = None,
nfolds: Literal[None, 1, 5, 10] = None,
dobscv: bool = False,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]]:
pass
def fetch(
collection: str,
name: str,
data_home: Optional[str] = None,
nfolds: Literal[None, 1, 5, 10] = None,
dobscv: bool = False,
*,
return_X_y: bool = False,
) -> Union[
Bunch,
Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]],
]:
"""
Fetch Keel dataset.
Fetch a Keel dataset by collection and name. More info at
http://sci2s.ugr.es/keel.
Parameters
----------
collection : string
Collection name.
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
nfolds : int, default=None
Number of folds. Depending on the dataset, valid values are
{None, 1, 5, 10}.
dobscv : bool, default=False
If folds are in {5, 10}, indicates that the cv folds are distribution
optimally balanced stratified. Only available for some datasets.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
kwargs : dict
Optional key-value arguments
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if collection not in COLLECTIONS:
raise ValueError("Avaliable collections are " + str(list(COLLECTIONS)))
nattrs, DESCR = _load_descr(collection, name, data_home=data_home)
X, y, cv = _load_folds(
collection,
name,
nfolds,
dobscv,
nattrs,
data_home=data_home,
)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=cv,
DESCR=DESCR,
) | 0.714827 | 0.330714 |
import numpy as np
from sklearn.utils import Bunch
from .base import fetch_zip
DESCR = """
The AneuRisk data set is based on a set of three-dimensional angiographic
images taken from 65 subjects, hospitalized at Niguarda Ca’ Granda
Hospital (Milan), who were suspected of being affected by cerebral aneurysms.
Out of these 65 subjects, 33 subjects have an aneurysm at or after the
terminal bifurcation of the ICA (“Upper” group), 25 subjects have an aneurysm
along the ICA (“Lower” group), and 7 subjects were not found any visible
aneurysm during the angiography (“No-aneurysm” group).
For more information see:
http://ecm2.mathcs.emory.edu/aneuriskdata/files/ReadMe_AneuRisk-website_2012-05.pdf
"""
def fetch(name="Aneurisk65", *, data_home=None, return_X_y=False):
if name != "Aneurisk65":
raise ValueError(f"Unknown dataset {name}")
n_samples = 65
url = (
"http://ecm2.mathcs.emory.edu/aneuriskdata/files/Carotid-data_MBI_workshop.zip"
)
dataset_path = fetch_zip(
dataname=name,
urlname=url,
subfolder="aneurisk",
data_home=data_home,
)
patient_dtype = [
("patient", np.int_),
("code", "U8"),
("type", "U1"),
("aneurysm location", np.float_),
("left_right", "U2"),
]
functions_dtype = [
("curvilinear abscissa", np.object_),
("MISR", np.object_),
("X0 observed", np.object_),
("Y0 observed", np.object_),
("Z0 observed", np.object_),
("X0 observed FKS", np.object_),
("Y0 observed FKS", np.object_),
("Z0 observed FKS", np.object_),
("X0 observed FKS reflected", np.object_),
("X1 observed FKS", np.object_),
("Y1 observed FKS", np.object_),
("Z1 observed FKS", np.object_),
("X1 observed FKS reflected", np.object_),
("X2 observed FKS", np.object_),
("Y2 observed FKS", np.object_),
("Z2 observed FKS", np.object_),
("X2 observed FKS reflected", np.object_),
("Curvature FKS", np.object_),
]
complete_dtype = patient_dtype + functions_dtype
X = np.zeros(shape=n_samples, dtype=complete_dtype)
X[[p[0] for p in patient_dtype]] = np.genfromtxt(
dataset_path / "Patients.txt",
dtype=patient_dtype,
skip_header=1,
missing_values=("NA",),
)
for i in range(n_samples):
file = f"Rawdata_FKS_{i + 1}.txt"
functions = np.genfromtxt(
dataset_path / file,
skip_header=1,
)
for j, (f_name, _) in enumerate(functions_dtype):
X[i][f_name] = functions[:, j]
X = np.array(X.tolist(), dtype=np.object_)
if return_X_y:
return X, None
return Bunch(
data=X,
target=None,
train_indices=[],
validation_indices=[],
test_indices=[],
name=name,
DESCR=DESCR,
feature_names=[t[0] for t in complete_dtype],
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/aneurisk.py | aneurisk.py |
import numpy as np
from sklearn.utils import Bunch
from .base import fetch_zip
DESCR = """
The AneuRisk data set is based on a set of three-dimensional angiographic
images taken from 65 subjects, hospitalized at Niguarda Ca’ Granda
Hospital (Milan), who were suspected of being affected by cerebral aneurysms.
Out of these 65 subjects, 33 subjects have an aneurysm at or after the
terminal bifurcation of the ICA (“Upper” group), 25 subjects have an aneurysm
along the ICA (“Lower” group), and 7 subjects were not found any visible
aneurysm during the angiography (“No-aneurysm” group).
For more information see:
http://ecm2.mathcs.emory.edu/aneuriskdata/files/ReadMe_AneuRisk-website_2012-05.pdf
"""
def fetch(name="Aneurisk65", *, data_home=None, return_X_y=False):
if name != "Aneurisk65":
raise ValueError(f"Unknown dataset {name}")
n_samples = 65
url = (
"http://ecm2.mathcs.emory.edu/aneuriskdata/files/Carotid-data_MBI_workshop.zip"
)
dataset_path = fetch_zip(
dataname=name,
urlname=url,
subfolder="aneurisk",
data_home=data_home,
)
patient_dtype = [
("patient", np.int_),
("code", "U8"),
("type", "U1"),
("aneurysm location", np.float_),
("left_right", "U2"),
]
functions_dtype = [
("curvilinear abscissa", np.object_),
("MISR", np.object_),
("X0 observed", np.object_),
("Y0 observed", np.object_),
("Z0 observed", np.object_),
("X0 observed FKS", np.object_),
("Y0 observed FKS", np.object_),
("Z0 observed FKS", np.object_),
("X0 observed FKS reflected", np.object_),
("X1 observed FKS", np.object_),
("Y1 observed FKS", np.object_),
("Z1 observed FKS", np.object_),
("X1 observed FKS reflected", np.object_),
("X2 observed FKS", np.object_),
("Y2 observed FKS", np.object_),
("Z2 observed FKS", np.object_),
("X2 observed FKS reflected", np.object_),
("Curvature FKS", np.object_),
]
complete_dtype = patient_dtype + functions_dtype
X = np.zeros(shape=n_samples, dtype=complete_dtype)
X[[p[0] for p in patient_dtype]] = np.genfromtxt(
dataset_path / "Patients.txt",
dtype=patient_dtype,
skip_header=1,
missing_values=("NA",),
)
for i in range(n_samples):
file = f"Rawdata_FKS_{i + 1}.txt"
functions = np.genfromtxt(
dataset_path / file,
skip_header=1,
)
for j, (f_name, _) in enumerate(functions_dtype):
X[i][f_name] = functions[:, j]
X = np.array(X.tolist(), dtype=np.object_)
if return_X_y:
return X, None
return Bunch(
data=X,
target=None,
train_indices=[],
validation_indices=[],
test_indices=[],
name=name,
DESCR=DESCR,
feature_names=[t[0] for t in complete_dtype],
) | 0.645232 | 0.555857 |
import time
from datetime import date, timedelta
import numpy as np
from sklearn.utils import Bunch
from forex_python.bitcoin import BtcConverter
from forex_python.converter import CurrencyRates
def _fetch(get_rate, start=date(2015, 1, 1), end=date.today()):
"""Fetch dataset."""
data = []
delta = end - start
for d in range(delta.days + 1):
day = start + timedelta(days=d)
rate = get_rate(day)
data.append(rate)
return np.asarray(data).reshape((-1, 1))
def _load_bitcoin(start=date(2015, 1, 1), end=date.today(), currency="EUR"):
"""Load bitcoin dataset"""
btcc = BtcConverter()
def get_rate(day):
return btcc.get_previous_price(currency, day)
return _fetch(get_rate, start=start, end=end)
def _load_forex(
start=date(2015, 1, 1), end=date.today(), currency_1="USD", currency_2="EUR"
):
"""Load forex dataset."""
cr = CurrencyRates()
def get_rate(day):
time.sleep(0.1)
return cr.get_rate(currency_1, currency_2, day)
return _fetch(get_rate, start=start, end=end)
def fetch(
start=date(2015, 1, 1),
end=date.today(),
currency_1="USD",
currency_2="EUR",
return_X_y=False,
):
"""Fetch Forex datasets.
Fetches the ECB Forex and Coindesk Bitcoin datasets. More info at
http://forex-python.readthedocs.io.
Parameters
----------
start : date, default=2015-01-01
Initial date.
end : date, default=today
Final date.
currency_1 : str, default='USD'
Currency 1.
currency_2 : str, default='EUR'
Currency 2.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if currency_1 == "BTC":
X = _load_bitcoin(start=start, end=end, currency=currency_2)
descr = "BTC-" + str(currency_2)
elif currency_2 == "BTC":
X = _load_bitcoin(start=start, end=end, currency=currency_1)
descr = "BTC-" + str(currency_1)
else:
X = _load_forex(
start=start, end=end, currency_1=currency_1, currency_2=currency_2
)
descr = str(currency_1) + "-" + str(currency_2)
descr = descr + start.strftime("%Y-%m-%d") + "-" + end.strftime("%Y-%m-%d")
if return_X_y:
return X, None
return Bunch(
data=X,
target=None,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=None,
DESCR=descr,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/forex.py | forex.py | import time
from datetime import date, timedelta
import numpy as np
from sklearn.utils import Bunch
from forex_python.bitcoin import BtcConverter
from forex_python.converter import CurrencyRates
def _fetch(get_rate, start=date(2015, 1, 1), end=date.today()):
"""Fetch dataset."""
data = []
delta = end - start
for d in range(delta.days + 1):
day = start + timedelta(days=d)
rate = get_rate(day)
data.append(rate)
return np.asarray(data).reshape((-1, 1))
def _load_bitcoin(start=date(2015, 1, 1), end=date.today(), currency="EUR"):
"""Load bitcoin dataset"""
btcc = BtcConverter()
def get_rate(day):
return btcc.get_previous_price(currency, day)
return _fetch(get_rate, start=start, end=end)
def _load_forex(
start=date(2015, 1, 1), end=date.today(), currency_1="USD", currency_2="EUR"
):
"""Load forex dataset."""
cr = CurrencyRates()
def get_rate(day):
time.sleep(0.1)
return cr.get_rate(currency_1, currency_2, day)
return _fetch(get_rate, start=start, end=end)
def fetch(
start=date(2015, 1, 1),
end=date.today(),
currency_1="USD",
currency_2="EUR",
return_X_y=False,
):
"""Fetch Forex datasets.
Fetches the ECB Forex and Coindesk Bitcoin datasets. More info at
http://forex-python.readthedocs.io.
Parameters
----------
start : date, default=2015-01-01
Initial date.
end : date, default=today
Final date.
currency_1 : str, default='USD'
Currency 1.
currency_2 : str, default='EUR'
Currency 2.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if currency_1 == "BTC":
X = _load_bitcoin(start=start, end=end, currency=currency_2)
descr = "BTC-" + str(currency_2)
elif currency_2 == "BTC":
X = _load_bitcoin(start=start, end=end, currency=currency_1)
descr = "BTC-" + str(currency_1)
else:
X = _load_forex(
start=start, end=end, currency_1=currency_1, currency_2=currency_2
)
descr = str(currency_1) + "-" + str(currency_2)
descr = descr + start.strftime("%Y-%m-%d") + "-" + end.strftime("%Y-%m-%d")
if return_X_y:
return X, None
return Bunch(
data=X,
target=None,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=None,
DESCR=descr,
) | 0.883538 | 0.363195 |
from __future__ import annotations
import ast
import math
import re
import urllib
from html.parser import HTMLParser
from pathlib import Path
from typing import (
Any,
Final,
List,
Literal,
Mapping,
Sequence,
Tuple,
overload,
)
import numpy as np
import pandas as pd
import wfdb.io
from sklearn.utils import Bunch
from skdatasets.repositories.base import dataset_from_dataframe
from .base import DatasetNotFoundError, fetch_zip
BASE_URL: Final = "https://physionet.org/static/published-projects"
INFO_STRING_SEMICOLONS_ONE_STR: Final = r"(\S*): (\S*)\s*"
INFO_STRING_SEMICOLONS_SEVERAL_REGEX: Final = re.compile(
rf"(?:{INFO_STRING_SEMICOLONS_ONE_STR})+",
)
INFO_STRING_SEMICOLONS_ONE_REGEX: Final = re.compile(
INFO_STRING_SEMICOLONS_ONE_STR,
)
class _ZipNameHTMLParser(HTMLParser):
"""Class for parsing the zip name in PhysioNet directory listing."""
def __init__(self, *, convert_charrefs: bool = True) -> None:
super().__init__(convert_charrefs=convert_charrefs)
self.zip_name: str | None = None
def handle_starttag(
self,
tag: str,
attrs: List[Tuple[str, str | None]],
) -> None:
if tag == "a":
for attr in attrs:
if attr[0] == "href" and attr[1] and attr[1].endswith(".zip"):
self.zip_name = attr[1]
def _get_zip_name_online(dataset_name: str) -> str:
"""Get the zip name of the dataset."""
parser = _ZipNameHTMLParser()
url_request = urllib.request.Request(url=f"{BASE_URL}/{dataset_name}")
try:
with urllib.request.urlopen(url_request) as url_file:
url_content = url_file.read().decode("utf-8")
except urllib.error.HTTPError as e:
if e.code == 404:
raise DatasetNotFoundError(dataset_name) from e
raise
parser.feed(url_content)
if parser.zip_name is None:
raise ValueError(f"No zip file found for dataset '{dataset_name}'")
return parser.zip_name
def _parse_info_string_value(value: str) -> Any:
if value.lower() == "nan":
return math.nan
try:
value = ast.literal_eval(value)
except Exception:
pass
return value
def _get_info_strings(comments: Sequence[str]) -> Mapping[str, Any]:
info_strings_semicolons = {}
info_strings_spaces = {}
for comment in comments:
if comment[0] not in {"-", "#"}:
if re.fullmatch(INFO_STRING_SEMICOLONS_SEVERAL_REGEX, comment):
for result in re.finditer(
INFO_STRING_SEMICOLONS_ONE_REGEX,
comment,
):
key = result.group(1)
if key[0] == "<" and key[-1] == ">":
key = key[1:-1]
info_strings_semicolons[key] = _parse_info_string_value(
result.group(2)
)
else:
split = comment.rsplit(maxsplit=1)
if len(split) == 2:
key, value = split
info_strings_spaces[key] = _parse_info_string_value(value)
if info_strings_semicolons:
return info_strings_semicolons
# Check for absurd things in spaces
if len(info_strings_spaces) == 1 or any(
key.count(" ") > 3 for key in info_strings_spaces
):
return {}
return info_strings_spaces
def _join_info_dicts(
dicts: Sequence[Mapping[str, Any]],
) -> Mapping[str, np.typing.NDArray[Any]]:
joined = {}
n_keys = len(dicts[0])
if not all(len(d) == n_keys for d in dicts):
return {}
for key in dicts[0]:
joined[key] = np.array([d[key] for d in dicts])
return joined
def _constant_attrs(register: wfdb.Record) -> Sequence[Any]:
return (register.n_sig, register.sig_name, register.units, register.fs)
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[False] = False,
as_frame: bool = False,
target_column: str | Sequence[str] | None = None,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[False] = False,
target_column: None = None,
) -> Tuple[np.typing.NDArray[Any], None]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[False] = False,
target_column: str | Sequence[str],
) -> Tuple[np.typing.NDArray[Any], np.typing.NDArray[Any]]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[True],
target_column: None = None,
) -> Tuple[pd.DataFrame, None]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[True],
target_column: str,
) -> Tuple[pd.DataFrame, pd.Series]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[True],
target_column: Sequence[str],
) -> Tuple[pd.DataFrame, pd.DataFrame]:
pass
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: bool = False,
as_frame: bool = False,
target_column: str | Sequence[str] | None = None,
) -> (
Bunch
| Tuple[np.typing.NDArray[Any], np.typing.NDArray[Any] | None]
| Tuple[pd.DataFrame, pd.Series | pd.DataFrame | None]
):
zip_name = _get_zip_name_online(name)
path = fetch_zip(
dataname=name,
urlname=f"{BASE_URL}/{name}/{zip_name}",
subfolder="physionet",
data_home=data_home,
)
subpath = path / Path(zip_name).stem
if subpath.exists():
path = subpath
with open(path / "RECORDS") as records_file:
records = [
wfdb.io.rdrecord(str(path / record_name.rstrip("\n")))
for record_name in records_file
]
info_strings = [_get_info_strings(r.comments) for r in records]
info = _join_info_dicts(info_strings)
assert all(_constant_attrs(r) == _constant_attrs(records[0]) for r in records)
data = {
"signal": [r.p_signal for r in records],
}
dataframe = pd.DataFrame(
{**info, **data},
index=[r.record_name for r in records],
)
dataframe["signal"].attrs.update(
sig_name=records[0].sig_name,
units=records[0].units,
fs=records[0].fs,
)
return dataset_from_dataframe(
dataframe,
return_X_y=return_X_y,
as_frame=as_frame,
target_column=target_column,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/physionet.py | physionet.py | from __future__ import annotations
import ast
import math
import re
import urllib
from html.parser import HTMLParser
from pathlib import Path
from typing import (
Any,
Final,
List,
Literal,
Mapping,
Sequence,
Tuple,
overload,
)
import numpy as np
import pandas as pd
import wfdb.io
from sklearn.utils import Bunch
from skdatasets.repositories.base import dataset_from_dataframe
from .base import DatasetNotFoundError, fetch_zip
BASE_URL: Final = "https://physionet.org/static/published-projects"
INFO_STRING_SEMICOLONS_ONE_STR: Final = r"(\S*): (\S*)\s*"
INFO_STRING_SEMICOLONS_SEVERAL_REGEX: Final = re.compile(
rf"(?:{INFO_STRING_SEMICOLONS_ONE_STR})+",
)
INFO_STRING_SEMICOLONS_ONE_REGEX: Final = re.compile(
INFO_STRING_SEMICOLONS_ONE_STR,
)
class _ZipNameHTMLParser(HTMLParser):
"""Class for parsing the zip name in PhysioNet directory listing."""
def __init__(self, *, convert_charrefs: bool = True) -> None:
super().__init__(convert_charrefs=convert_charrefs)
self.zip_name: str | None = None
def handle_starttag(
self,
tag: str,
attrs: List[Tuple[str, str | None]],
) -> None:
if tag == "a":
for attr in attrs:
if attr[0] == "href" and attr[1] and attr[1].endswith(".zip"):
self.zip_name = attr[1]
def _get_zip_name_online(dataset_name: str) -> str:
"""Get the zip name of the dataset."""
parser = _ZipNameHTMLParser()
url_request = urllib.request.Request(url=f"{BASE_URL}/{dataset_name}")
try:
with urllib.request.urlopen(url_request) as url_file:
url_content = url_file.read().decode("utf-8")
except urllib.error.HTTPError as e:
if e.code == 404:
raise DatasetNotFoundError(dataset_name) from e
raise
parser.feed(url_content)
if parser.zip_name is None:
raise ValueError(f"No zip file found for dataset '{dataset_name}'")
return parser.zip_name
def _parse_info_string_value(value: str) -> Any:
if value.lower() == "nan":
return math.nan
try:
value = ast.literal_eval(value)
except Exception:
pass
return value
def _get_info_strings(comments: Sequence[str]) -> Mapping[str, Any]:
info_strings_semicolons = {}
info_strings_spaces = {}
for comment in comments:
if comment[0] not in {"-", "#"}:
if re.fullmatch(INFO_STRING_SEMICOLONS_SEVERAL_REGEX, comment):
for result in re.finditer(
INFO_STRING_SEMICOLONS_ONE_REGEX,
comment,
):
key = result.group(1)
if key[0] == "<" and key[-1] == ">":
key = key[1:-1]
info_strings_semicolons[key] = _parse_info_string_value(
result.group(2)
)
else:
split = comment.rsplit(maxsplit=1)
if len(split) == 2:
key, value = split
info_strings_spaces[key] = _parse_info_string_value(value)
if info_strings_semicolons:
return info_strings_semicolons
# Check for absurd things in spaces
if len(info_strings_spaces) == 1 or any(
key.count(" ") > 3 for key in info_strings_spaces
):
return {}
return info_strings_spaces
def _join_info_dicts(
dicts: Sequence[Mapping[str, Any]],
) -> Mapping[str, np.typing.NDArray[Any]]:
joined = {}
n_keys = len(dicts[0])
if not all(len(d) == n_keys for d in dicts):
return {}
for key in dicts[0]:
joined[key] = np.array([d[key] for d in dicts])
return joined
def _constant_attrs(register: wfdb.Record) -> Sequence[Any]:
return (register.n_sig, register.sig_name, register.units, register.fs)
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[False] = False,
as_frame: bool = False,
target_column: str | Sequence[str] | None = None,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[False] = False,
target_column: None = None,
) -> Tuple[np.typing.NDArray[Any], None]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[False] = False,
target_column: str | Sequence[str],
) -> Tuple[np.typing.NDArray[Any], np.typing.NDArray[Any]]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[True],
target_column: None = None,
) -> Tuple[pd.DataFrame, None]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[True],
target_column: str,
) -> Tuple[pd.DataFrame, pd.Series]:
pass
@overload
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: Literal[True],
as_frame: Literal[True],
target_column: Sequence[str],
) -> Tuple[pd.DataFrame, pd.DataFrame]:
pass
def fetch(
name: str,
*,
data_home: str | None = None,
return_X_y: bool = False,
as_frame: bool = False,
target_column: str | Sequence[str] | None = None,
) -> (
Bunch
| Tuple[np.typing.NDArray[Any], np.typing.NDArray[Any] | None]
| Tuple[pd.DataFrame, pd.Series | pd.DataFrame | None]
):
zip_name = _get_zip_name_online(name)
path = fetch_zip(
dataname=name,
urlname=f"{BASE_URL}/{name}/{zip_name}",
subfolder="physionet",
data_home=data_home,
)
subpath = path / Path(zip_name).stem
if subpath.exists():
path = subpath
with open(path / "RECORDS") as records_file:
records = [
wfdb.io.rdrecord(str(path / record_name.rstrip("\n")))
for record_name in records_file
]
info_strings = [_get_info_strings(r.comments) for r in records]
info = _join_info_dicts(info_strings)
assert all(_constant_attrs(r) == _constant_attrs(records[0]) for r in records)
data = {
"signal": [r.p_signal for r in records],
}
dataframe = pd.DataFrame(
{**info, **data},
index=[r.record_name for r in records],
)
dataframe["signal"].attrs.update(
sig_name=records[0].sig_name,
units=records[0].units,
fs=records[0].fs,
)
return dataset_from_dataframe(
dataframe,
return_X_y=return_X_y,
as_frame=as_frame,
target_column=target_column,
) | 0.786828 | 0.208038 |
from __future__ import annotations
from pathlib import Path
from typing import Final, Literal, Optional, Sequence, Tuple, Union, overload
import numpy as np
import scipy.io.arff
from sklearn.utils import Bunch
from .base import fetch_zip as _fetch_zip
BASE_URL: Final = "https://www.timeseriesclassification.com/aeon-toolkit/"
def _target_conversion(
target: np.typing.NDArray[Union[int, str]],
) -> Tuple[np.typing.NDArray[int], Sequence[str]]:
try:
target_data = target.astype(int)
target_names = np.unique(target_data).astype(str).tolist()
except ValueError:
target_names = np.unique(target).tolist()
target_data = np.searchsorted(target_names, target)
return target_data, target_names
def data_to_matrix(
struct_array: np.typing.NDArray[object],
) -> np.typing.NDArray[float]:
fields = struct_array.dtype.fields
assert fields
if len(fields.items()) == 1 and list(fields.items())[0][1][0] == np.dtype(
np.object_
):
attribute = struct_array[list(fields.items())[0][0]]
n_instances = len(attribute)
n_curves = len(attribute[0])
n_points = len(attribute[0][0])
attribute_new = np.zeros(n_instances, dtype=np.object_)
for i in range(n_instances):
transformed_matrix = np.zeros((n_curves, n_points))
for j in range(n_curves):
for k in range(n_points):
transformed_matrix[j][k] = attribute[i][j][k]
attribute_new[i] = transformed_matrix
return attribute_new
else:
return np.array(struct_array.tolist())
@overload
def fetch(
name: str,
*,
data_home: Optional[str] = None,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
data_home: Optional[str] = None,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[int]]:
pass
def fetch(
name: str,
*,
data_home: Optional[str] = None,
return_X_y: bool = False,
) -> Union[Bunch, Tuple[np.typing.NDArray[float], np.typing.NDArray[int]], ]:
"""
Fetch UCR dataset.
Fetch a UCR dataset by name. More info at
http://www.timeseriesclassification.com/.
Parameters
----------
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
url = BASE_URL + name
data_path = _fetch_zip(
name,
urlname=url + ".zip",
subfolder="ucr",
data_home=data_home,
)
description_filenames = [name, name + "Description", name + "_Info"]
path_file_descr: Optional[Path]
for f in description_filenames:
path_file_descr = (data_path / f).with_suffix(".txt")
if path_file_descr.exists():
break
else:
# No description is found
path_file_descr = None
path_file_train = (data_path / (name + "_TRAIN")).with_suffix(".arff")
path_file_test = (data_path / (name + "_TEST")).with_suffix(".arff")
DESCR = (
path_file_descr.read_text(
errors="surrogateescape") if path_file_descr else ""
)
train = scipy.io.arff.loadarff(path_file_train)
test = scipy.io.arff.loadarff(path_file_test)
dataset_name = train[1].name
column_names = np.array(train[1].names())
target_column_name = column_names[-1]
feature_names = column_names[column_names != target_column_name].tolist()
target_column = train[0][target_column_name].astype(str)
test_target_column = test[0][target_column_name].astype(str)
y_train, target_names = _target_conversion(target_column)
y_test, target_names_test = _target_conversion(test_target_column)
assert target_names == target_names_test
X_train = data_to_matrix(train[0][feature_names])
X_test = data_to_matrix(test[0][feature_names])
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=list(range(len(X_train))),
validation_indices=[],
test_indices=list(range(len(X_train), len(X))),
name=dataset_name,
DESCR=DESCR,
feature_names=feature_names,
target_names=target_names,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/ucr.py | ucr.py | from __future__ import annotations
from pathlib import Path
from typing import Final, Literal, Optional, Sequence, Tuple, Union, overload
import numpy as np
import scipy.io.arff
from sklearn.utils import Bunch
from .base import fetch_zip as _fetch_zip
BASE_URL: Final = "https://www.timeseriesclassification.com/aeon-toolkit/"
def _target_conversion(
target: np.typing.NDArray[Union[int, str]],
) -> Tuple[np.typing.NDArray[int], Sequence[str]]:
try:
target_data = target.astype(int)
target_names = np.unique(target_data).astype(str).tolist()
except ValueError:
target_names = np.unique(target).tolist()
target_data = np.searchsorted(target_names, target)
return target_data, target_names
def data_to_matrix(
struct_array: np.typing.NDArray[object],
) -> np.typing.NDArray[float]:
fields = struct_array.dtype.fields
assert fields
if len(fields.items()) == 1 and list(fields.items())[0][1][0] == np.dtype(
np.object_
):
attribute = struct_array[list(fields.items())[0][0]]
n_instances = len(attribute)
n_curves = len(attribute[0])
n_points = len(attribute[0][0])
attribute_new = np.zeros(n_instances, dtype=np.object_)
for i in range(n_instances):
transformed_matrix = np.zeros((n_curves, n_points))
for j in range(n_curves):
for k in range(n_points):
transformed_matrix[j][k] = attribute[i][j][k]
attribute_new[i] = transformed_matrix
return attribute_new
else:
return np.array(struct_array.tolist())
@overload
def fetch(
name: str,
*,
data_home: Optional[str] = None,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
data_home: Optional[str] = None,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[int]]:
pass
def fetch(
name: str,
*,
data_home: Optional[str] = None,
return_X_y: bool = False,
) -> Union[Bunch, Tuple[np.typing.NDArray[float], np.typing.NDArray[int]], ]:
"""
Fetch UCR dataset.
Fetch a UCR dataset by name. More info at
http://www.timeseriesclassification.com/.
Parameters
----------
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
url = BASE_URL + name
data_path = _fetch_zip(
name,
urlname=url + ".zip",
subfolder="ucr",
data_home=data_home,
)
description_filenames = [name, name + "Description", name + "_Info"]
path_file_descr: Optional[Path]
for f in description_filenames:
path_file_descr = (data_path / f).with_suffix(".txt")
if path_file_descr.exists():
break
else:
# No description is found
path_file_descr = None
path_file_train = (data_path / (name + "_TRAIN")).with_suffix(".arff")
path_file_test = (data_path / (name + "_TEST")).with_suffix(".arff")
DESCR = (
path_file_descr.read_text(
errors="surrogateescape") if path_file_descr else ""
)
train = scipy.io.arff.loadarff(path_file_train)
test = scipy.io.arff.loadarff(path_file_test)
dataset_name = train[1].name
column_names = np.array(train[1].names())
target_column_name = column_names[-1]
feature_names = column_names[column_names != target_column_name].tolist()
target_column = train[0][target_column_name].astype(str)
test_target_column = test[0][target_column_name].astype(str)
y_train, target_names = _target_conversion(target_column)
y_test, target_names_test = _target_conversion(test_target_column)
assert target_names == target_names_test
X_train = data_to_matrix(train[0][feature_names])
X_test = data_to_matrix(test[0][feature_names])
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=list(range(len(X_train))),
validation_indices=[],
test_indices=list(range(len(X_train), len(X))),
name=dataset_name,
DESCR=DESCR,
feature_names=feature_names,
target_names=target_names,
) | 0.902603 | 0.418637 |
from __future__ import annotations
from pathlib import Path
from typing import Any, Literal, Optional, Tuple, Union, overload
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
from sklearn.utils import Bunch
from .base import fetch_file
BASE_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases"
def _load_csv(
fname: Path,
**kwargs: Any,
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[float, int, str]],]:
"""Load a csv with targets in the last column and features in the rest."""
data = np.genfromtxt(
fname,
dtype=str,
delimiter=",",
encoding=None,
**kwargs,
)
X = data[:, :-1]
try:
X = X.astype(float)
except ValueError:
pass
y = data[:, -1]
return X, y
def _fetch(
name: str,
data_home: Optional[str] = None,
) -> Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[float, int]],
Optional[np.typing.NDArray[float]],
Optional[np.typing.NDArray[Union[float, int]]],
str,
np.typing.NDArray[str],
]:
"""Fetch dataset."""
subfolder = "uci"
filename_str = name + ".data"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
X, y = _load_csv(filename)
target_names = None
ordinal_encoder = OrdinalEncoder(dtype=np.int64)
if y.dtype.type is np.str_:
y = ordinal_encoder.fit_transform(y.reshape(-1, 1))[:, 0]
target_names = ordinal_encoder.categories_[0]
try:
filename_str = name + ".test"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
X_test: Optional[np.typing.NDArray[float]]
y_test: Optional[np.typing.NDArray[Union[float, int, str]]]
X_test, y_test = _load_csv(filename)
if y.dtype.type is np.str_:
y_test = ordinal_encoder.transform(y_test.reshape(-1, 1))[:, 0]
except Exception:
X_test = None
y_test = None
try:
filename_str = name + ".names"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
except Exception:
filename_str = name + ".info"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
with open(filename) as rst_file:
fdescr = rst_file.read()
return X, y, X_test, y_test, fdescr, target_names
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[float]]:
pass
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: bool = False,
) -> Union[Bunch, Tuple[np.typing.NDArray[float], np.typing.NDArray[float]],]:
"""
Fetch UCI dataset.
Fetch a UCI dataset by name. More info at
https://archive.ics.uci.edu/ml/datasets.html.
Parameters
----------
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
X_train, y_train, X_test, y_test, DESCR, target_names = _fetch(
name,
data_home=data_home,
)
if X_test is None or y_test is None:
X = X_train
y = y_train
train_indices = None
test_indices = None
else:
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
train_indices = list(range(len(X_train)))
test_indices = list(range(len(X_train), len(X)))
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=train_indices,
validation_indices=[],
test_indices=test_indices,
inner_cv=None,
outer_cv=None,
DESCR=DESCR,
target_names=target_names,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/uci.py | uci.py | from __future__ import annotations
from pathlib import Path
from typing import Any, Literal, Optional, Tuple, Union, overload
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
from sklearn.utils import Bunch
from .base import fetch_file
BASE_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases"
def _load_csv(
fname: Path,
**kwargs: Any,
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[float, int, str]],]:
"""Load a csv with targets in the last column and features in the rest."""
data = np.genfromtxt(
fname,
dtype=str,
delimiter=",",
encoding=None,
**kwargs,
)
X = data[:, :-1]
try:
X = X.astype(float)
except ValueError:
pass
y = data[:, -1]
return X, y
def _fetch(
name: str,
data_home: Optional[str] = None,
) -> Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[float, int]],
Optional[np.typing.NDArray[float]],
Optional[np.typing.NDArray[Union[float, int]]],
str,
np.typing.NDArray[str],
]:
"""Fetch dataset."""
subfolder = "uci"
filename_str = name + ".data"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
X, y = _load_csv(filename)
target_names = None
ordinal_encoder = OrdinalEncoder(dtype=np.int64)
if y.dtype.type is np.str_:
y = ordinal_encoder.fit_transform(y.reshape(-1, 1))[:, 0]
target_names = ordinal_encoder.categories_[0]
try:
filename_str = name + ".test"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
X_test: Optional[np.typing.NDArray[float]]
y_test: Optional[np.typing.NDArray[Union[float, int, str]]]
X_test, y_test = _load_csv(filename)
if y.dtype.type is np.str_:
y_test = ordinal_encoder.transform(y_test.reshape(-1, 1))[:, 0]
except Exception:
X_test = None
y_test = None
try:
filename_str = name + ".names"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
except Exception:
filename_str = name + ".info"
url = BASE_URL + "/" + name + "/" + filename_str
filename = fetch_file(
dataname=name,
urlname=url,
subfolder=subfolder,
data_home=data_home,
)
with open(filename) as rst_file:
fdescr = rst_file.read()
return X, y, X_test, y_test, fdescr, target_names
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[float]]:
pass
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: bool = False,
) -> Union[Bunch, Tuple[np.typing.NDArray[float], np.typing.NDArray[float]],]:
"""
Fetch UCI dataset.
Fetch a UCI dataset by name. More info at
https://archive.ics.uci.edu/ml/datasets.html.
Parameters
----------
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
X_train, y_train, X_test, y_test, DESCR, target_names = _fetch(
name,
data_home=data_home,
)
if X_test is None or y_test is None:
X = X_train
y = y_train
train_indices = None
test_indices = None
else:
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
train_indices = list(range(len(X_train)))
test_indices = list(range(len(X_train), len(X)))
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=train_indices,
validation_indices=[],
test_indices=test_indices,
inner_cv=None,
outer_cv=None,
DESCR=DESCR,
target_names=target_names,
) | 0.903451 | 0.347094 |
from __future__ import annotations
import os
import pathlib
import re
import urllib
import warnings
from distutils.version import LooseVersion
from html.parser import HTMLParser
from pathlib import Path
from typing import (
Any,
Final,
List,
Literal,
Mapping,
Sequence,
Tuple,
TypedDict,
overload,
)
import numpy as np
import pandas as pd
from sklearn.datasets import get_data_home
from sklearn.utils import Bunch
import rdata
from .base import DatasetNotFoundError, fetch_tgz as _fetch_tgz
CRAN_URL: Final = "https://CRAN.R-project.org"
class _LatestVersionHTMLParser(HTMLParser):
"""Class for parsing the version in the CRAN package information page."""
def __init__(self, *, convert_charrefs: bool = True) -> None:
super().__init__(convert_charrefs=convert_charrefs)
self.last_is_version = False
self.version: str | None = None
self.version_regex = re.compile("(?i).*version.*")
self.handling_td = False
def handle_starttag(
self,
tag: str,
attrs: List[Tuple[str, str | None]],
) -> None:
if tag == "td":
self.handling_td = True
def handle_endtag(self, tag: str) -> None:
self.handling_td = False
def handle_data(self, data: str) -> None:
if self.handling_td:
if self.last_is_version:
self.version = data
self.last_is_version = False
elif self.version_regex.match(data):
self.last_is_version = True
def _get_latest_version_online(package_name: str, dataset_name: str) -> str:
"""Get the latest version of the package from CRAN."""
parser = _LatestVersionHTMLParser()
url_request = urllib.request.Request(
url=f"{CRAN_URL}/package={package_name}",
)
try:
with urllib.request.urlopen(url_request) as url_file:
url_content = url_file.read().decode("utf-8")
except urllib.error.HTTPError as e:
if e.code == 404:
raise DatasetNotFoundError(f"{package_name}/{dataset_name}") from e
raise
parser.feed(url_content)
if parser.version is None:
raise ValueError(f"Version of package {package_name} not found")
return parser.version
def _get_latest_version_offline(package_name: str) -> str | None:
"""
Get the latest downloaded version of the package.
Returns None if not found.
"""
home = pathlib.Path(get_data_home()) # Should allow providing data home?
downloaded_packages = tuple(home.glob(package_name + "_*.tar.gz"))
if downloaded_packages:
versions = [
LooseVersion(p.name[(len(package_name) + 1) : -len(".tar.gz")])
for p in downloaded_packages
]
versions.sort()
latest_version = versions[-1]
return str(latest_version)
return None
def _get_version(
package_name: str,
*,
dataset_name: str,
version: str | None = None,
) -> str:
"""
Get the version of the package.
If the version is specified, return it.
Otherwise, try to find the last version online.
If offline, try to find the downloaded version, if any.
"""
if version is None:
try:
version = _get_latest_version_online(
package_name,
dataset_name=dataset_name,
)
except (urllib.error.URLError, DatasetNotFoundError):
version = _get_latest_version_offline(package_name)
if version is None:
raise
return version
def _get_urls(
package_name: str,
*,
dataset_name: str,
version: str | None = None,
) -> Sequence[str]:
version = _get_version(package_name, dataset_name=dataset_name, version=version)
filename = f"{package_name}_{version}.tar.gz"
latest_url = f"{CRAN_URL}/src/contrib/{filename}"
archive_url = f"{CRAN_URL}/src/contrib/Archive/{package_name}/{filename}"
return (latest_url, archive_url)
def _download_package_data(
package_name: str,
*,
dataset_name: str = "*",
package_url: str | None = None,
version: str | None = None,
folder_name: str | None = None,
subdir: str | None = None,
) -> Path:
if package_url is None:
url_list = _get_urls(
package_name,
dataset_name=dataset_name,
version=version,
)
else:
url_list = (package_url,)
if folder_name is None:
folder_name = os.path.basename(url_list[0])
if subdir is None:
subdir = "data"
for i, url in enumerate(url_list):
try:
directory = _fetch_tgz(folder_name, url, subfolder="cran")
break
except Exception:
# If it is the last url, reraise
if i >= len(url_list) - 1:
raise
data_path = directory / package_name / subdir
return data_path
def fetch_dataset(
dataset_name: str,
package_name: str,
*,
package_url: str | None = None,
version: str | None = None,
folder_name: str | None = None,
subdir: str | None = None,
converter: rdata.conversion.Converter | None = None,
) -> Mapping[str, Any]:
"""
Fetch an R dataset.
Only .rda datasets in community packages can be downloaded for now.
R datasets do not have a fixed structure, so this function does not
attempt to force one.
Parameters
----------
dataset_name: string
Name of the dataset, including extension if any.
package_name: string
Name of the R package where this dataset resides.
package_url: string
Package url. If `None` it tries to obtain it from the package name.
version: string
If `package_url` is not specified, the version of the package to
download. By default is the latest one.
folder_name: string
Name of the folder where the downloaded package is stored. By default,
is the last component of `package_url`.
subdir: string
Subdirectory of the package containing the datasets. By default is
'data'.
converter: rdata.conversion.Converter
Object used to translate R objects into Python objects.
Returns
-------
data: dict
Dictionary-like object with all the data and metadata.
"""
if converter is None:
converter = rdata.conversion.SimpleConverter()
data_path = _download_package_data(
package_name,
dataset_name=dataset_name,
package_url=package_url,
version=version,
folder_name=folder_name,
subdir=subdir,
)
file_path = data_path / dataset_name
if not file_path.suffix:
possible_names = list(data_path.glob(dataset_name + ".*"))
if len(possible_names) != 1:
raise FileNotFoundError(
f"Dataset {dataset_name} not found in " f"package {package_name}",
)
file_path = data_path / possible_names[0]
parsed = rdata.parser.parse_file(file_path)
return converter.convert(parsed)
def fetch_package(
package_name: str,
*,
package_url: str | None = None,
version: str | None = None,
folder_name: str | None = None,
subdir: str | None = None,
converter: rdata.conversion.Converter | None = None,
ignore_errors: bool = False,
) -> Mapping[str, Any]:
"""
Fetch all datasets from a R package.
Only .rda datasets in community packages can be downloaded for now.
R datasets do not have a fixed structure, so this function does not
attempt to force one.
Parameters
----------
package_name: string
Name of the R package.
package_url: string
Package url. If `None` it tries to obtain it from the package name.
version: string
If `package_url` is not specified, the version of the package to
download. By default is the latest one.
folder_name: string
Name of the folder where the downloaded package is stored. By default,
is the last component of `package_url`.
subdir: string
Subdirectory of the package containing the datasets. By default is
'data'.
converter: rdata.conversion.Converter
Object used to translate R objects into Python objects.
ignore_errors: boolean
If True, ignore the datasets producing errors and return the
remaining ones.
Returns
-------
data: dict
Dictionary-like object with all the data and metadata.
"""
if converter is None:
converter = rdata.conversion.SimpleConverter()
data_path = _download_package_data(
package_name,
package_url=package_url,
version=version,
folder_name=folder_name,
subdir=subdir,
)
if not data_path.exists():
return {}
all_datasets = {}
for dataset in data_path.iterdir():
if dataset.suffix.lower() in [".rda", ".rdata"]:
try:
parsed = rdata.parser.parse_file(dataset)
converted = converter.convert(parsed)
all_datasets.update(converted)
except Exception:
if not ignore_errors:
raise
else:
warnings.warn(
f"Error loading dataset {dataset.name}",
stacklevel=2,
)
return all_datasets
class _DatasetArguments(TypedDict):
load_args: Tuple[Sequence[Any], Mapping[str, Any]]
sklearn_args: Tuple[Sequence[Any], Mapping[str, Any]]
datasets: Mapping[str, _DatasetArguments] = {
"geyser": {
"load_args": (["geyser.rda", "MASS"], {}),
"sklearn_args": ([], {"target_name": "waiting"}),
},
}
def _to_sklearn(
dataset: Mapping[str, Any],
*,
target_name: str,
) -> Bunch:
"""Transform R datasets to Sklearn format, if possible"""
assert len(dataset.keys()) == 1
name = tuple(dataset.keys())[0]
obj = dataset[name]
if isinstance(obj, pd.DataFrame):
feature_names = list(obj.keys())
feature_names.remove(target_name)
X = pd.get_dummies(obj[feature_names]).values
y = obj[target_name].values
else:
raise ValueError(
"Dataset not automatically convertible to Sklearn format",
)
return Bunch(
data=X,
target=y,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=None,
target_names=target_name,
feature_names=feature_names,
)
@overload
def fetch(
name: str,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Any]]:
pass
def fetch(
name: str,
*,
return_X_y: bool = False,
) -> Bunch | Tuple[np.typing.NDArray[float], np.typing.NDArray[Any]]:
"""
Load a dataset.
Parameters
----------
name : string
Dataset name.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
load_args = datasets[name]["load_args"]
dataset = fetch_dataset(*load_args[0], **load_args[1])
sklearn_args = datasets[name]["sklearn_args"]
sklearn_dataset = _to_sklearn(dataset, *sklearn_args[0], **sklearn_args[1])
if return_X_y:
return sklearn_dataset.data, sklearn_dataset.target
return sklearn_dataset | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/cran.py | cran.py | from __future__ import annotations
import os
import pathlib
import re
import urllib
import warnings
from distutils.version import LooseVersion
from html.parser import HTMLParser
from pathlib import Path
from typing import (
Any,
Final,
List,
Literal,
Mapping,
Sequence,
Tuple,
TypedDict,
overload,
)
import numpy as np
import pandas as pd
from sklearn.datasets import get_data_home
from sklearn.utils import Bunch
import rdata
from .base import DatasetNotFoundError, fetch_tgz as _fetch_tgz
CRAN_URL: Final = "https://CRAN.R-project.org"
class _LatestVersionHTMLParser(HTMLParser):
"""Class for parsing the version in the CRAN package information page."""
def __init__(self, *, convert_charrefs: bool = True) -> None:
super().__init__(convert_charrefs=convert_charrefs)
self.last_is_version = False
self.version: str | None = None
self.version_regex = re.compile("(?i).*version.*")
self.handling_td = False
def handle_starttag(
self,
tag: str,
attrs: List[Tuple[str, str | None]],
) -> None:
if tag == "td":
self.handling_td = True
def handle_endtag(self, tag: str) -> None:
self.handling_td = False
def handle_data(self, data: str) -> None:
if self.handling_td:
if self.last_is_version:
self.version = data
self.last_is_version = False
elif self.version_regex.match(data):
self.last_is_version = True
def _get_latest_version_online(package_name: str, dataset_name: str) -> str:
"""Get the latest version of the package from CRAN."""
parser = _LatestVersionHTMLParser()
url_request = urllib.request.Request(
url=f"{CRAN_URL}/package={package_name}",
)
try:
with urllib.request.urlopen(url_request) as url_file:
url_content = url_file.read().decode("utf-8")
except urllib.error.HTTPError as e:
if e.code == 404:
raise DatasetNotFoundError(f"{package_name}/{dataset_name}") from e
raise
parser.feed(url_content)
if parser.version is None:
raise ValueError(f"Version of package {package_name} not found")
return parser.version
def _get_latest_version_offline(package_name: str) -> str | None:
"""
Get the latest downloaded version of the package.
Returns None if not found.
"""
home = pathlib.Path(get_data_home()) # Should allow providing data home?
downloaded_packages = tuple(home.glob(package_name + "_*.tar.gz"))
if downloaded_packages:
versions = [
LooseVersion(p.name[(len(package_name) + 1) : -len(".tar.gz")])
for p in downloaded_packages
]
versions.sort()
latest_version = versions[-1]
return str(latest_version)
return None
def _get_version(
package_name: str,
*,
dataset_name: str,
version: str | None = None,
) -> str:
"""
Get the version of the package.
If the version is specified, return it.
Otherwise, try to find the last version online.
If offline, try to find the downloaded version, if any.
"""
if version is None:
try:
version = _get_latest_version_online(
package_name,
dataset_name=dataset_name,
)
except (urllib.error.URLError, DatasetNotFoundError):
version = _get_latest_version_offline(package_name)
if version is None:
raise
return version
def _get_urls(
package_name: str,
*,
dataset_name: str,
version: str | None = None,
) -> Sequence[str]:
version = _get_version(package_name, dataset_name=dataset_name, version=version)
filename = f"{package_name}_{version}.tar.gz"
latest_url = f"{CRAN_URL}/src/contrib/{filename}"
archive_url = f"{CRAN_URL}/src/contrib/Archive/{package_name}/{filename}"
return (latest_url, archive_url)
def _download_package_data(
package_name: str,
*,
dataset_name: str = "*",
package_url: str | None = None,
version: str | None = None,
folder_name: str | None = None,
subdir: str | None = None,
) -> Path:
if package_url is None:
url_list = _get_urls(
package_name,
dataset_name=dataset_name,
version=version,
)
else:
url_list = (package_url,)
if folder_name is None:
folder_name = os.path.basename(url_list[0])
if subdir is None:
subdir = "data"
for i, url in enumerate(url_list):
try:
directory = _fetch_tgz(folder_name, url, subfolder="cran")
break
except Exception:
# If it is the last url, reraise
if i >= len(url_list) - 1:
raise
data_path = directory / package_name / subdir
return data_path
def fetch_dataset(
dataset_name: str,
package_name: str,
*,
package_url: str | None = None,
version: str | None = None,
folder_name: str | None = None,
subdir: str | None = None,
converter: rdata.conversion.Converter | None = None,
) -> Mapping[str, Any]:
"""
Fetch an R dataset.
Only .rda datasets in community packages can be downloaded for now.
R datasets do not have a fixed structure, so this function does not
attempt to force one.
Parameters
----------
dataset_name: string
Name of the dataset, including extension if any.
package_name: string
Name of the R package where this dataset resides.
package_url: string
Package url. If `None` it tries to obtain it from the package name.
version: string
If `package_url` is not specified, the version of the package to
download. By default is the latest one.
folder_name: string
Name of the folder where the downloaded package is stored. By default,
is the last component of `package_url`.
subdir: string
Subdirectory of the package containing the datasets. By default is
'data'.
converter: rdata.conversion.Converter
Object used to translate R objects into Python objects.
Returns
-------
data: dict
Dictionary-like object with all the data and metadata.
"""
if converter is None:
converter = rdata.conversion.SimpleConverter()
data_path = _download_package_data(
package_name,
dataset_name=dataset_name,
package_url=package_url,
version=version,
folder_name=folder_name,
subdir=subdir,
)
file_path = data_path / dataset_name
if not file_path.suffix:
possible_names = list(data_path.glob(dataset_name + ".*"))
if len(possible_names) != 1:
raise FileNotFoundError(
f"Dataset {dataset_name} not found in " f"package {package_name}",
)
file_path = data_path / possible_names[0]
parsed = rdata.parser.parse_file(file_path)
return converter.convert(parsed)
def fetch_package(
package_name: str,
*,
package_url: str | None = None,
version: str | None = None,
folder_name: str | None = None,
subdir: str | None = None,
converter: rdata.conversion.Converter | None = None,
ignore_errors: bool = False,
) -> Mapping[str, Any]:
"""
Fetch all datasets from a R package.
Only .rda datasets in community packages can be downloaded for now.
R datasets do not have a fixed structure, so this function does not
attempt to force one.
Parameters
----------
package_name: string
Name of the R package.
package_url: string
Package url. If `None` it tries to obtain it from the package name.
version: string
If `package_url` is not specified, the version of the package to
download. By default is the latest one.
folder_name: string
Name of the folder where the downloaded package is stored. By default,
is the last component of `package_url`.
subdir: string
Subdirectory of the package containing the datasets. By default is
'data'.
converter: rdata.conversion.Converter
Object used to translate R objects into Python objects.
ignore_errors: boolean
If True, ignore the datasets producing errors and return the
remaining ones.
Returns
-------
data: dict
Dictionary-like object with all the data and metadata.
"""
if converter is None:
converter = rdata.conversion.SimpleConverter()
data_path = _download_package_data(
package_name,
package_url=package_url,
version=version,
folder_name=folder_name,
subdir=subdir,
)
if not data_path.exists():
return {}
all_datasets = {}
for dataset in data_path.iterdir():
if dataset.suffix.lower() in [".rda", ".rdata"]:
try:
parsed = rdata.parser.parse_file(dataset)
converted = converter.convert(parsed)
all_datasets.update(converted)
except Exception:
if not ignore_errors:
raise
else:
warnings.warn(
f"Error loading dataset {dataset.name}",
stacklevel=2,
)
return all_datasets
class _DatasetArguments(TypedDict):
load_args: Tuple[Sequence[Any], Mapping[str, Any]]
sklearn_args: Tuple[Sequence[Any], Mapping[str, Any]]
datasets: Mapping[str, _DatasetArguments] = {
"geyser": {
"load_args": (["geyser.rda", "MASS"], {}),
"sklearn_args": ([], {"target_name": "waiting"}),
},
}
def _to_sklearn(
dataset: Mapping[str, Any],
*,
target_name: str,
) -> Bunch:
"""Transform R datasets to Sklearn format, if possible"""
assert len(dataset.keys()) == 1
name = tuple(dataset.keys())[0]
obj = dataset[name]
if isinstance(obj, pd.DataFrame):
feature_names = list(obj.keys())
feature_names.remove(target_name)
X = pd.get_dummies(obj[feature_names]).values
y = obj[target_name].values
else:
raise ValueError(
"Dataset not automatically convertible to Sklearn format",
)
return Bunch(
data=X,
target=y,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=None,
target_names=target_name,
feature_names=feature_names,
)
@overload
def fetch(
name: str,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Any]]:
pass
def fetch(
name: str,
*,
return_X_y: bool = False,
) -> Bunch | Tuple[np.typing.NDArray[float], np.typing.NDArray[Any]]:
"""
Load a dataset.
Parameters
----------
name : string
Dataset name.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
load_args = datasets[name]["load_args"]
dataset = fetch_dataset(*load_args[0], **load_args[1])
sklearn_args = datasets[name]["sklearn_args"]
sklearn_dataset = _to_sklearn(dataset, *sklearn_args[0], **sklearn_args[1])
if return_X_y:
return sklearn_dataset.data, sklearn_dataset.target
return sklearn_dataset | 0.818845 | 0.176636 |
from sklearn.datasets import (
fetch_20newsgroups,
fetch_20newsgroups_vectorized,
fetch_california_housing,
fetch_covtype,
fetch_kddcup99,
fetch_lfw_pairs,
fetch_lfw_people,
fetch_olivetti_faces,
fetch_rcv1,
load_breast_cancer,
load_diabetes,
load_digits,
load_iris,
load_linnerud,
load_wine,
make_biclusters,
make_blobs,
make_checkerboard,
make_circles,
make_classification,
make_friedman1,
make_friedman2,
make_friedman3,
make_gaussian_quantiles,
make_hastie_10_2,
make_low_rank_matrix,
make_moons,
make_multilabel_classification,
make_regression,
make_s_curve,
make_sparse_coded_signal,
make_sparse_spd_matrix,
make_sparse_uncorrelated,
make_spd_matrix,
make_swiss_roll,
)
DATASETS = {
"20newsgroups": fetch_20newsgroups,
"20newsgroups_vectorized": fetch_20newsgroups_vectorized,
"biclusters": make_biclusters,
"blobs": make_blobs,
"breast_cancer": load_breast_cancer,
"california_housing": fetch_california_housing,
"checkerboard": make_checkerboard,
"circles": make_circles,
"classification": make_classification,
"covtype": fetch_covtype,
"diabetes": load_diabetes,
"digits": load_digits,
"friedman1": make_friedman1,
"friedman2": make_friedman2,
"friedman3": make_friedman3,
"gaussian_quantiles": make_gaussian_quantiles,
"hastie_10_2": make_hastie_10_2,
"iris": load_iris,
"kddcup99": fetch_kddcup99,
"lfw_people": fetch_lfw_people,
"lfw_pairs": fetch_lfw_pairs,
"linnerud": load_linnerud,
"low_rank_matrix": make_low_rank_matrix,
"moons": make_moons,
"multilabel_classification": make_multilabel_classification,
"olivetti_faces": fetch_olivetti_faces,
"rcv1": fetch_rcv1,
"regression": make_regression,
"s_curve": make_s_curve,
"sparse_coded_signal": make_sparse_coded_signal,
"sparse_spd_matrix": make_sparse_spd_matrix,
"sparse_uncorrelated": make_sparse_uncorrelated,
"spd_matrix": make_spd_matrix,
"swiss_roll": make_swiss_roll,
"wine": load_wine,
}
def fetch(name, *, return_X_y=False, **kwargs):
"""Fetch Scikit-learn dataset.
Fetch a Scikit-learn dataset by name. More info at
http://scikit-learn.org/stable/datasets/index.html.
Parameters
----------
name : string
Dataset name.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
**kwargs : dict
Optional key-value arguments. See
scikit-learn.org/stable/modules/classes.html#module-sklearn.datasets.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if return_X_y:
kwargs["return_X_y"] = True
data = DATASETS[name](**kwargs)
if not return_X_y:
data.train_indices = []
data.validation_indices = []
data.test_indices = []
data.inner_cv = None
data.outer_cv = None
return data | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/sklearn.py | sklearn.py | from sklearn.datasets import (
fetch_20newsgroups,
fetch_20newsgroups_vectorized,
fetch_california_housing,
fetch_covtype,
fetch_kddcup99,
fetch_lfw_pairs,
fetch_lfw_people,
fetch_olivetti_faces,
fetch_rcv1,
load_breast_cancer,
load_diabetes,
load_digits,
load_iris,
load_linnerud,
load_wine,
make_biclusters,
make_blobs,
make_checkerboard,
make_circles,
make_classification,
make_friedman1,
make_friedman2,
make_friedman3,
make_gaussian_quantiles,
make_hastie_10_2,
make_low_rank_matrix,
make_moons,
make_multilabel_classification,
make_regression,
make_s_curve,
make_sparse_coded_signal,
make_sparse_spd_matrix,
make_sparse_uncorrelated,
make_spd_matrix,
make_swiss_roll,
)
DATASETS = {
"20newsgroups": fetch_20newsgroups,
"20newsgroups_vectorized": fetch_20newsgroups_vectorized,
"biclusters": make_biclusters,
"blobs": make_blobs,
"breast_cancer": load_breast_cancer,
"california_housing": fetch_california_housing,
"checkerboard": make_checkerboard,
"circles": make_circles,
"classification": make_classification,
"covtype": fetch_covtype,
"diabetes": load_diabetes,
"digits": load_digits,
"friedman1": make_friedman1,
"friedman2": make_friedman2,
"friedman3": make_friedman3,
"gaussian_quantiles": make_gaussian_quantiles,
"hastie_10_2": make_hastie_10_2,
"iris": load_iris,
"kddcup99": fetch_kddcup99,
"lfw_people": fetch_lfw_people,
"lfw_pairs": fetch_lfw_pairs,
"linnerud": load_linnerud,
"low_rank_matrix": make_low_rank_matrix,
"moons": make_moons,
"multilabel_classification": make_multilabel_classification,
"olivetti_faces": fetch_olivetti_faces,
"rcv1": fetch_rcv1,
"regression": make_regression,
"s_curve": make_s_curve,
"sparse_coded_signal": make_sparse_coded_signal,
"sparse_spd_matrix": make_sparse_spd_matrix,
"sparse_uncorrelated": make_sparse_uncorrelated,
"spd_matrix": make_spd_matrix,
"swiss_roll": make_swiss_roll,
"wine": load_wine,
}
def fetch(name, *, return_X_y=False, **kwargs):
"""Fetch Scikit-learn dataset.
Fetch a Scikit-learn dataset by name. More info at
http://scikit-learn.org/stable/datasets/index.html.
Parameters
----------
name : string
Dataset name.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
**kwargs : dict
Optional key-value arguments. See
scikit-learn.org/stable/modules/classes.html#module-sklearn.datasets.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if return_X_y:
kwargs["return_X_y"] = True
data = DATASETS[name](**kwargs)
if not return_X_y:
data.train_indices = []
data.validation_indices = []
data.test_indices = []
data.inner_cv = None
data.outer_cv = None
return data | 0.826991 | 0.603026 |
from __future__ import annotations
import hashlib
from pathlib import Path
from typing import (
Final,
Iterator,
Literal,
Optional,
Sequence,
Tuple,
Union,
overload,
)
import numpy as np
from scipy.io import loadmat
from sklearn.utils import Bunch
from .base import fetch_file
DATASETS: Final = frozenset(
(
"banana",
"breast_cancer",
"diabetis",
"flare_solar",
"german",
"heart",
"image",
"ringnorm",
"splice",
"thyroid",
"titanic",
"twonorm",
"waveform",
)
)
class RaetschOuterCV(object):
"""Iterable over already separated CV partitions of the dataset."""
def __init__(
self,
X: np.typing.NDArray[float],
y: np.typing.NDArray[Union[int, float]],
train_splits: Sequence[np.typing.NDArray[int]],
test_splits: Sequence[np.typing.NDArray[int]],
) -> None:
self.X = X
self.y = y
self.train_splits = train_splits
self.test_splits = test_splits
def __iter__(
self,
) -> Iterator[
Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
]
]:
return (
(self.X[tr - 1], self.y[tr - 1], self.X[ts - 1], self.y[ts - 1])
for tr, ts in zip(self.train_splits, self.test_splits)
)
def _fetch_remote(data_home: Optional[str] = None) -> Path:
"""
Helper function to download the remote dataset into path.
Fetch the remote dataset, save into path using remote's filename and ensure
its integrity based on the SHA256 Checksum of the downloaded file.
Parameters
----------
dirname : string
Directory to save the file to.
Returns
-------
file_path: string
Full path of the created file.
"""
file_path = fetch_file(
"raetsch",
"https://github.com/tdiethe/gunnar_raetsch_benchmark_datasets"
"/raw/master/benchmarks.mat",
data_home=data_home,
)
sha256hash = hashlib.sha256()
with open(file_path, "rb") as f:
while True:
buffer = f.read(8192)
if not buffer:
break
sha256hash.update(buffer)
checksum = sha256hash.hexdigest()
remote_checksum = "47c19e4bc4716edc4077cfa5ea61edf4d02af4ec51a0ecfe035626ae8b561c75"
if remote_checksum != checksum:
raise IOError(
f"{file_path} has an SHA256 checksum ({checksum}) differing "
f"from expected ({remote_checksum}), file may be corrupted.",
)
return file_path
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]]:
pass
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: bool = False,
) -> Union[
Bunch,
Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]],
]:
"""Fetch Gunnar Raetsch's dataset.
Fetch a Gunnar Raetsch's benchmark dataset by name. Availabe datasets are
'banana', 'breast_cancer', 'diabetis', 'flare_solar', 'german', 'heart',
'image', 'ringnorm', 'splice', 'thyroid', 'titanic', 'twonorm' and
'waveform'. More info at
https://github.com/tdiethe/gunnar_raetsch_benchmark_datasets.
Parameters
----------
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if name not in DATASETS:
raise Exception("Avaliable datasets are " + str(list(DATASETS)))
filename = _fetch_remote(data_home=data_home)
X, y, train_splits, test_splits = loadmat(filename)[name][0][0]
if len(y.shape) == 2 and y.shape[1] == 1:
y = y.ravel()
cv = RaetschOuterCV(X, y, train_splits, test_splits)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=cv,
DESCR=name,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/repositories/raetsch.py | raetsch.py | from __future__ import annotations
import hashlib
from pathlib import Path
from typing import (
Final,
Iterator,
Literal,
Optional,
Sequence,
Tuple,
Union,
overload,
)
import numpy as np
from scipy.io import loadmat
from sklearn.utils import Bunch
from .base import fetch_file
DATASETS: Final = frozenset(
(
"banana",
"breast_cancer",
"diabetis",
"flare_solar",
"german",
"heart",
"image",
"ringnorm",
"splice",
"thyroid",
"titanic",
"twonorm",
"waveform",
)
)
class RaetschOuterCV(object):
"""Iterable over already separated CV partitions of the dataset."""
def __init__(
self,
X: np.typing.NDArray[float],
y: np.typing.NDArray[Union[int, float]],
train_splits: Sequence[np.typing.NDArray[int]],
test_splits: Sequence[np.typing.NDArray[int]],
) -> None:
self.X = X
self.y = y
self.train_splits = train_splits
self.test_splits = test_splits
def __iter__(
self,
) -> Iterator[
Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
np.typing.NDArray[float],
np.typing.NDArray[Union[int, float]],
]
]:
return (
(self.X[tr - 1], self.y[tr - 1], self.X[ts - 1], self.y[ts - 1])
for tr, ts in zip(self.train_splits, self.test_splits)
)
def _fetch_remote(data_home: Optional[str] = None) -> Path:
"""
Helper function to download the remote dataset into path.
Fetch the remote dataset, save into path using remote's filename and ensure
its integrity based on the SHA256 Checksum of the downloaded file.
Parameters
----------
dirname : string
Directory to save the file to.
Returns
-------
file_path: string
Full path of the created file.
"""
file_path = fetch_file(
"raetsch",
"https://github.com/tdiethe/gunnar_raetsch_benchmark_datasets"
"/raw/master/benchmarks.mat",
data_home=data_home,
)
sha256hash = hashlib.sha256()
with open(file_path, "rb") as f:
while True:
buffer = f.read(8192)
if not buffer:
break
sha256hash.update(buffer)
checksum = sha256hash.hexdigest()
remote_checksum = "47c19e4bc4716edc4077cfa5ea61edf4d02af4ec51a0ecfe035626ae8b561c75"
if remote_checksum != checksum:
raise IOError(
f"{file_path} has an SHA256 checksum ({checksum}) differing "
f"from expected ({remote_checksum}), file may be corrupted.",
)
return file_path
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[False] = False,
) -> Bunch:
pass
@overload
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: Literal[True],
) -> Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]]:
pass
def fetch(
name: str,
data_home: Optional[str] = None,
*,
return_X_y: bool = False,
) -> Union[
Bunch,
Tuple[np.typing.NDArray[float], np.typing.NDArray[Union[int, float]]],
]:
"""Fetch Gunnar Raetsch's dataset.
Fetch a Gunnar Raetsch's benchmark dataset by name. Availabe datasets are
'banana', 'breast_cancer', 'diabetis', 'flare_solar', 'german', 'heart',
'image', 'ringnorm', 'splice', 'thyroid', 'titanic', 'twonorm' and
'waveform'. More info at
https://github.com/tdiethe/gunnar_raetsch_benchmark_datasets.
Parameters
----------
name : string
Dataset name.
data_home : string or None, default None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in ‘~/scikit_learn_data’ subfolders.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
Returns
-------
data : Bunch
Dictionary-like object with all the data and metadata.
(data, target) : tuple if ``return_X_y`` is True
"""
if name not in DATASETS:
raise Exception("Avaliable datasets are " + str(list(DATASETS)))
filename = _fetch_remote(data_home=data_home)
X, y, train_splits, test_splits = loadmat(filename)[name][0][0]
if len(y.shape) == 2 and y.shape[1] == 1:
y = y.ravel()
cv = RaetschOuterCV(X, y, train_splits, test_splits)
if return_X_y:
return X, y
return Bunch(
data=X,
target=y,
train_indices=[],
validation_indices=[],
test_indices=[],
inner_cv=None,
outer_cv=cv,
DESCR=name,
) | 0.942573 | 0.394551 |
from __future__ import annotations
import itertools as it
from dataclasses import dataclass
from functools import reduce
from typing import Any, Callable, Literal, Mapping, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
from scipy.stats import (
friedmanchisquare,
kruskal,
mannwhitneyu,
rankdata,
wilcoxon,
)
from scipy.stats.stats import ttest_ind_from_stats, ttest_rel
from statsmodels.sandbox.stats.multicomp import multipletests
CorrectionLike = Literal[
None,
"bonferroni",
"sidak",
"holm-sidak",
"holm",
"simes-hochberg",
"hommel",
"fdr_bh",
"fdr_by",
"fdr_tsbh",
"fdr_tsbky",
]
MultitestLike = Literal["kruskal", "friedmanchisquare"]
TestLike = Literal["mannwhitneyu", "wilcoxon"]
@dataclass
class SummaryRow:
values: np.typing.NDArray[Any]
greater_is_better: bool | None = None
@dataclass
class ScoreCell:
mean: float
std: float | None
rank: int
significant: bool
def average_rank(
ranks: np.typing.NDArray[np.integer[Any]],
**kwargs: Any,
) -> SummaryRow:
"""Compute rank averages."""
return SummaryRow(
values=np.mean(ranks, axis=0),
greater_is_better=False,
)
def average_mean_score(
means: np.typing.NDArray[np.floating[Any]],
greater_is_better: bool,
**kwargs: Any,
) -> SummaryRow:
"""Compute score mean averages."""
return SummaryRow(
values=np.mean(means, axis=0),
greater_is_better=greater_is_better,
)
def _is_significant(
scores1: np.typing.NDArray[np.floating[Any]],
scores2: np.typing.NDArray[np.floating[Any]],
mean1: np.typing.NDArray[np.floating[Any]],
mean2: np.typing.NDArray[np.floating[Any]],
std1: np.typing.NDArray[np.floating[Any]],
std2: np.typing.NDArray[np.floating[Any]],
*,
nobs: int | None = None,
two_sided: bool = True,
paired_test: bool = False,
significancy_level: float = 0.05,
) -> bool:
alternative = "two-sided" if two_sided else "greater"
if paired_test:
assert scores1.ndim == 1
assert scores2.ndim == 1
_, pvalue = ttest_rel(
scores1,
scores2,
axis=-1,
alternative=alternative,
)
else:
assert nobs
_, pvalue = ttest_ind_from_stats(
mean1=mean1,
std1=std1,
nobs1=nobs,
mean2=mean2,
std2=std2,
nobs2=nobs,
equal_var=False,
alternative=alternative,
)
return pvalue < significancy_level
def _all_significants(
scores: np.typing.NDArray[np.floating[Any]],
means: np.typing.NDArray[np.floating[Any]],
stds: np.typing.NDArray[np.floating[Any]] | None,
ranks: np.typing.NDArray[np.integer[Any]],
*,
nobs: int | None = None,
two_sided: bool = True,
paired_test: bool = False,
significancy_level: float = 0,
) -> np.typing.NDArray[np.bool_]:
significant_matrix = np.zeros_like(ranks, dtype=np.bool_)
if stds is None or significancy_level <= 0:
return significant_matrix
for row, (scores_row, mean_row, std_row, rank_row) in enumerate(
zip(scores, means, stds, ranks),
):
for column, (scores1, mean1, std1, rank1) in enumerate(
zip(scores_row, mean_row, std_row, rank_row),
):
# Compare every element with all the ones with immediate below rank
# It must be significantly better than all of them
index2 = np.flatnonzero(rank_row == (rank1 + 1))
is_significant = len(index2) > 0 and all(
_is_significant(
scores1,
scores_row[idx],
mean1,
mean_row[idx],
std1,
std_row[idx],
nobs=nobs,
two_sided=two_sided,
paired_test=paired_test,
significancy_level=significancy_level,
)
for idx in index2
)
if is_significant:
significant_matrix[row, column] = True
return significant_matrix
def _set_style_classes(
table: pd.DataFrame,
*,
all_ranks: np.typing.NDArray[np.integer[Any]],
significants: np.typing.NDArray[np.bool_],
n_summary_rows: int,
) -> pd.io.formats.style.Styler:
rank_class_names = np.char.add(
"rank",
all_ranks.astype(str),
)
is_summary_row = np.zeros_like(all_ranks, dtype=np.bool_)
is_summary_row[-n_summary_rows:, :] = True
summary_rows_class_name = np.char.multiply(
"summary",
is_summary_row.astype(int),
)
significant_class_name = np.char.multiply(
"significant",
np.insert(
significants,
(len(significants),) * n_summary_rows,
0,
axis=0,
).astype(int),
)
styler = table.style.set_td_classes(
pd.DataFrame(
reduce(
np.char.add,
(
rank_class_names,
" ",
summary_rows_class_name,
" ",
significant_class_name,
),
),
index=table.index,
columns=table.columns,
),
)
return styler
def _set_style_formatter(
styler: pd.io.formats.style.Styler,
*,
precision: int,
show_rank: bool = True,
) -> pd.io.formats.style.Styler:
def _formatter(
data: object,
) -> str:
if isinstance(data, str):
return data
elif isinstance(data, int):
return str(int)
elif isinstance(data, float):
return f"{data:.{precision}f}"
elif isinstance(data, ScoreCell):
str_repr = f"{data.mean:.{precision}f}"
if data.std is not None:
str_repr += f" ± {data.std:.{precision}f}"
if show_rank:
precision_rank = 0 if isinstance(data.rank, int) else precision
str_repr += f" ({data.rank:.{precision_rank}f})"
return str_repr
else:
return ""
return styler.format(
_formatter,
)
def _set_default_style_html(
styler: pd.io.formats.style.Styler,
*,
n_summary_rows: int,
) -> pd.io.formats.style.Styler:
last_rows_mask = np.zeros(len(styler.data), dtype=int)
last_rows_mask[-n_summary_rows:] = 1
styler = styler.set_table_styles(
[
{
"selector": ".summary",
"props": [("font-style", "italic")],
},
{
"selector": ".rank1",
"props": [("font-weight", "bold")],
},
{
"selector": ".rank2",
"props": [("text-decoration", "underline")],
},
{
"selector": ".significant::after",
"props": [
("content", '"*"'),
("width", "0px"),
("display", "inline-block"),
],
},
{
"selector": ".col_heading",
"props": [("font-weight", "bold")],
},
],
)
styler = styler.apply_index(
lambda _: np.char.multiply(
"font-style: italic; font-weight: bold",
last_rows_mask,
),
axis=0,
)
styler = styler.apply_index(
lambda idx: ["font-weight: bold"] * len(idx),
axis=1,
)
return styler
def _set_style_from_class(
styler: pd.io.formats.style.Styler,
class_name: str,
style: str,
) -> pd.io.formats.style.Styler:
style_matrix = np.full(styler.data.shape, style)
for row in range(style_matrix.shape[0]):
for column in range(style_matrix.shape[1]):
classes = styler.cell_context.get(
(row, column),
"",
).split()
if class_name not in classes:
style_matrix[row, column] = ""
return styler.apply(lambda x: style_matrix, axis=None)
def _set_default_style_latex(
styler: pd.io.formats.style.Styler,
*,
n_summary_rows: int,
) -> pd.io.formats.style.Styler:
last_rows_mask = np.zeros(len(styler.data), dtype=int)
last_rows_mask[-n_summary_rows:] = 1
styler.set_table_styles(
[
{
"selector": r"newcommand{\summary}",
"props": r":[1]{\textit{#1}};",
},
{
"selector": r"newcommand{\significant}",
"props": r":[1]{#1*};",
},
{
"selector": r"newcommand{\rank}",
"props": (
r":[2]{\ifnum#1=1 \textbf{#2} \else "
r"\ifnum#1=2 \underline{#2} \else #2 \fi\fi};"
),
},
],
overwrite=False,
)
for rank in range(1, styler.data.shape[1] + 1):
styler = _set_style_from_class(
styler,
f"rank{rank}",
f"rank{{{rank}}}:--rwrap; ",
)
for class_name in ("summary", "significant"):
styler = _set_style_from_class(
styler,
class_name,
f"{class_name}:--rwrap; ",
)
styler = styler.apply_index(
lambda _: np.char.multiply(
"textbf:--rwrap;summary:--rwrap;",
last_rows_mask,
),
axis=0,
)
styler = styler.apply_index(
lambda idx: ["textbf:--rwrap"] * len(idx),
axis=1,
)
return styler
def _set_default_style(
styler: pd.io.formats.style.Styler,
*,
n_summary_rows: int,
default_style: Literal["html", "latex", None],
) -> pd.io.formats.style.Styler:
if default_style == "html":
styler = _set_default_style_html(
styler,
n_summary_rows=n_summary_rows,
)
elif default_style == "latex":
styler = _set_default_style_latex(
styler,
n_summary_rows=n_summary_rows,
)
return styler
def scores_table(
scores: np.typing.ArrayLike,
stds: np.typing.ArrayLike | None = None,
*,
datasets: Sequence[str],
estimators: Sequence[str],
nobs: int | None = None,
greater_is_better: bool = True,
method: Literal["average", "min", "max", "dense", "ordinal"] = "min",
significancy_level: float = 0,
paired_test: bool = False,
two_sided: bool = True,
default_style: Literal["html", "latex", None] = "html",
precision: int = 2,
show_rank: bool = True,
summary_rows: Sequence[Tuple[str, Callable[..., SummaryRow]]] = (
("Average rank", average_rank),
),
) -> pd.io.formats.style.Styler:
"""
Scores table.
Prints a table where each row represents a dataset and each column
represents an estimator.
Parameters
----------
scores: array-like
Matrix of scores where each column represents a model.
Either the full matrix with all experiment results or the
matrix with the mean scores can be passed.
stds: array-like, default=None
Matrix of standard deviations where each column represents a
model. If ``scores`` is the full matrix with all results
this is automatically computed from it and should not be passed.
datasets: sequence of :external:class:`str`
List of dataset names.
estimators: sequence of :external:class:`str`
List of estimator names.
nobs: :external:class:`int`
Number of repetitions of the experiments. Used only for computing
significances when ``scores`` is not the full matrix.
greater_is_better: boolean, default=True
Whether a greater score is better (score) or worse
(loss).
method: {'average', 'min', 'max', 'dense', 'ordinal'}, default='average'
Method used to solve ties.
significancy_level: :external:class:`float`, default=0
Significancy level for considerin a result significant. If nonzero,
significancy is calculated using a t-test. In that case, if
``paired_test`` is ``True``, ``scores`` should be the full matrix
and a paired test is performed. Otherwise, the t-test assumes
independence, and either ``scores`` should be the full matrix
or ``nobs`` should be passed.
paired_test: :external:class:`bool`, default=False
Whether to perform a paired test or a test assuming independence.
If ``True``, ``scores`` should be the full matrix.
Otherwise, either ``scores`` should be the full matrix
or ``nobs`` should be passed.
two_sided: :external:class:`bool`, default=True
Whether to perform a two sided t-test or a one sided t-test.
default_style: {'html', 'latex', None}, default='html'
Default style for the table. Use ``None`` for no style. Note that
the CSS classes and textual formatting are always set.
precision: :external:class:`int`
Number of decimals used for floating point numbers.
summary_rows: sequence
List of (name, callable) tuples for additional summary rows.
By default, the rank average is computed.
Returns
-------
table: array-like
Table of mean and standard deviation of each estimator-dataset
pair. A ranking of estimators is also generated.
"""
scores = np.asanyarray(scores)
stds = None if stds is None else np.asanyarray(stds)
assert scores.ndim in {2, 3}
means = scores if scores.ndim == 2 else np.mean(scores, axis=-1)
if scores.ndim == 3:
assert stds is None
assert nobs is None
stds = np.std(scores, axis=-1)
nobs = scores.shape[-1]
ranks = np.asarray(
[
rankdata(-m, method=method)
if greater_is_better
else rankdata(m, method=method)
for m in means.round(precision)
]
)
significants = _all_significants(
scores,
means,
stds,
ranks,
nobs=nobs,
two_sided=two_sided,
paired_test=paired_test,
significancy_level=significancy_level,
)
table = pd.DataFrame(data=means, index=datasets, columns=estimators)
for i, d in enumerate(datasets):
for j, e in enumerate(estimators):
table.loc[d, e] = ScoreCell(
mean=means[i, j],
std=None if stds is None else stds[i, j],
rank=int(ranks[i, j]),
significant=significants[i, j],
)
# Create additional summary rows
additional_ranks = []
for name, summary_fun in summary_rows:
row = summary_fun(
scores=scores,
means=means,
stds=stds,
ranks=ranks,
greater_is_better=greater_is_better,
)
table.loc[name] = row.values
if row.greater_is_better is None:
additional_ranks.append(np.full(len(row.values), -1))
else:
additional_ranks.append(
rankdata(-row.values, method=method)
if row.greater_is_better
else rankdata(row.values, method=method),
)
styler = _set_style_classes(
table,
all_ranks=np.vstack([ranks] + additional_ranks),
significants=significants,
n_summary_rows=len(summary_rows),
)
styler = _set_style_formatter(
styler,
precision=precision,
show_rank=show_rank,
)
return _set_default_style(
styler,
n_summary_rows=len(summary_rows),
default_style=default_style,
)
def hypotheses_table(
samples: np.typing.ArrayLike,
models: Sequence[str],
*,
alpha: float = 0.05,
multitest: Optional[MultitestLike] = None,
test: TestLike = "wilcoxon",
correction: CorrectionLike = None,
multitest_args: Optional[Mapping[str, Any]] = None,
test_args: Optional[Mapping[str, Any]] = None,
) -> Tuple[Optional[pd.DataFrame], Optional[pd.DataFrame]]:
"""
Hypotheses table.
Prints a hypothesis table with a selected test and correction.
Parameters
----------
samples: array-like
Matrix of samples where each column represent a model.
models: array-like
Model names.
alpha: float in [0, 1], default=0.05
Significance level.
multitest: {'kruskal', 'friedmanchisquare'}, default=None
Ranking multitest used.
test: {'mannwhitneyu', 'wilcoxon'}, default='wilcoxon'
Ranking test used.
correction: {'bonferroni', 'sidak', 'holm-sidak', 'holm', \
'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', \
'fdr_tsbky'}, default=None
Method used to adjust the p-values.
multitest_args: dict
Optional ranking test arguments.
test_args: dict
Optional ranking test arguments.
Returns
-------
multitest_table: array-like
Table of p-value and rejection/non-rejection for the
multitest hypothesis.
test_table: array-like
Table of p-values and rejection/non-rejection for each test
hypothesis.
"""
if multitest_args is None:
multitest_args = {}
if test_args is None:
test_args = {}
samples = np.asanyarray(samples)
versus = list(it.combinations(range(len(models)), 2))
comparisons = [
f"{models[first]} vs {models[second]}" for first, second in versus
]
multitests = {
"kruskal": kruskal,
"friedmanchisquare": friedmanchisquare,
}
tests = {
"mannwhitneyu": mannwhitneyu,
"wilcoxon": wilcoxon,
}
multitest_table = None
if multitest is not None:
multitest_table = pd.DataFrame(
index=[multitest],
columns=["p-value", "Hypothesis"],
)
_, pvalue = multitests[multitest](
*samples.T,
**multitest_args,
)
reject_str = "Rejected" if pvalue <= alpha else "Not rejected"
multitest_table.loc[multitest] = ["{0:.2f}".format(pvalue), reject_str]
# If the multitest does not detect a significative difference,
# the individual tests are not meaningful, so skip them.
if pvalue > alpha:
return multitest_table, None
pvalues = [
tests[test](
samples[:, first],
samples[:, second],
**test_args,
)[1]
for first, second in versus
]
if correction is not None:
reject_bool, pvalues, _, _ = multipletests(
pvalues,
alpha,
method=correction,
)
reject = ["Rejected" if r else "Not rejected" for r in reject_bool]
else:
reject = [
"Rejected" if pvalue <= alpha else "Not rejected" for pvalue in pvalues
]
data = [("{0:.2f}".format(p), r) for p, r in zip(pvalues, reject)]
test_table = pd.DataFrame(
data,
index=comparisons,
columns=["p-value", "Hypothesis"],
)
return multitest_table, test_table | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/utils/scores.py | scores.py | from __future__ import annotations
import itertools as it
from dataclasses import dataclass
from functools import reduce
from typing import Any, Callable, Literal, Mapping, Optional, Sequence, Tuple
import numpy as np
import pandas as pd
from scipy.stats import (
friedmanchisquare,
kruskal,
mannwhitneyu,
rankdata,
wilcoxon,
)
from scipy.stats.stats import ttest_ind_from_stats, ttest_rel
from statsmodels.sandbox.stats.multicomp import multipletests
CorrectionLike = Literal[
None,
"bonferroni",
"sidak",
"holm-sidak",
"holm",
"simes-hochberg",
"hommel",
"fdr_bh",
"fdr_by",
"fdr_tsbh",
"fdr_tsbky",
]
MultitestLike = Literal["kruskal", "friedmanchisquare"]
TestLike = Literal["mannwhitneyu", "wilcoxon"]
@dataclass
class SummaryRow:
values: np.typing.NDArray[Any]
greater_is_better: bool | None = None
@dataclass
class ScoreCell:
mean: float
std: float | None
rank: int
significant: bool
def average_rank(
ranks: np.typing.NDArray[np.integer[Any]],
**kwargs: Any,
) -> SummaryRow:
"""Compute rank averages."""
return SummaryRow(
values=np.mean(ranks, axis=0),
greater_is_better=False,
)
def average_mean_score(
means: np.typing.NDArray[np.floating[Any]],
greater_is_better: bool,
**kwargs: Any,
) -> SummaryRow:
"""Compute score mean averages."""
return SummaryRow(
values=np.mean(means, axis=0),
greater_is_better=greater_is_better,
)
def _is_significant(
scores1: np.typing.NDArray[np.floating[Any]],
scores2: np.typing.NDArray[np.floating[Any]],
mean1: np.typing.NDArray[np.floating[Any]],
mean2: np.typing.NDArray[np.floating[Any]],
std1: np.typing.NDArray[np.floating[Any]],
std2: np.typing.NDArray[np.floating[Any]],
*,
nobs: int | None = None,
two_sided: bool = True,
paired_test: bool = False,
significancy_level: float = 0.05,
) -> bool:
alternative = "two-sided" if two_sided else "greater"
if paired_test:
assert scores1.ndim == 1
assert scores2.ndim == 1
_, pvalue = ttest_rel(
scores1,
scores2,
axis=-1,
alternative=alternative,
)
else:
assert nobs
_, pvalue = ttest_ind_from_stats(
mean1=mean1,
std1=std1,
nobs1=nobs,
mean2=mean2,
std2=std2,
nobs2=nobs,
equal_var=False,
alternative=alternative,
)
return pvalue < significancy_level
def _all_significants(
scores: np.typing.NDArray[np.floating[Any]],
means: np.typing.NDArray[np.floating[Any]],
stds: np.typing.NDArray[np.floating[Any]] | None,
ranks: np.typing.NDArray[np.integer[Any]],
*,
nobs: int | None = None,
two_sided: bool = True,
paired_test: bool = False,
significancy_level: float = 0,
) -> np.typing.NDArray[np.bool_]:
significant_matrix = np.zeros_like(ranks, dtype=np.bool_)
if stds is None or significancy_level <= 0:
return significant_matrix
for row, (scores_row, mean_row, std_row, rank_row) in enumerate(
zip(scores, means, stds, ranks),
):
for column, (scores1, mean1, std1, rank1) in enumerate(
zip(scores_row, mean_row, std_row, rank_row),
):
# Compare every element with all the ones with immediate below rank
# It must be significantly better than all of them
index2 = np.flatnonzero(rank_row == (rank1 + 1))
is_significant = len(index2) > 0 and all(
_is_significant(
scores1,
scores_row[idx],
mean1,
mean_row[idx],
std1,
std_row[idx],
nobs=nobs,
two_sided=two_sided,
paired_test=paired_test,
significancy_level=significancy_level,
)
for idx in index2
)
if is_significant:
significant_matrix[row, column] = True
return significant_matrix
def _set_style_classes(
table: pd.DataFrame,
*,
all_ranks: np.typing.NDArray[np.integer[Any]],
significants: np.typing.NDArray[np.bool_],
n_summary_rows: int,
) -> pd.io.formats.style.Styler:
rank_class_names = np.char.add(
"rank",
all_ranks.astype(str),
)
is_summary_row = np.zeros_like(all_ranks, dtype=np.bool_)
is_summary_row[-n_summary_rows:, :] = True
summary_rows_class_name = np.char.multiply(
"summary",
is_summary_row.astype(int),
)
significant_class_name = np.char.multiply(
"significant",
np.insert(
significants,
(len(significants),) * n_summary_rows,
0,
axis=0,
).astype(int),
)
styler = table.style.set_td_classes(
pd.DataFrame(
reduce(
np.char.add,
(
rank_class_names,
" ",
summary_rows_class_name,
" ",
significant_class_name,
),
),
index=table.index,
columns=table.columns,
),
)
return styler
def _set_style_formatter(
styler: pd.io.formats.style.Styler,
*,
precision: int,
show_rank: bool = True,
) -> pd.io.formats.style.Styler:
def _formatter(
data: object,
) -> str:
if isinstance(data, str):
return data
elif isinstance(data, int):
return str(int)
elif isinstance(data, float):
return f"{data:.{precision}f}"
elif isinstance(data, ScoreCell):
str_repr = f"{data.mean:.{precision}f}"
if data.std is not None:
str_repr += f" ± {data.std:.{precision}f}"
if show_rank:
precision_rank = 0 if isinstance(data.rank, int) else precision
str_repr += f" ({data.rank:.{precision_rank}f})"
return str_repr
else:
return ""
return styler.format(
_formatter,
)
def _set_default_style_html(
styler: pd.io.formats.style.Styler,
*,
n_summary_rows: int,
) -> pd.io.formats.style.Styler:
last_rows_mask = np.zeros(len(styler.data), dtype=int)
last_rows_mask[-n_summary_rows:] = 1
styler = styler.set_table_styles(
[
{
"selector": ".summary",
"props": [("font-style", "italic")],
},
{
"selector": ".rank1",
"props": [("font-weight", "bold")],
},
{
"selector": ".rank2",
"props": [("text-decoration", "underline")],
},
{
"selector": ".significant::after",
"props": [
("content", '"*"'),
("width", "0px"),
("display", "inline-block"),
],
},
{
"selector": ".col_heading",
"props": [("font-weight", "bold")],
},
],
)
styler = styler.apply_index(
lambda _: np.char.multiply(
"font-style: italic; font-weight: bold",
last_rows_mask,
),
axis=0,
)
styler = styler.apply_index(
lambda idx: ["font-weight: bold"] * len(idx),
axis=1,
)
return styler
def _set_style_from_class(
styler: pd.io.formats.style.Styler,
class_name: str,
style: str,
) -> pd.io.formats.style.Styler:
style_matrix = np.full(styler.data.shape, style)
for row in range(style_matrix.shape[0]):
for column in range(style_matrix.shape[1]):
classes = styler.cell_context.get(
(row, column),
"",
).split()
if class_name not in classes:
style_matrix[row, column] = ""
return styler.apply(lambda x: style_matrix, axis=None)
def _set_default_style_latex(
styler: pd.io.formats.style.Styler,
*,
n_summary_rows: int,
) -> pd.io.formats.style.Styler:
last_rows_mask = np.zeros(len(styler.data), dtype=int)
last_rows_mask[-n_summary_rows:] = 1
styler.set_table_styles(
[
{
"selector": r"newcommand{\summary}",
"props": r":[1]{\textit{#1}};",
},
{
"selector": r"newcommand{\significant}",
"props": r":[1]{#1*};",
},
{
"selector": r"newcommand{\rank}",
"props": (
r":[2]{\ifnum#1=1 \textbf{#2} \else "
r"\ifnum#1=2 \underline{#2} \else #2 \fi\fi};"
),
},
],
overwrite=False,
)
for rank in range(1, styler.data.shape[1] + 1):
styler = _set_style_from_class(
styler,
f"rank{rank}",
f"rank{{{rank}}}:--rwrap; ",
)
for class_name in ("summary", "significant"):
styler = _set_style_from_class(
styler,
class_name,
f"{class_name}:--rwrap; ",
)
styler = styler.apply_index(
lambda _: np.char.multiply(
"textbf:--rwrap;summary:--rwrap;",
last_rows_mask,
),
axis=0,
)
styler = styler.apply_index(
lambda idx: ["textbf:--rwrap"] * len(idx),
axis=1,
)
return styler
def _set_default_style(
styler: pd.io.formats.style.Styler,
*,
n_summary_rows: int,
default_style: Literal["html", "latex", None],
) -> pd.io.formats.style.Styler:
if default_style == "html":
styler = _set_default_style_html(
styler,
n_summary_rows=n_summary_rows,
)
elif default_style == "latex":
styler = _set_default_style_latex(
styler,
n_summary_rows=n_summary_rows,
)
return styler
def scores_table(
scores: np.typing.ArrayLike,
stds: np.typing.ArrayLike | None = None,
*,
datasets: Sequence[str],
estimators: Sequence[str],
nobs: int | None = None,
greater_is_better: bool = True,
method: Literal["average", "min", "max", "dense", "ordinal"] = "min",
significancy_level: float = 0,
paired_test: bool = False,
two_sided: bool = True,
default_style: Literal["html", "latex", None] = "html",
precision: int = 2,
show_rank: bool = True,
summary_rows: Sequence[Tuple[str, Callable[..., SummaryRow]]] = (
("Average rank", average_rank),
),
) -> pd.io.formats.style.Styler:
"""
Scores table.
Prints a table where each row represents a dataset and each column
represents an estimator.
Parameters
----------
scores: array-like
Matrix of scores where each column represents a model.
Either the full matrix with all experiment results or the
matrix with the mean scores can be passed.
stds: array-like, default=None
Matrix of standard deviations where each column represents a
model. If ``scores`` is the full matrix with all results
this is automatically computed from it and should not be passed.
datasets: sequence of :external:class:`str`
List of dataset names.
estimators: sequence of :external:class:`str`
List of estimator names.
nobs: :external:class:`int`
Number of repetitions of the experiments. Used only for computing
significances when ``scores`` is not the full matrix.
greater_is_better: boolean, default=True
Whether a greater score is better (score) or worse
(loss).
method: {'average', 'min', 'max', 'dense', 'ordinal'}, default='average'
Method used to solve ties.
significancy_level: :external:class:`float`, default=0
Significancy level for considerin a result significant. If nonzero,
significancy is calculated using a t-test. In that case, if
``paired_test`` is ``True``, ``scores`` should be the full matrix
and a paired test is performed. Otherwise, the t-test assumes
independence, and either ``scores`` should be the full matrix
or ``nobs`` should be passed.
paired_test: :external:class:`bool`, default=False
Whether to perform a paired test or a test assuming independence.
If ``True``, ``scores`` should be the full matrix.
Otherwise, either ``scores`` should be the full matrix
or ``nobs`` should be passed.
two_sided: :external:class:`bool`, default=True
Whether to perform a two sided t-test or a one sided t-test.
default_style: {'html', 'latex', None}, default='html'
Default style for the table. Use ``None`` for no style. Note that
the CSS classes and textual formatting are always set.
precision: :external:class:`int`
Number of decimals used for floating point numbers.
summary_rows: sequence
List of (name, callable) tuples for additional summary rows.
By default, the rank average is computed.
Returns
-------
table: array-like
Table of mean and standard deviation of each estimator-dataset
pair. A ranking of estimators is also generated.
"""
scores = np.asanyarray(scores)
stds = None if stds is None else np.asanyarray(stds)
assert scores.ndim in {2, 3}
means = scores if scores.ndim == 2 else np.mean(scores, axis=-1)
if scores.ndim == 3:
assert stds is None
assert nobs is None
stds = np.std(scores, axis=-1)
nobs = scores.shape[-1]
ranks = np.asarray(
[
rankdata(-m, method=method)
if greater_is_better
else rankdata(m, method=method)
for m in means.round(precision)
]
)
significants = _all_significants(
scores,
means,
stds,
ranks,
nobs=nobs,
two_sided=two_sided,
paired_test=paired_test,
significancy_level=significancy_level,
)
table = pd.DataFrame(data=means, index=datasets, columns=estimators)
for i, d in enumerate(datasets):
for j, e in enumerate(estimators):
table.loc[d, e] = ScoreCell(
mean=means[i, j],
std=None if stds is None else stds[i, j],
rank=int(ranks[i, j]),
significant=significants[i, j],
)
# Create additional summary rows
additional_ranks = []
for name, summary_fun in summary_rows:
row = summary_fun(
scores=scores,
means=means,
stds=stds,
ranks=ranks,
greater_is_better=greater_is_better,
)
table.loc[name] = row.values
if row.greater_is_better is None:
additional_ranks.append(np.full(len(row.values), -1))
else:
additional_ranks.append(
rankdata(-row.values, method=method)
if row.greater_is_better
else rankdata(row.values, method=method),
)
styler = _set_style_classes(
table,
all_ranks=np.vstack([ranks] + additional_ranks),
significants=significants,
n_summary_rows=len(summary_rows),
)
styler = _set_style_formatter(
styler,
precision=precision,
show_rank=show_rank,
)
return _set_default_style(
styler,
n_summary_rows=len(summary_rows),
default_style=default_style,
)
def hypotheses_table(
samples: np.typing.ArrayLike,
models: Sequence[str],
*,
alpha: float = 0.05,
multitest: Optional[MultitestLike] = None,
test: TestLike = "wilcoxon",
correction: CorrectionLike = None,
multitest_args: Optional[Mapping[str, Any]] = None,
test_args: Optional[Mapping[str, Any]] = None,
) -> Tuple[Optional[pd.DataFrame], Optional[pd.DataFrame]]:
"""
Hypotheses table.
Prints a hypothesis table with a selected test and correction.
Parameters
----------
samples: array-like
Matrix of samples where each column represent a model.
models: array-like
Model names.
alpha: float in [0, 1], default=0.05
Significance level.
multitest: {'kruskal', 'friedmanchisquare'}, default=None
Ranking multitest used.
test: {'mannwhitneyu', 'wilcoxon'}, default='wilcoxon'
Ranking test used.
correction: {'bonferroni', 'sidak', 'holm-sidak', 'holm', \
'simes-hochberg', 'hommel', 'fdr_bh', 'fdr_by', 'fdr_tsbh', \
'fdr_tsbky'}, default=None
Method used to adjust the p-values.
multitest_args: dict
Optional ranking test arguments.
test_args: dict
Optional ranking test arguments.
Returns
-------
multitest_table: array-like
Table of p-value and rejection/non-rejection for the
multitest hypothesis.
test_table: array-like
Table of p-values and rejection/non-rejection for each test
hypothesis.
"""
if multitest_args is None:
multitest_args = {}
if test_args is None:
test_args = {}
samples = np.asanyarray(samples)
versus = list(it.combinations(range(len(models)), 2))
comparisons = [
f"{models[first]} vs {models[second]}" for first, second in versus
]
multitests = {
"kruskal": kruskal,
"friedmanchisquare": friedmanchisquare,
}
tests = {
"mannwhitneyu": mannwhitneyu,
"wilcoxon": wilcoxon,
}
multitest_table = None
if multitest is not None:
multitest_table = pd.DataFrame(
index=[multitest],
columns=["p-value", "Hypothesis"],
)
_, pvalue = multitests[multitest](
*samples.T,
**multitest_args,
)
reject_str = "Rejected" if pvalue <= alpha else "Not rejected"
multitest_table.loc[multitest] = ["{0:.2f}".format(pvalue), reject_str]
# If the multitest does not detect a significative difference,
# the individual tests are not meaningful, so skip them.
if pvalue > alpha:
return multitest_table, None
pvalues = [
tests[test](
samples[:, first],
samples[:, second],
**test_args,
)[1]
for first, second in versus
]
if correction is not None:
reject_bool, pvalues, _, _ = multipletests(
pvalues,
alpha,
method=correction,
)
reject = ["Rejected" if r else "Not rejected" for r in reject_bool]
else:
reject = [
"Rejected" if pvalue <= alpha else "Not rejected" for pvalue in pvalues
]
data = [("{0:.2f}".format(p), r) for p, r in zip(pvalues, reject)]
test_table = pd.DataFrame(
data,
index=comparisons,
columns=["p-value", "Hypothesis"],
)
return multitest_table, test_table | 0.914295 | 0.377311 |
from __future__ import annotations
import itertools
from contextlib import contextmanager
from dataclasses import dataclass
from time import perf_counter, sleep
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
)
from warnings import warn
import numpy as np
from sacred import Experiment, Ingredient
from sacred.observers import FileStorageObserver, MongoObserver, RunObserver
from sklearn.base import BaseEstimator, is_classifier
from sklearn.metrics import check_scoring
from sklearn.model_selection import check_cv
from sklearn.utils import Bunch
from incense import ExperimentLoader, FileSystemExperimentLoader
from incense.experiment import FileSystemExperiment
SelfType = TypeVar("SelfType")
class DataLike(Protocol):
def __getitem__(
self: SelfType,
key: np.typing.NDArray[int],
) -> SelfType:
pass
def __len__(self) -> int:
pass
DataType = TypeVar("DataType", bound=DataLike, contravariant=True)
TargetType = TypeVar("TargetType", bound=DataLike)
IndicesType = Tuple[np.typing.NDArray[int], np.typing.NDArray[int]]
ExplicitSplitType = Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[float, int]],
np.typing.NDArray[float],
np.typing.NDArray[Union[float, int]],
]
ConfigLike = Union[
Mapping[str, Any],
str,
]
ScorerLike = Union[
str,
Callable[[BaseEstimator, DataType, TargetType], float],
None,
]
class EstimatorProtocol(Protocol[DataType, TargetType]):
def fit(self: SelfType, X: DataType, y: TargetType) -> SelfType:
pass
def predict(self, X: DataType) -> TargetType:
pass
class CVSplitter(Protocol):
def split(
self,
X: np.typing.NDArray[float],
y: None = None,
groups: None = None,
) -> Iterable[IndicesType]:
pass
def get_n_splits(
self,
X: np.typing.NDArray[float],
y: None = None,
groups: None = None,
) -> int:
pass
CVLike = Union[
CVSplitter,
Iterable[IndicesType],
int,
None,
]
EstimatorLike = Union[
EstimatorProtocol[Any, Any],
Callable[..., EstimatorProtocol[Any, Any]],
Tuple[Callable[..., EstimatorProtocol[Any, Any]], ConfigLike],
]
DatasetLike = Union[
Bunch,
Callable[..., Bunch],
Tuple[Callable[..., Bunch], ConfigLike],
]
@dataclass
class ScoresInfo:
r"""
Class containing the scores of several related experiments.
Attributes
----------
dataset_names : Sequence of :external:class:`str`
Name of the datasets, with the same order in which are present
in the rows of the scores.
estimator_names : Sequence of :external:class:`str`
Name of the estimators, with the same order in which are present
in the columns of the scores.
scores : :external:class:`numpy.ndarray`
Test scores. It has size ``n_datasets`` :math:`\times` ``n_estimators``
:math:`\times` ``n_partitions``.
scores_mean : :external:class:`numpy.ndarray`
Test score means. It has size ``n_datasets``
:math:`\times` ``n_estimators``.
scores_std : :external:class:`numpy.ndarray`
Test score standard deviations. It has size ``n_datasets``
:math:`\times` ``n_estimators``.
See Also
--------
fetch_scores
"""
dataset_names: Sequence[str]
estimator_names: Sequence[str]
scores: np.typing.NDArray[float]
scores_mean: np.typing.NDArray[float]
scores_std: np.typing.NDArray[float]
def _append_info(experiment: Experiment, name: str, value: Any) -> None:
info_list = experiment.info.get(name, [])
info_list.append(value)
experiment.info[name] = info_list
@contextmanager
def _add_timing(experiment: Experiment, name: str) -> Iterator[None]:
initial_time = perf_counter()
try:
yield None
finally:
final_time = perf_counter()
elapsed_time = final_time - initial_time
_append_info(experiment, name, elapsed_time)
def _iterate_outer_cv(
outer_cv: CVLike | Iterable[Tuple[DataType, TargetType, DataType, TargetType]],
estimator: EstimatorProtocol[DataType, TargetType],
X: DataType,
y: TargetType,
) -> Iterable[Tuple[DataType, TargetType, DataType, TargetType]]:
"""Iterate over multiple partitions."""
if isinstance(outer_cv, Iterable):
outer_cv, cv_copy = itertools.tee(outer_cv)
if len(next(cv_copy)) == 4:
yield from outer_cv
cv = check_cv(outer_cv, y, classifier=is_classifier(estimator))
yield from (
(X[train], y[train], X[test], y[test]) for train, test in cv.split(X, y)
)
def _benchmark_from_data(
experiment: Experiment,
*,
estimator: BaseEstimator,
X_train: DataType,
y_train: TargetType,
X_test: DataType,
y_test: TargetType,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> None:
scoring_fun = check_scoring(estimator, scoring)
with _add_timing(experiment, "fit_time"):
estimator.fit(X_train, y_train)
if save_estimator:
_append_info(experiment, "fitted_estimator", estimator)
best_params = getattr(estimator, "best_params_", None)
if best_params:
_append_info(experiment, "search_best_params", best_params)
best_score = getattr(estimator, "best_score_", None)
if best_params:
_append_info(experiment, "search_best_score", best_score)
with _add_timing(experiment, "score_time"):
test_score = scoring_fun(estimator, X_test, y_test)
_append_info(experiment, "test_score", float(test_score))
if save_train:
train_score = scoring_fun(estimator, X_train, y_train)
_append_info(experiment, "train_score", float(train_score))
for output in ("transform", "predict"):
method = getattr(estimator, output, None)
if method is not None:
with _add_timing(experiment, f"{output}_time"):
_append_info(experiment, f"{output}", method(X_test))
def _compute_means(experiment: Experiment) -> None:
experiment.info["score_mean"] = float(np.nanmean(experiment.info["test_score"]))
experiment.info["score_std"] = float(np.nanstd(experiment.info["test_score"]))
def _benchmark_one(
experiment: Experiment,
*,
estimator: BaseEstimator,
data: Bunch,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> None:
"""Use only one predefined partition."""
X = data.data
y = data.target
train_indices = getattr(data, "train_indices", [])
validation_indices = getattr(data, "validation_indices", [])
test_indices = getattr(data, "test_indices", [])
X_train_val = X[train_indices + validation_indices] if train_indices else X
y_train_val = y[train_indices + validation_indices] if train_indices else y
X_test = X[test_indices]
y_test = y[test_indices]
_benchmark_from_data(
experiment=experiment,
estimator=estimator,
X_train=X_train_val,
y_train=y_train_val,
X_test=X_test,
y_test=y_test,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
_compute_means(experiment)
def _benchmark_partitions(
experiment: Experiment,
*,
estimator: BaseEstimator,
data: Bunch,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
outer_cv: CVLike | Literal["dataset"] = None,
) -> None:
"""Use several partitions."""
outer_cv = data.outer_cv if outer_cv == "dataset" else outer_cv
for X_train, y_train, X_test, y_test in _iterate_outer_cv(
outer_cv=outer_cv,
estimator=estimator,
X=data.data,
y=data.target,
):
_benchmark_from_data(
experiment=experiment,
estimator=estimator,
X_train=X_train,
y_train=y_train,
X_test=X_test,
y_test=y_test,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
_compute_means(experiment)
def _benchmark(
experiment: Experiment,
*,
estimator: BaseEstimator,
data: Bunch,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
outer_cv: CVLike | Literal[False, "dataset"] = None,
) -> None:
"""Run the experiment."""
if outer_cv is False:
_benchmark_one(
experiment=experiment,
estimator=estimator,
data=data,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
else:
_benchmark_partitions(
experiment=experiment,
estimator=estimator,
data=data,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
outer_cv=outer_cv,
)
def experiment(
dataset: Callable[..., Bunch],
estimator: Callable[..., BaseEstimator],
*,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> Experiment:
"""
Prepare a Scikit-learn experiment as a Sacred experiment.
Prepare a Scikit-learn experiment indicating a dataset and an estimator and
return it as a Sacred experiment.
Parameters
----------
dataset : function
Dataset fetch function. Might receive any argument. Must return a
:external:class:`sklearn.utils.Bunch` with ``data``, ``target``
(might be ``None``), ``inner_cv`` (might be ``None``) and ``outer_cv``
(might be ``None``).
estimator : function
Estimator initialization function. Might receive any keyword argument.
Must return an initialized sklearn-compatible estimator.
Returns
-------
experiment : Experiment
Sacred experiment, ready to be run.
"""
dataset_ingredient = Ingredient("dataset")
dataset = dataset_ingredient.capture(dataset)
estimator_ingredient = Ingredient("estimator")
estimator = estimator_ingredient.capture(estimator)
experiment = Experiment(
ingredients=(
dataset_ingredient,
estimator_ingredient,
),
)
@experiment.main
def run() -> None:
"""Run the experiment."""
data = dataset()
# Metaparameter search
cv = getattr(data, "inner_cv", None)
try:
e = estimator(cv=cv)
except TypeError as exception:
warn(f"The estimator does not accept cv: {exception}")
e = estimator()
# Model assessment
_benchmark(
experiment=experiment,
estimator=e,
data=data,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
# Ensure that everything is in the info dict at the end
# See https://github.com/IDSIA/sacred/issues/830
sleep(experiment.current_run.beat_interval + 1)
return experiment
def _get_estimator_function(
experiment: Experiment,
estimator: EstimatorLike,
) -> Callable[..., EstimatorProtocol[Any, Any]]:
if hasattr(estimator, "fit"):
def estimator_function() -> EstimatorProtocol:
return estimator
else:
estimator_function = estimator
return experiment.capture(estimator_function)
def _get_dataset_function(
experiment: Experiment,
dataset: DatasetLike,
) -> Callable[..., Bunch]:
if callable(dataset):
dataset_function = dataset
else:
def dataset_function() -> Bunch:
return dataset
return experiment.capture(dataset_function)
def _create_one_experiment(
*,
estimator_name: str,
estimator: EstimatorLike,
dataset_name: str,
dataset: DatasetLike,
storage: RunObserver,
config: ConfigLike,
inner_cv: CVLike | Literal[False, "dataset"] = None,
outer_cv: CVLike | Literal[False, "dataset"] = None,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> Experiment:
experiment = Experiment()
experiment.add_config(config)
experiment.add_config({"estimator_name": estimator_name})
if isinstance(estimator, tuple):
estimator, estimator_config = estimator
experiment.add_config(estimator_config)
experiment.add_config({"dataset_name": dataset_name})
if isinstance(dataset, tuple):
dataset, dataset_config = dataset
experiment.add_config(dataset_config)
experiment.observers.append(storage)
estimator_function = _get_estimator_function(experiment, estimator)
dataset_function = _get_dataset_function(experiment, dataset)
@experiment.main
def run() -> None:
"""Run the experiment."""
dataset = dataset_function()
# Metaparameter search
cv = dataset.inner_cv if inner_cv == "dataset" else inner_cv
estimator = estimator_function()
if hasattr(estimator, "cv") and cv is not False:
estimator.cv = cv
# Model assessment
_benchmark(
experiment=experiment,
estimator=estimator,
data=dataset,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
outer_cv=outer_cv,
)
return experiment
def create_experiments(
*,
datasets: Mapping[str, DatasetLike],
estimators: Mapping[str, EstimatorLike],
storage: RunObserver | str,
config: ConfigLike | None = None,
inner_cv: CVLike | Literal[False, "dataset"] = False,
outer_cv: CVLike | Literal[False, "dataset"] = None,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> Sequence[Experiment]:
"""
Create several Sacred experiments.
It receives a set of estimators and datasets, and create Sacred experiment
objects for them.
Parameters
----------
datasets : Mapping
Mapping where each key is the name for a dataset and each value
is either:
* A :external:class:`sklearn.utils.Bunch` with the fields explained
in :doc:`/structure`. Only ``data`` and ``target`` are
mandatory.
* A function receiving arbitrary config values and returning a
:external:class:`sklearn.utils.Bunch` object like the one explained
above.
* A tuple with such a function and additional configuration (either
a mapping or a filename).
estimators : Mapping
Mapping where each key is the name for a estimator and each value
is either:
* A scikit-learn compatible estimator.
* A function receiving arbitrary config values and returning a
scikit-learn compatible estimator.
* A tuple with such a function and additional configuration (either
a mapping or a filename).
storage : :external:class:`sacred.observers.RunObserver` or :class:`str`
Where the experiments will be stored. Either a Sacred observer, for
example to store in a Mongo database, or the name of a directory, to
use a file observer.
config : Mapping, :class:`str` or ``None``, default ``None``
A mapping or filename with additional configuration for the experiment.
inner_cv : CV-like object, ``"datasets"`` or ``False``, default ``False``
For estimators that perform cross validation (they have a ``cv``
parameter) this sets the cross validation strategy, as follows:
* If ``False`` the original value of ``cv`` is unchanged.
* If ``"dataset"``, the :external:class:`sklearn.utils.Bunch` objects
for the datasets must have a ``inner_cv`` attribute, which will
be the one used.
* Otherwise, ``cv`` is changed to this value.
outer_cv : CV-like object, ``"datasets"`` or ``False``, default ``None``
The strategy used to evaluate different partitions of the data, as
follows:
* If ``False`` use only one partition: the one specified in the
dataset. Thus the :external:class:`sklearn.utils.Bunch` objects
for the datasets should have defined at least a train and a test
partition.
* If ``"dataset"``, the :external:class:`sklearn.utils.Bunch` objects
for the datasets must have a ``outer_cv`` attribute, which will
be the one used.
* Otherwise, this will be passed to
:external:func:`sklearn.model_selection.check_cv` and the resulting
cross validator will be used to define the partitions.
scoring : string, callable or ``None``, default ``None``
Scoring method used to measure the performance of the estimator.
If a callable, it should have the signature `scorer(estimator, X, y)`.
If ``None`` it uses the ``scorer`` method of the estimator.
save_estimator : bool, default ``False``
Whether to save the fitted estimator. This is useful for debugging
and for obtaining extra information in some cases, but for some
estimators it could consume much storage.
save_train : bool, default ``False``
If ``True``, compute and store also the score over the train data.
Returns
-------
experiments : Sequence of :external:class:`sacred.Experiment`
Sequence of Sacred experiments, ready to be run.
See Also
--------
run_experiments
fetch_scores
"""
if isinstance(storage, str):
storage = FileStorageObserver(storage)
if config is None:
config = {}
return [
_create_one_experiment(
estimator_name=estimator_name,
estimator=estimator,
dataset_name=dataset_name,
dataset=dataset,
storage=storage,
config=config,
inner_cv=inner_cv,
outer_cv=outer_cv,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
for estimator_name, estimator in estimators.items()
for dataset_name, dataset in datasets.items()
]
def run_experiments(
experiments: Sequence[Experiment],
) -> Sequence[int]:
"""
Run Sacred experiments.
Parameters
----------
experiments : Sequence of :external:class:`sacred.Experiment`
Sequence of Sacred experiments to be run.
Returns
-------
ids : Sequence of :external:class:`int`
Sequence of identifiers for each experiment.
See Also
--------
create_experiments
fetch_scores
"""
return [e.run()._id for e in experiments]
def _loader_from_observer(
storage: RunObserver | str,
) -> ExperimentLoader | FileSystemExperimentLoader:
if isinstance(storage, str):
return FileSystemExperimentLoader(storage)
elif isinstance(storage, FileStorageObserver):
return FileSystemExperimentLoader(storage.basedir)
elif isinstance(storage, MongoObserver):
database = storage.runs.database
client = database.client
url, port = list(
client.topology_description.server_descriptions().keys(),
)[0]
return ExperimentLoader(
mongo_uri=f"mongodb://{url}:{port}/",
db_name=database.name,
unpickle=False,
)
raise ValueError(f"Observer {storage} is not supported.")
def _get_experiments(
*,
storage: RunObserver | str,
ids: Sequence[int] | None = None,
dataset_names: Sequence[str] | None = None,
estimator_names: Sequence[str] | None = None,
) -> Sequence[Experiment]:
loader = _loader_from_observer(storage)
if (
(ids, dataset_names, estimator_names) == (None, None, None)
or isinstance(loader, FileSystemExperimentLoader)
and ids is None
):
find_all_fun = getattr(
loader,
"find_all",
lambda: [
FileSystemExperiment.from_run_dir(run_dir)
for run_dir in loader._runs_dir.iterdir()
],
)
experiments = find_all_fun()
elif (dataset_names, estimator_names) == (None, None) or isinstance(
loader, FileSystemExperimentLoader
):
load_ids_fun = getattr(
loader,
"find_by_ids",
lambda id_seq: [
loader.find_by_id(experiment_id) for experiment_id in id_seq
],
)
experiments = load_ids_fun(ids)
else:
conditions: List[
Mapping[
str,
Mapping[str, Sequence[Any]],
]
] = []
if ids is not None:
conditions.append({"_id": {"$in": ids}})
if estimator_names is not None:
conditions.append({"config.estimator_name": {"$in": estimator_names}})
if dataset_names is not None:
conditions.append({"config.dataset_name": {"$in": dataset_names}})
query = {"$and": conditions}
experiments = loader.find(query)
if isinstance(loader, FileSystemExperimentLoader):
# Filter experiments by dataset and estimator names
experiments = [
e
for e in experiments
if (
(
estimator_names is None
or e.config["estimator_name"] in estimator_names
)
and (dataset_names is None or e.config["dataset_name"] in dataset_names)
)
]
return experiments
def fetch_scores(
*,
storage: RunObserver | str,
ids: Sequence[int] | None = None,
dataset_names: Sequence[str] | None = None,
estimator_names: Sequence[str] | None = None,
) -> ScoresInfo:
"""
Fetch scores from Sacred experiments.
By default, it retrieves every experiment. The parameters ``ids``,
``estimator_names`` and ``dataset_names`` can be used to restrict the
number of experiments returned.
Parameters
----------
storage : :external:class:`sacred.observers.RunObserver` or :class:`str`
Where the experiments are stored. Either a Sacred observer, for
example for a Mongo database, or the name of a directory, to
use a file observer.
ids : Sequence of :external:class:`int` or ``None``, default ``None``
If not ``None``, return only experiments whose id is contained
in the sequence.
dataset_names : Sequence of :class:`str` or ``None``, default ``None``
If not ``None``, return only experiments whose dataset names are
contained in the sequence.
The order of the names is also the one used for datasets when
combining the results.
estimator_names : Sequence of :class:`str` or ``None``, default ``None``
If not ``None``, return only experiments whose estimator names are
contained in the sequence.
The order of the names is also the one used for estimators when
combining the results.
Returns
-------
info : :class:`ScoresInfo`
Class containing information about experiments scores.
See Also
--------
run_experiments
fetch_scores
"""
experiments = _get_experiments(
storage=storage,
ids=ids,
dataset_names=dataset_names,
estimator_names=estimator_names,
)
dict_experiments: Dict[
str,
Dict[str, Tuple[np.typing.NDArray[float], float, float]],
] = {}
estimator_list = []
dataset_list = []
nobs = 0
for experiment in experiments:
estimator_name = experiment.config["estimator_name"]
if estimator_name not in estimator_list:
estimator_list.append(estimator_name)
dataset_name = experiment.config["dataset_name"]
if dataset_name not in dataset_list:
dataset_list.append(dataset_name)
scores = experiment.info.get("test_score", np.array([]))
score_mean = experiment.info.get("score_mean", np.nan)
score_std = experiment.info.get("score_std", np.nan)
nobs = max(nobs, len(scores))
assert np.isnan(score_mean) or score_mean == np.mean(scores)
assert np.isnan(score_std) or score_std == np.std(scores)
if estimator_name not in dict_experiments:
dict_experiments[estimator_name] = {}
if dataset_name in dict_experiments[estimator_name]:
raise ValueError(
f"Repeated experiment: ({estimator_name}, {dataset_name})",
)
dict_experiments[estimator_name][dataset_name] = (
scores,
score_mean,
score_std,
)
estimator_names = (
tuple(estimator_list) if estimator_names is None else estimator_names
)
dataset_names = tuple(dataset_list) if dataset_names is None else dataset_names
matrix_shape = (len(dataset_names), len(estimator_names))
scores = np.full(matrix_shape + (nobs,), np.nan)
scores_mean = np.full(matrix_shape, np.nan)
scores_std = np.full(matrix_shape, np.nan)
for i, dataset_name in enumerate(dataset_names):
for j, estimator_name in enumerate(estimator_names):
dict_estimator = dict_experiments.get(estimator_name, {})
s, mean, std = dict_estimator.get(
dataset_name,
(np.array([]), np.nan, np.nan),
)
if len(s) == nobs:
scores[i, j] = s
scores_mean[i, j] = mean
scores_std[i, j] = std
scores = np.array(scores.tolist())
return ScoresInfo(
dataset_names=dataset_names,
estimator_names=estimator_names,
scores=scores,
scores_mean=scores_mean,
scores_std=scores_std,
) | scikit-datasets | /scikit_datasets-0.2.4-py3-none-any.whl/skdatasets/utils/experiment.py | experiment.py | from __future__ import annotations
import itertools
from contextlib import contextmanager
from dataclasses import dataclass
from time import perf_counter, sleep
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Protocol,
Sequence,
Tuple,
TypeVar,
Union,
)
from warnings import warn
import numpy as np
from sacred import Experiment, Ingredient
from sacred.observers import FileStorageObserver, MongoObserver, RunObserver
from sklearn.base import BaseEstimator, is_classifier
from sklearn.metrics import check_scoring
from sklearn.model_selection import check_cv
from sklearn.utils import Bunch
from incense import ExperimentLoader, FileSystemExperimentLoader
from incense.experiment import FileSystemExperiment
SelfType = TypeVar("SelfType")
class DataLike(Protocol):
def __getitem__(
self: SelfType,
key: np.typing.NDArray[int],
) -> SelfType:
pass
def __len__(self) -> int:
pass
DataType = TypeVar("DataType", bound=DataLike, contravariant=True)
TargetType = TypeVar("TargetType", bound=DataLike)
IndicesType = Tuple[np.typing.NDArray[int], np.typing.NDArray[int]]
ExplicitSplitType = Tuple[
np.typing.NDArray[float],
np.typing.NDArray[Union[float, int]],
np.typing.NDArray[float],
np.typing.NDArray[Union[float, int]],
]
ConfigLike = Union[
Mapping[str, Any],
str,
]
ScorerLike = Union[
str,
Callable[[BaseEstimator, DataType, TargetType], float],
None,
]
class EstimatorProtocol(Protocol[DataType, TargetType]):
def fit(self: SelfType, X: DataType, y: TargetType) -> SelfType:
pass
def predict(self, X: DataType) -> TargetType:
pass
class CVSplitter(Protocol):
def split(
self,
X: np.typing.NDArray[float],
y: None = None,
groups: None = None,
) -> Iterable[IndicesType]:
pass
def get_n_splits(
self,
X: np.typing.NDArray[float],
y: None = None,
groups: None = None,
) -> int:
pass
CVLike = Union[
CVSplitter,
Iterable[IndicesType],
int,
None,
]
EstimatorLike = Union[
EstimatorProtocol[Any, Any],
Callable[..., EstimatorProtocol[Any, Any]],
Tuple[Callable[..., EstimatorProtocol[Any, Any]], ConfigLike],
]
DatasetLike = Union[
Bunch,
Callable[..., Bunch],
Tuple[Callable[..., Bunch], ConfigLike],
]
@dataclass
class ScoresInfo:
r"""
Class containing the scores of several related experiments.
Attributes
----------
dataset_names : Sequence of :external:class:`str`
Name of the datasets, with the same order in which are present
in the rows of the scores.
estimator_names : Sequence of :external:class:`str`
Name of the estimators, with the same order in which are present
in the columns of the scores.
scores : :external:class:`numpy.ndarray`
Test scores. It has size ``n_datasets`` :math:`\times` ``n_estimators``
:math:`\times` ``n_partitions``.
scores_mean : :external:class:`numpy.ndarray`
Test score means. It has size ``n_datasets``
:math:`\times` ``n_estimators``.
scores_std : :external:class:`numpy.ndarray`
Test score standard deviations. It has size ``n_datasets``
:math:`\times` ``n_estimators``.
See Also
--------
fetch_scores
"""
dataset_names: Sequence[str]
estimator_names: Sequence[str]
scores: np.typing.NDArray[float]
scores_mean: np.typing.NDArray[float]
scores_std: np.typing.NDArray[float]
def _append_info(experiment: Experiment, name: str, value: Any) -> None:
info_list = experiment.info.get(name, [])
info_list.append(value)
experiment.info[name] = info_list
@contextmanager
def _add_timing(experiment: Experiment, name: str) -> Iterator[None]:
initial_time = perf_counter()
try:
yield None
finally:
final_time = perf_counter()
elapsed_time = final_time - initial_time
_append_info(experiment, name, elapsed_time)
def _iterate_outer_cv(
outer_cv: CVLike | Iterable[Tuple[DataType, TargetType, DataType, TargetType]],
estimator: EstimatorProtocol[DataType, TargetType],
X: DataType,
y: TargetType,
) -> Iterable[Tuple[DataType, TargetType, DataType, TargetType]]:
"""Iterate over multiple partitions."""
if isinstance(outer_cv, Iterable):
outer_cv, cv_copy = itertools.tee(outer_cv)
if len(next(cv_copy)) == 4:
yield from outer_cv
cv = check_cv(outer_cv, y, classifier=is_classifier(estimator))
yield from (
(X[train], y[train], X[test], y[test]) for train, test in cv.split(X, y)
)
def _benchmark_from_data(
experiment: Experiment,
*,
estimator: BaseEstimator,
X_train: DataType,
y_train: TargetType,
X_test: DataType,
y_test: TargetType,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> None:
scoring_fun = check_scoring(estimator, scoring)
with _add_timing(experiment, "fit_time"):
estimator.fit(X_train, y_train)
if save_estimator:
_append_info(experiment, "fitted_estimator", estimator)
best_params = getattr(estimator, "best_params_", None)
if best_params:
_append_info(experiment, "search_best_params", best_params)
best_score = getattr(estimator, "best_score_", None)
if best_params:
_append_info(experiment, "search_best_score", best_score)
with _add_timing(experiment, "score_time"):
test_score = scoring_fun(estimator, X_test, y_test)
_append_info(experiment, "test_score", float(test_score))
if save_train:
train_score = scoring_fun(estimator, X_train, y_train)
_append_info(experiment, "train_score", float(train_score))
for output in ("transform", "predict"):
method = getattr(estimator, output, None)
if method is not None:
with _add_timing(experiment, f"{output}_time"):
_append_info(experiment, f"{output}", method(X_test))
def _compute_means(experiment: Experiment) -> None:
experiment.info["score_mean"] = float(np.nanmean(experiment.info["test_score"]))
experiment.info["score_std"] = float(np.nanstd(experiment.info["test_score"]))
def _benchmark_one(
experiment: Experiment,
*,
estimator: BaseEstimator,
data: Bunch,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> None:
"""Use only one predefined partition."""
X = data.data
y = data.target
train_indices = getattr(data, "train_indices", [])
validation_indices = getattr(data, "validation_indices", [])
test_indices = getattr(data, "test_indices", [])
X_train_val = X[train_indices + validation_indices] if train_indices else X
y_train_val = y[train_indices + validation_indices] if train_indices else y
X_test = X[test_indices]
y_test = y[test_indices]
_benchmark_from_data(
experiment=experiment,
estimator=estimator,
X_train=X_train_val,
y_train=y_train_val,
X_test=X_test,
y_test=y_test,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
_compute_means(experiment)
def _benchmark_partitions(
experiment: Experiment,
*,
estimator: BaseEstimator,
data: Bunch,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
outer_cv: CVLike | Literal["dataset"] = None,
) -> None:
"""Use several partitions."""
outer_cv = data.outer_cv if outer_cv == "dataset" else outer_cv
for X_train, y_train, X_test, y_test in _iterate_outer_cv(
outer_cv=outer_cv,
estimator=estimator,
X=data.data,
y=data.target,
):
_benchmark_from_data(
experiment=experiment,
estimator=estimator,
X_train=X_train,
y_train=y_train,
X_test=X_test,
y_test=y_test,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
_compute_means(experiment)
def _benchmark(
experiment: Experiment,
*,
estimator: BaseEstimator,
data: Bunch,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
outer_cv: CVLike | Literal[False, "dataset"] = None,
) -> None:
"""Run the experiment."""
if outer_cv is False:
_benchmark_one(
experiment=experiment,
estimator=estimator,
data=data,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
else:
_benchmark_partitions(
experiment=experiment,
estimator=estimator,
data=data,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
outer_cv=outer_cv,
)
def experiment(
dataset: Callable[..., Bunch],
estimator: Callable[..., BaseEstimator],
*,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> Experiment:
"""
Prepare a Scikit-learn experiment as a Sacred experiment.
Prepare a Scikit-learn experiment indicating a dataset and an estimator and
return it as a Sacred experiment.
Parameters
----------
dataset : function
Dataset fetch function. Might receive any argument. Must return a
:external:class:`sklearn.utils.Bunch` with ``data``, ``target``
(might be ``None``), ``inner_cv`` (might be ``None``) and ``outer_cv``
(might be ``None``).
estimator : function
Estimator initialization function. Might receive any keyword argument.
Must return an initialized sklearn-compatible estimator.
Returns
-------
experiment : Experiment
Sacred experiment, ready to be run.
"""
dataset_ingredient = Ingredient("dataset")
dataset = dataset_ingredient.capture(dataset)
estimator_ingredient = Ingredient("estimator")
estimator = estimator_ingredient.capture(estimator)
experiment = Experiment(
ingredients=(
dataset_ingredient,
estimator_ingredient,
),
)
@experiment.main
def run() -> None:
"""Run the experiment."""
data = dataset()
# Metaparameter search
cv = getattr(data, "inner_cv", None)
try:
e = estimator(cv=cv)
except TypeError as exception:
warn(f"The estimator does not accept cv: {exception}")
e = estimator()
# Model assessment
_benchmark(
experiment=experiment,
estimator=e,
data=data,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
# Ensure that everything is in the info dict at the end
# See https://github.com/IDSIA/sacred/issues/830
sleep(experiment.current_run.beat_interval + 1)
return experiment
def _get_estimator_function(
experiment: Experiment,
estimator: EstimatorLike,
) -> Callable[..., EstimatorProtocol[Any, Any]]:
if hasattr(estimator, "fit"):
def estimator_function() -> EstimatorProtocol:
return estimator
else:
estimator_function = estimator
return experiment.capture(estimator_function)
def _get_dataset_function(
experiment: Experiment,
dataset: DatasetLike,
) -> Callable[..., Bunch]:
if callable(dataset):
dataset_function = dataset
else:
def dataset_function() -> Bunch:
return dataset
return experiment.capture(dataset_function)
def _create_one_experiment(
*,
estimator_name: str,
estimator: EstimatorLike,
dataset_name: str,
dataset: DatasetLike,
storage: RunObserver,
config: ConfigLike,
inner_cv: CVLike | Literal[False, "dataset"] = None,
outer_cv: CVLike | Literal[False, "dataset"] = None,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> Experiment:
experiment = Experiment()
experiment.add_config(config)
experiment.add_config({"estimator_name": estimator_name})
if isinstance(estimator, tuple):
estimator, estimator_config = estimator
experiment.add_config(estimator_config)
experiment.add_config({"dataset_name": dataset_name})
if isinstance(dataset, tuple):
dataset, dataset_config = dataset
experiment.add_config(dataset_config)
experiment.observers.append(storage)
estimator_function = _get_estimator_function(experiment, estimator)
dataset_function = _get_dataset_function(experiment, dataset)
@experiment.main
def run() -> None:
"""Run the experiment."""
dataset = dataset_function()
# Metaparameter search
cv = dataset.inner_cv if inner_cv == "dataset" else inner_cv
estimator = estimator_function()
if hasattr(estimator, "cv") and cv is not False:
estimator.cv = cv
# Model assessment
_benchmark(
experiment=experiment,
estimator=estimator,
data=dataset,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
outer_cv=outer_cv,
)
return experiment
def create_experiments(
*,
datasets: Mapping[str, DatasetLike],
estimators: Mapping[str, EstimatorLike],
storage: RunObserver | str,
config: ConfigLike | None = None,
inner_cv: CVLike | Literal[False, "dataset"] = False,
outer_cv: CVLike | Literal[False, "dataset"] = None,
scoring: ScorerLike[DataType, TargetType] = None,
save_estimator: bool = False,
save_train: bool = False,
) -> Sequence[Experiment]:
"""
Create several Sacred experiments.
It receives a set of estimators and datasets, and create Sacred experiment
objects for them.
Parameters
----------
datasets : Mapping
Mapping where each key is the name for a dataset and each value
is either:
* A :external:class:`sklearn.utils.Bunch` with the fields explained
in :doc:`/structure`. Only ``data`` and ``target`` are
mandatory.
* A function receiving arbitrary config values and returning a
:external:class:`sklearn.utils.Bunch` object like the one explained
above.
* A tuple with such a function and additional configuration (either
a mapping or a filename).
estimators : Mapping
Mapping where each key is the name for a estimator and each value
is either:
* A scikit-learn compatible estimator.
* A function receiving arbitrary config values and returning a
scikit-learn compatible estimator.
* A tuple with such a function and additional configuration (either
a mapping or a filename).
storage : :external:class:`sacred.observers.RunObserver` or :class:`str`
Where the experiments will be stored. Either a Sacred observer, for
example to store in a Mongo database, or the name of a directory, to
use a file observer.
config : Mapping, :class:`str` or ``None``, default ``None``
A mapping or filename with additional configuration for the experiment.
inner_cv : CV-like object, ``"datasets"`` or ``False``, default ``False``
For estimators that perform cross validation (they have a ``cv``
parameter) this sets the cross validation strategy, as follows:
* If ``False`` the original value of ``cv`` is unchanged.
* If ``"dataset"``, the :external:class:`sklearn.utils.Bunch` objects
for the datasets must have a ``inner_cv`` attribute, which will
be the one used.
* Otherwise, ``cv`` is changed to this value.
outer_cv : CV-like object, ``"datasets"`` or ``False``, default ``None``
The strategy used to evaluate different partitions of the data, as
follows:
* If ``False`` use only one partition: the one specified in the
dataset. Thus the :external:class:`sklearn.utils.Bunch` objects
for the datasets should have defined at least a train and a test
partition.
* If ``"dataset"``, the :external:class:`sklearn.utils.Bunch` objects
for the datasets must have a ``outer_cv`` attribute, which will
be the one used.
* Otherwise, this will be passed to
:external:func:`sklearn.model_selection.check_cv` and the resulting
cross validator will be used to define the partitions.
scoring : string, callable or ``None``, default ``None``
Scoring method used to measure the performance of the estimator.
If a callable, it should have the signature `scorer(estimator, X, y)`.
If ``None`` it uses the ``scorer`` method of the estimator.
save_estimator : bool, default ``False``
Whether to save the fitted estimator. This is useful for debugging
and for obtaining extra information in some cases, but for some
estimators it could consume much storage.
save_train : bool, default ``False``
If ``True``, compute and store also the score over the train data.
Returns
-------
experiments : Sequence of :external:class:`sacred.Experiment`
Sequence of Sacred experiments, ready to be run.
See Also
--------
run_experiments
fetch_scores
"""
if isinstance(storage, str):
storage = FileStorageObserver(storage)
if config is None:
config = {}
return [
_create_one_experiment(
estimator_name=estimator_name,
estimator=estimator,
dataset_name=dataset_name,
dataset=dataset,
storage=storage,
config=config,
inner_cv=inner_cv,
outer_cv=outer_cv,
scoring=scoring,
save_estimator=save_estimator,
save_train=save_train,
)
for estimator_name, estimator in estimators.items()
for dataset_name, dataset in datasets.items()
]
def run_experiments(
experiments: Sequence[Experiment],
) -> Sequence[int]:
"""
Run Sacred experiments.
Parameters
----------
experiments : Sequence of :external:class:`sacred.Experiment`
Sequence of Sacred experiments to be run.
Returns
-------
ids : Sequence of :external:class:`int`
Sequence of identifiers for each experiment.
See Also
--------
create_experiments
fetch_scores
"""
return [e.run()._id for e in experiments]
def _loader_from_observer(
storage: RunObserver | str,
) -> ExperimentLoader | FileSystemExperimentLoader:
if isinstance(storage, str):
return FileSystemExperimentLoader(storage)
elif isinstance(storage, FileStorageObserver):
return FileSystemExperimentLoader(storage.basedir)
elif isinstance(storage, MongoObserver):
database = storage.runs.database
client = database.client
url, port = list(
client.topology_description.server_descriptions().keys(),
)[0]
return ExperimentLoader(
mongo_uri=f"mongodb://{url}:{port}/",
db_name=database.name,
unpickle=False,
)
raise ValueError(f"Observer {storage} is not supported.")
def _get_experiments(
*,
storage: RunObserver | str,
ids: Sequence[int] | None = None,
dataset_names: Sequence[str] | None = None,
estimator_names: Sequence[str] | None = None,
) -> Sequence[Experiment]:
loader = _loader_from_observer(storage)
if (
(ids, dataset_names, estimator_names) == (None, None, None)
or isinstance(loader, FileSystemExperimentLoader)
and ids is None
):
find_all_fun = getattr(
loader,
"find_all",
lambda: [
FileSystemExperiment.from_run_dir(run_dir)
for run_dir in loader._runs_dir.iterdir()
],
)
experiments = find_all_fun()
elif (dataset_names, estimator_names) == (None, None) or isinstance(
loader, FileSystemExperimentLoader
):
load_ids_fun = getattr(
loader,
"find_by_ids",
lambda id_seq: [
loader.find_by_id(experiment_id) for experiment_id in id_seq
],
)
experiments = load_ids_fun(ids)
else:
conditions: List[
Mapping[
str,
Mapping[str, Sequence[Any]],
]
] = []
if ids is not None:
conditions.append({"_id": {"$in": ids}})
if estimator_names is not None:
conditions.append({"config.estimator_name": {"$in": estimator_names}})
if dataset_names is not None:
conditions.append({"config.dataset_name": {"$in": dataset_names}})
query = {"$and": conditions}
experiments = loader.find(query)
if isinstance(loader, FileSystemExperimentLoader):
# Filter experiments by dataset and estimator names
experiments = [
e
for e in experiments
if (
(
estimator_names is None
or e.config["estimator_name"] in estimator_names
)
and (dataset_names is None or e.config["dataset_name"] in dataset_names)
)
]
return experiments
def fetch_scores(
*,
storage: RunObserver | str,
ids: Sequence[int] | None = None,
dataset_names: Sequence[str] | None = None,
estimator_names: Sequence[str] | None = None,
) -> ScoresInfo:
"""
Fetch scores from Sacred experiments.
By default, it retrieves every experiment. The parameters ``ids``,
``estimator_names`` and ``dataset_names`` can be used to restrict the
number of experiments returned.
Parameters
----------
storage : :external:class:`sacred.observers.RunObserver` or :class:`str`
Where the experiments are stored. Either a Sacred observer, for
example for a Mongo database, or the name of a directory, to
use a file observer.
ids : Sequence of :external:class:`int` or ``None``, default ``None``
If not ``None``, return only experiments whose id is contained
in the sequence.
dataset_names : Sequence of :class:`str` or ``None``, default ``None``
If not ``None``, return only experiments whose dataset names are
contained in the sequence.
The order of the names is also the one used for datasets when
combining the results.
estimator_names : Sequence of :class:`str` or ``None``, default ``None``
If not ``None``, return only experiments whose estimator names are
contained in the sequence.
The order of the names is also the one used for estimators when
combining the results.
Returns
-------
info : :class:`ScoresInfo`
Class containing information about experiments scores.
See Also
--------
run_experiments
fetch_scores
"""
experiments = _get_experiments(
storage=storage,
ids=ids,
dataset_names=dataset_names,
estimator_names=estimator_names,
)
dict_experiments: Dict[
str,
Dict[str, Tuple[np.typing.NDArray[float], float, float]],
] = {}
estimator_list = []
dataset_list = []
nobs = 0
for experiment in experiments:
estimator_name = experiment.config["estimator_name"]
if estimator_name not in estimator_list:
estimator_list.append(estimator_name)
dataset_name = experiment.config["dataset_name"]
if dataset_name not in dataset_list:
dataset_list.append(dataset_name)
scores = experiment.info.get("test_score", np.array([]))
score_mean = experiment.info.get("score_mean", np.nan)
score_std = experiment.info.get("score_std", np.nan)
nobs = max(nobs, len(scores))
assert np.isnan(score_mean) or score_mean == np.mean(scores)
assert np.isnan(score_std) or score_std == np.std(scores)
if estimator_name not in dict_experiments:
dict_experiments[estimator_name] = {}
if dataset_name in dict_experiments[estimator_name]:
raise ValueError(
f"Repeated experiment: ({estimator_name}, {dataset_name})",
)
dict_experiments[estimator_name][dataset_name] = (
scores,
score_mean,
score_std,
)
estimator_names = (
tuple(estimator_list) if estimator_names is None else estimator_names
)
dataset_names = tuple(dataset_list) if dataset_names is None else dataset_names
matrix_shape = (len(dataset_names), len(estimator_names))
scores = np.full(matrix_shape + (nobs,), np.nan)
scores_mean = np.full(matrix_shape, np.nan)
scores_std = np.full(matrix_shape, np.nan)
for i, dataset_name in enumerate(dataset_names):
for j, estimator_name in enumerate(estimator_names):
dict_estimator = dict_experiments.get(estimator_name, {})
s, mean, std = dict_estimator.get(
dataset_name,
(np.array([]), np.nan, np.nan),
)
if len(s) == nobs:
scores[i, j] = s
scores_mean[i, j] = mean
scores_std[i, j] = std
scores = np.array(scores.tolist())
return ScoresInfo(
dataset_names=dataset_names,
estimator_names=estimator_names,
scores=scores,
scores_mean=scores_mean,
scores_std=scores_std,
) | 0.889939 | 0.398699 |
# scikit-dda
Scikit-learn-compatible Deep Discriminant Analysis
## Status
[![Build Status](https://travis-ci.com/daviddiazvico/scikit-dda.svg?branch=master)](https://travis-ci.com/daviddiazvico/scikit-dda)
[![Maintainability](https://api.codeclimate.com/v1/badges/a37c9ee152b41a0cb577/maintainability)](https://codeclimate.com/github/daviddiazvico/scikit-dda/maintainability)
[![Test Coverage](https://api.codeclimate.com/v1/badges/a37c9ee152b41a0cb577/test_coverage)](https://codeclimate.com/github/daviddiazvico/scikit-dda/test_coverage)
## Installation
Available in [PyPI](https://pypi.python.org/pypi?:action=display&name=scikit-dda)
```
pip install scikit-dda
```
## Documentation
Autogenerated and hosted in [GitHub Pages](https://daviddiazvico.github.io/scikit-dda/)
## Distribution
Run the following command from the project home to create the distribution
```
python setup.py sdist bdist_wheel
```
and upload the package to [testPyPI](https://testpypi.python.org/)
```
twine upload --repository-url https://test.pypi.org/legacy/ dist/*
```
or [PyPI](https://pypi.python.org/)
```
twine upload dist/*
```
## Citation
If you find scikit-dda useful, please cite it in your publications. You can use this [BibTeX](http://www.bibtex.org/) entry:
```
@misc{scikit-dda,
title={scikit-dda},
author={Diaz-Vico, David},
year={2019},
publisher={GitHub},
howpublished={\url{https://github.com/daviddiazvico/scikit-dda}}}
``` | scikit-dda | /scikit-dda-0.1.1.tar.gz/scikit-dda-0.1.1/README.md | README.md | pip install scikit-dda
python setup.py sdist bdist_wheel
twine upload --repository-url https://test.pypi.org/legacy/ dist/*
twine upload dist/*
@misc{scikit-dda,
title={scikit-dda},
author={Diaz-Vico, David},
year={2019},
publisher={GitHub},
howpublished={\url{https://github.com/daviddiazvico/scikit-dda}}} | 0.574395 | 0.98163 |
_ __ _ __ __ _ __
_____ _____ (_)/ /__ (_)/ /_ ____/ /___ _____ (_)____/ /___
/ ___// ___// // //_// // __/______ / __ // _ \ / ___// // __ // _ \
(__ )/ /__ / // ,< / // /_ /_____// /_/ // __// /__ / // /_/ // __/
/____/ \___//_//_/|_|/_/ \__/ \__,_/ \___/ \___//_/ \__,_/ \___/
<br>
<p align="center">
<a href="https://github.com/airbus/scikit-decide/actions/workflows/build.yml?query=branch%3Amaster">
<img src="https://img.shields.io/github/actions/workflow/status/airbus/scikit-decide/build.yml?branch=master&logo=github&label=CI%20status" alt="actions status">
</a>
<a href="https://github.com/airbus/scikit-decide/tags">
<img src="https://img.shields.io/github/tag/airbus/scikit-decide.svg?label=current%20version" alt="version">
</a>
<a href="https://github.com/airbus/scikit-decide/stargazers">
<img src="https://img.shields.io/github/stars/airbus/scikit-decide.svg" alt="stars">
</a>
<a href="https://github.com/airbus/scikit-decide/network">
<img src="https://img.shields.io/github/forks/airbus/scikit-decide.svg" alt="forks">
</a>
</p>
<br>
# Scikit-decide for Python
Scikit-decide is an AI framework for Reinforcement Learning, Automated Planning and Scheduling.
## Installation
Quick version:
```shell
pip install scikit-decide[all]
```
For more details, see the [online documentation](https://airbus.github.io/scikit-decide/install).
## Documentation
The latest documentation is available [online](https://airbus.github.io/scikit-decide).
## Examples
Some educational notebooks are available in `notebooks/` folder.
Links to launch them online with [binder](https://mybinder.org/) are provided in the
[Notebooks section](https://airbus.github.io/scikit-decide/notebooks) of the online documentation.
More examples can be found as Python scripts in the `examples/` folder, showing how to import or define a domain,
and how to run or solve it. Most of the examples rely on scikit-decide Hub, an extensible catalog of domains/solvers.
## Contributing
See more about how to contribute in the [online documentation](https://airbus.github.io/scikit-decide/contribute).
| scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/README.md | README.md | pip install scikit-decide[all] | 0.591015 | 0.282923 |
"""This module contains base classes for quickly building solvers."""
from __future__ import annotations
from typing import Callable, List
from skdecide.builders.solver.policy import DeterministicPolicies
from skdecide.core import D, autocast_all, autocastable
from skdecide.domains import Domain
__all__ = ["Solver", "DeterministicPolicySolver"]
# MAIN BASE CLASS
class Solver:
"""This is the highest level solver class (inheriting top-level class for each mandatory solver characteristic).
This helper class can be used as the main base class for solvers.
Typical use:
```python
class MySolver(Solver, ...)
```
with "..." replaced when needed by a number of classes from following domain characteristics (the ones in
parentheses are optional):
- **(assessability)**: Utilities -> QValues
- **(policy)**: Policies -> UncertainPolicies -> DeterministicPolicies
- **(restorability)**: Restorable
"""
T_domain = Domain
@classmethod
def get_domain_requirements(cls) -> List[type]:
"""Get domain requirements for this solver class to be applicable.
Domain requirements are classes from the #skdecide.builders.domain package that the domain needs to inherit from.
# Returns
A list of classes to inherit from.
"""
return cls._get_domain_requirements()
@classmethod
def _get_domain_requirements(cls) -> List[type]:
"""Get domain requirements for this solver class to be applicable.
Domain requirements are classes from the #skdecide.builders.domain package that the domain needs to inherit from.
# Returns
A list of classes to inherit from.
"""
def is_domain_builder(
cls,
): # detected by having only single-'base class' ancestors until root
remove_ancestors = []
while True:
bases = cls.__bases__
if len(bases) == 0:
return True, remove_ancestors
elif len(bases) == 1:
cls = bases[0]
remove_ancestors.append(cls)
else:
return False, []
i = 0
sorted_ancestors = list(cls.T_domain.__mro__[:-1])
while i < len(sorted_ancestors):
ancestor = sorted_ancestors[i]
is_builder, remove_ancestors = is_domain_builder(ancestor)
if is_builder:
sorted_ancestors = [
a for a in sorted_ancestors if a not in remove_ancestors
]
i += 1
else:
sorted_ancestors.remove(ancestor)
return sorted_ancestors
@classmethod
def check_domain(cls, domain: Domain) -> bool:
"""Check whether a domain is compliant with this solver type.
By default, #Solver.check_domain() provides some boilerplate code and internally
calls #Solver._check_domain_additional() (which returns True by default but can be overridden to define
specific checks in addition to the "domain requirements"). The boilerplate code automatically checks whether all
domain requirements are met.
# Parameters
domain: The domain to check.
# Returns
True if the domain is compliant with the solver type (False otherwise).
"""
return cls._check_domain(domain)
@classmethod
def _check_domain(cls, domain: Domain) -> bool:
"""Check whether a domain is compliant with this solver type.
By default, #Solver._check_domain() provides some boilerplate code and internally
calls #Solver._check_domain_additional() (which returns True by default but can be overridden to define specific
checks in addition to the "domain requirements"). The boilerplate code automatically checks whether all domain
requirements are met.
# Parameters
domain: The domain to check.
# Returns
True if the domain is compliant with the solver type (False otherwise).
"""
check_requirements = all(
isinstance(domain, req) for req in cls._get_domain_requirements()
)
return check_requirements and cls._check_domain_additional(domain)
@classmethod
def _check_domain_additional(cls, domain: D) -> bool:
"""Check whether the given domain is compliant with the specific requirements of this solver type (i.e. the
ones in addition to "domain requirements").
This is a helper function called by default from #Solver._check_domain(). It focuses on specific checks, as
opposed to taking also into account the domain requirements for the latter.
# Parameters
domain: The domain to check.
# Returns
True if the domain is compliant with the specific requirements of this solver type (False otherwise).
"""
return True
def reset(self) -> None:
"""Reset whatever is needed on this solver before running a new episode.
This function does nothing by default but can be overridden if needed (e.g. to reset the hidden state of a LSTM
policy network, which carries information about past observations seen in the previous episode).
"""
return self._reset()
def _reset(self) -> None:
"""Reset whatever is needed on this solver before running a new episode.
This function does nothing by default but can be overridden if needed (e.g. to reset the hidden state of a LSTM
policy network, which carries information about past observations seen in the previous episode).
"""
pass
def solve(self, domain_factory: Callable[[], Domain]) -> None:
"""Run the solving process.
By default, #Solver.solve() provides some boilerplate code and internally calls #Solver._solve(). The
boilerplate code transforms the domain factory to auto-cast the new domains to the level expected by the solver.
# Parameters
domain_factory: A callable with no argument returning the domain to solve (can be just a domain class).
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
return self._solve(domain_factory)
def _solve(self, domain_factory: Callable[[], Domain]) -> None:
"""Run the solving process.
By default, #Solver._solve() provides some boilerplate code and internally calls #Solver._solve_domain(). The
boilerplate code transforms the domain factory to auto-cast the new domains to the level expected by the solver.
# Parameters
domain_factory: A callable with no argument returning the domain to solve (can be just a domain class).
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
def cast_domain_factory():
domain = domain_factory()
autocast_all(domain, domain, self.T_domain)
return domain
return self._solve_domain(cast_domain_factory)
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
"""Run the solving process.
This is a helper function called by default from #Solver._solve(), the difference being that the domain factory
here returns domains auto-cast to the level expected by the solver.
# Parameters
domain_factory: A callable with no argument returning the domain to solve (auto-cast to expected level).
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
raise NotImplementedError
@autocastable
def solve_from(self, memory: D.T_memory[D.T_state]) -> None:
"""Run the solving process from a given state.
!!! tip
Create the domain first by calling the @Solver.reset() method
# Parameters
memory: The source memory (state or history) of the transition.
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
return self._solve_from(memory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
"""Run the solving process from a given state.
!!! tip
Create the domain first by calling the @Solver.reset() method
# Parameters
memory: The source memory (state or history) of the transition.
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
pass
def _initialize(self):
"""Runs long-lasting initialization code here, or code to be executed at the
entering of a 'with' context statement.
"""
pass
def _cleanup(self):
"""Runs cleanup code here, or code to be executed at the exit of a
'with' context statement.
"""
pass
def __enter__(self):
"""Allow for calling the solver within a 'with' context statement.
Note that some solvers require such context statements to properly
clean their status before exiting the Python interpreter, thus it
is a good habit to always call solvers within a 'with' statement.
"""
self._initialize()
return self
def __exit__(self, type, value, tb):
"""Allow for calling the solver within a 'with' context statement.
Note that some solvers require such context statements to properly
clean their status before exiting the Python interpreter, thus it
is a good habit to always call solvers within a 'with' statement.
"""
self._cleanup()
# ALTERNATE BASE CLASSES (for typical combinations)
class DeterministicPolicySolver(Solver, DeterministicPolicies):
"""This is a typical deterministic policy solver class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Solver
- DeterministicPolicies
Typical use:
```python
class MySolver(DeterministicPolicySolver)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class MySolver(DeterministicPolicySolver, QValues)
```
"""
pass | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/solvers.py | solvers.py |
"""This module contains base classes for quickly building solvers."""
from __future__ import annotations
from typing import Callable, List
from skdecide.builders.solver.policy import DeterministicPolicies
from skdecide.core import D, autocast_all, autocastable
from skdecide.domains import Domain
__all__ = ["Solver", "DeterministicPolicySolver"]
# MAIN BASE CLASS
class Solver:
"""This is the highest level solver class (inheriting top-level class for each mandatory solver characteristic).
This helper class can be used as the main base class for solvers.
Typical use:
```python
class MySolver(Solver, ...)
```
with "..." replaced when needed by a number of classes from following domain characteristics (the ones in
parentheses are optional):
- **(assessability)**: Utilities -> QValues
- **(policy)**: Policies -> UncertainPolicies -> DeterministicPolicies
- **(restorability)**: Restorable
"""
T_domain = Domain
@classmethod
def get_domain_requirements(cls) -> List[type]:
"""Get domain requirements for this solver class to be applicable.
Domain requirements are classes from the #skdecide.builders.domain package that the domain needs to inherit from.
# Returns
A list of classes to inherit from.
"""
return cls._get_domain_requirements()
@classmethod
def _get_domain_requirements(cls) -> List[type]:
"""Get domain requirements for this solver class to be applicable.
Domain requirements are classes from the #skdecide.builders.domain package that the domain needs to inherit from.
# Returns
A list of classes to inherit from.
"""
def is_domain_builder(
cls,
): # detected by having only single-'base class' ancestors until root
remove_ancestors = []
while True:
bases = cls.__bases__
if len(bases) == 0:
return True, remove_ancestors
elif len(bases) == 1:
cls = bases[0]
remove_ancestors.append(cls)
else:
return False, []
i = 0
sorted_ancestors = list(cls.T_domain.__mro__[:-1])
while i < len(sorted_ancestors):
ancestor = sorted_ancestors[i]
is_builder, remove_ancestors = is_domain_builder(ancestor)
if is_builder:
sorted_ancestors = [
a for a in sorted_ancestors if a not in remove_ancestors
]
i += 1
else:
sorted_ancestors.remove(ancestor)
return sorted_ancestors
@classmethod
def check_domain(cls, domain: Domain) -> bool:
"""Check whether a domain is compliant with this solver type.
By default, #Solver.check_domain() provides some boilerplate code and internally
calls #Solver._check_domain_additional() (which returns True by default but can be overridden to define
specific checks in addition to the "domain requirements"). The boilerplate code automatically checks whether all
domain requirements are met.
# Parameters
domain: The domain to check.
# Returns
True if the domain is compliant with the solver type (False otherwise).
"""
return cls._check_domain(domain)
@classmethod
def _check_domain(cls, domain: Domain) -> bool:
"""Check whether a domain is compliant with this solver type.
By default, #Solver._check_domain() provides some boilerplate code and internally
calls #Solver._check_domain_additional() (which returns True by default but can be overridden to define specific
checks in addition to the "domain requirements"). The boilerplate code automatically checks whether all domain
requirements are met.
# Parameters
domain: The domain to check.
# Returns
True if the domain is compliant with the solver type (False otherwise).
"""
check_requirements = all(
isinstance(domain, req) for req in cls._get_domain_requirements()
)
return check_requirements and cls._check_domain_additional(domain)
@classmethod
def _check_domain_additional(cls, domain: D) -> bool:
"""Check whether the given domain is compliant with the specific requirements of this solver type (i.e. the
ones in addition to "domain requirements").
This is a helper function called by default from #Solver._check_domain(). It focuses on specific checks, as
opposed to taking also into account the domain requirements for the latter.
# Parameters
domain: The domain to check.
# Returns
True if the domain is compliant with the specific requirements of this solver type (False otherwise).
"""
return True
def reset(self) -> None:
"""Reset whatever is needed on this solver before running a new episode.
This function does nothing by default but can be overridden if needed (e.g. to reset the hidden state of a LSTM
policy network, which carries information about past observations seen in the previous episode).
"""
return self._reset()
def _reset(self) -> None:
"""Reset whatever is needed on this solver before running a new episode.
This function does nothing by default but can be overridden if needed (e.g. to reset the hidden state of a LSTM
policy network, which carries information about past observations seen in the previous episode).
"""
pass
def solve(self, domain_factory: Callable[[], Domain]) -> None:
"""Run the solving process.
By default, #Solver.solve() provides some boilerplate code and internally calls #Solver._solve(). The
boilerplate code transforms the domain factory to auto-cast the new domains to the level expected by the solver.
# Parameters
domain_factory: A callable with no argument returning the domain to solve (can be just a domain class).
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
return self._solve(domain_factory)
def _solve(self, domain_factory: Callable[[], Domain]) -> None:
"""Run the solving process.
By default, #Solver._solve() provides some boilerplate code and internally calls #Solver._solve_domain(). The
boilerplate code transforms the domain factory to auto-cast the new domains to the level expected by the solver.
# Parameters
domain_factory: A callable with no argument returning the domain to solve (can be just a domain class).
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
def cast_domain_factory():
domain = domain_factory()
autocast_all(domain, domain, self.T_domain)
return domain
return self._solve_domain(cast_domain_factory)
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
"""Run the solving process.
This is a helper function called by default from #Solver._solve(), the difference being that the domain factory
here returns domains auto-cast to the level expected by the solver.
# Parameters
domain_factory: A callable with no argument returning the domain to solve (auto-cast to expected level).
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
raise NotImplementedError
@autocastable
def solve_from(self, memory: D.T_memory[D.T_state]) -> None:
"""Run the solving process from a given state.
!!! tip
Create the domain first by calling the @Solver.reset() method
# Parameters
memory: The source memory (state or history) of the transition.
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
return self._solve_from(memory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
"""Run the solving process from a given state.
!!! tip
Create the domain first by calling the @Solver.reset() method
# Parameters
memory: The source memory (state or history) of the transition.
!!! tip
The nature of the solutions produced here depends on other solver's characteristics like
#policy and #assessibility.
"""
pass
def _initialize(self):
"""Runs long-lasting initialization code here, or code to be executed at the
entering of a 'with' context statement.
"""
pass
def _cleanup(self):
"""Runs cleanup code here, or code to be executed at the exit of a
'with' context statement.
"""
pass
def __enter__(self):
"""Allow for calling the solver within a 'with' context statement.
Note that some solvers require such context statements to properly
clean their status before exiting the Python interpreter, thus it
is a good habit to always call solvers within a 'with' statement.
"""
self._initialize()
return self
def __exit__(self, type, value, tb):
"""Allow for calling the solver within a 'with' context statement.
Note that some solvers require such context statements to properly
clean their status before exiting the Python interpreter, thus it
is a good habit to always call solvers within a 'with' statement.
"""
self._cleanup()
# ALTERNATE BASE CLASSES (for typical combinations)
class DeterministicPolicySolver(Solver, DeterministicPolicies):
"""This is a typical deterministic policy solver class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Solver
- DeterministicPolicies
Typical use:
```python
class MySolver(DeterministicPolicySolver)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class MySolver(DeterministicPolicySolver, QValues)
```
"""
pass | 0.966851 | 0.709538 |
"""This module contains base classes for quickly building domains."""
from __future__ import annotations
import logging
import os
import tempfile
from typing import Callable, NewType, Optional
# Following import is required to make Enum objects serializable
# (useful when multiprocessing and pickling domains that use Enum classes)
import dill
from pathos.helpers import mp
from pynng import Push0
dill.settings["byref"] = True
from skdecide.builders.domain.agent import MultiAgent, SingleAgent
from skdecide.builders.domain.concurrency import Parallel, Sequential
from skdecide.builders.domain.dynamics import (
DeterministicTransitions,
EnumerableTransitions,
Environment,
Simulation,
)
from skdecide.builders.domain.events import Actions, Events
from skdecide.builders.domain.goals import Goals
from skdecide.builders.domain.initialization import (
DeterministicInitialized,
Initializable,
UncertainInitialized,
)
from skdecide.builders.domain.memory import History, Markovian
from skdecide.builders.domain.observability import (
FullyObservable,
PartiallyObservable,
TransformedObservable,
)
from skdecide.builders.domain.value import PositiveCosts, Rewards
from skdecide.core import autocast_all
if (
False
): # trick to avoid circular import & IDE error ("Unresolved reference 'Solver'")
from skdecide.solvers import Solver
__all__ = [
"Domain",
"RLDomain",
"MultiAgentRLDomain",
"StatelessSimulatorDomain",
"MDPDomain",
"POMDPDomain",
"GoalMDPDomain",
"GoalPOMDPDomain",
"DeterministicPlanningDomain",
]
logger = logging.getLogger("skdecide.domains")
logger.setLevel(logging.INFO)
if not len(logger.handlers):
ch = logging.StreamHandler()
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(asctime)s | %(name)s | %(levelname)s | %(message)s"
)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.propagate = False
# MAIN BASE CLASS
class Domain(
MultiAgent, Parallel, Environment, Events, History, PartiallyObservable, Rewards
):
"""This is the highest level domain class (inheriting top-level class for each mandatory domain characteristic).
This helper class can be used as the main base class for domains.
Typical use:
```python
class D(Domain, ...)
```
with "..." replaced when needed by a number of classes from following domain characteristics (the ones in
parentheses are optional):
- **agent**: MultiAgent -> SingleAgent
- **concurrency**: Parallel -> Sequential
- **(constraints)**: Constrained
- **dynamics**: Environment -> Simulation -> UncertainTransitions -> EnumerableTransitions
-> DeterministicTransitions
- **events**: Events -> Actions -> UnrestrictedActions
- **(goals)**: Goals
- **(initialization)**: Initializable -> UncertainInitialized -> DeterministicInitialized
- **memory**: History -> FiniteHistory -> Markovian -> Memoryless
- **observability**: PartiallyObservable -> TransformedObservable -> FullyObservable
- **(renderability)**: Renderable
- **value**: Rewards -> PositiveCosts
"""
T_state = NewType("T_state", object)
T_observation = NewType("T_observation", object)
T_event = NewType("T_event", object)
T_value = NewType("T_value", object)
T_predicate = NewType("T_predicate", object)
T_info = NewType("T_info", object)
@classmethod
def solve_with(
cls,
solver: Solver,
domain_factory: Optional[Callable[[], Domain]] = None,
load_path: Optional[str] = None,
) -> Solver:
"""Solve the domain with a new or loaded solver and return it auto-cast to the level of the domain.
By default, #Solver.check_domain() provides some boilerplate code and internally
calls #Solver._check_domain_additional() (which returns True by default but can be overridden to define
specific checks in addition to the "domain requirements"). The boilerplate code automatically checks whether all
domain requirements are met.
# Parameters
solver: The solver.
domain_factory: A callable with no argument returning the domain to solve (factory is the domain class if None).
load_path: The path to restore the solver state from (if None, the solving process will be launched instead).
# Returns
The new solver (auto-cast to the level of the domain).
"""
if domain_factory is None:
domain_factory = cls
if load_path is not None:
# TODO: avoid repeating this code somehow (identical in solver.solve(...))? Is factory necessary (vs cls)?
def cast_domain_factory():
domain = domain_factory()
autocast_all(domain, domain, solver.T_domain)
return domain
solver.load(load_path, cast_domain_factory)
else:
solver.solve(domain_factory)
autocast_all(solver, solver.T_domain, cls)
return solver
# ALTERNATE BASE CLASSES (for typical combinations)
class RLDomain(
Domain,
SingleAgent,
Sequential,
Environment,
Actions,
Initializable,
Markovian,
TransformedObservable,
Rewards,
):
"""This is a typical Reinforcement Learning domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- Environment
- Actions
- Initializable
- Markovian
- TransformedObservable
- Rewards
Typical use:
```python
class D(RLDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class MultiAgentRLDomain(
Domain,
MultiAgent,
Sequential,
Environment,
Actions,
Initializable,
Markovian,
TransformedObservable,
Rewards,
):
"""This is a typical multi-agent Reinforcement Learning domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- MultiAgent
- Sequential
- Environment
- Actions
- Initializable
- Markovian
- TransformedObservable
- Rewards
Typical use:
```python
class D(RLDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class StatelessSimulatorDomain(
Domain,
SingleAgent,
Sequential,
Simulation,
Actions,
Markovian,
TransformedObservable,
Rewards,
):
"""This is a typical stateless simulator domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- Simulation
- Actions
- Markovian
- TransformedObservable
- Rewards
Typical use:
```python
class D(StatelessSimulatorDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class MDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
DeterministicInitialized,
Markovian,
FullyObservable,
Rewards,
):
"""This is a typical Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- DeterministicInitialized
- Markovian
- FullyObservable
- Rewards
Typical use:
```python
class D(MDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class POMDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
UncertainInitialized,
Markovian,
PartiallyObservable,
Rewards,
):
"""This is a typical Partially Observable Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- UncertainInitialized
- Markovian
- PartiallyObservable
- Rewards
Typical use:
```python
class D(POMDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class GoalMDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
Goals,
DeterministicInitialized,
Markovian,
FullyObservable,
PositiveCosts,
):
"""This is a typical Goal Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- Goals
- DeterministicInitialized
- Markovian
- FullyObservable
- PositiveCosts
Typical use:
```python
class D(GoalMDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class GoalPOMDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
Goals,
UncertainInitialized,
Markovian,
PartiallyObservable,
PositiveCosts,
):
"""This is a typical Goal Partially Observable Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- Goals
- UncertainInitialized
- Markovian
- PartiallyObservable
- PositiveCosts
Typical use:
```python
class D(GoalPOMDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class DeterministicPlanningDomain(
Domain,
SingleAgent,
Sequential,
DeterministicTransitions,
Actions,
Goals,
DeterministicInitialized,
Markovian,
FullyObservable,
PositiveCosts,
):
"""This is a typical deterministic planning domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- DeterministicTransitions
- Actions
- Goals
- DeterministicInitialized
- Markovian
- FullyObservable
- PositiveCosts
Typical use:
```python
class D(DeterministicPlanningDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class ParallelDomain:
"""Base class for creating and launching n domains in separate processes.
Each domain listens for incoming domain requests.
Each request can indicate which domain should serve it, otherwise the first available
domain i is chosen and its id is returned to the incoming request.
"""
def __init__(
self, domain_factory, lambdas=None, nb_domains=os.cpu_count(), ipc_notify=False
):
self._domain_factory = domain_factory
self._lambdas = lambdas
self._active_domains = mp.Array(
"b", [False for i in range(nb_domains)], lock=True
)
self._initializations = [
mp.Value("b", False, lock=True) for i in range(nb_domains)
]
self._conditions = [mp.Condition() for i in range(nb_domains)]
self._temp_connections = [None] * nb_domains
self._ipc_connections = [None] * nb_domains
self._processes = [None] * nb_domains
self._ipc_notify = ipc_notify
def open_ipc_connection(self, i):
self._temp_connections[i] = tempfile.NamedTemporaryFile(delete=True)
self._ipc_connections[i] = "ipc://" + self._temp_connections[i].name + ".ipc"
def close_ipc_connection(self, i):
self._temp_connections[i].close()
self._ipc_connections[i] = None
def _launch_processes(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def __enter__(self):
self._launch_processes()
return self
def __exit__(self, type, value, tb):
self.close()
def launch(self, i, name, *args):
raise NotImplementedError
def get_proc_connections(self): # process connections for use in python
raise NotImplementedError
def get_ipc_connections(self): # inter-process connections for use with C++
return self._ipc_connections
def get_parallel_capacity(self):
return self.nb_domains()
def nb_domains(self):
return len(self._processes)
def wake_up_domain(self, i=None):
if i is None:
while True:
for j, v in enumerate(self._active_domains):
if not v:
self._active_domains[j] = True
return j
else:
self._active_domains[i] = True
return i
def reset(self, i=None):
return self.launch(i, "reset")
def get_initial_state_distribution(self, i=None):
return self.launch(i, "get_initial_state_distribution")
def get_initial_state(self, i=None):
return self.launch(i, "get_initial_state")
def get_observation_space(self, i=None):
return self.launch(i, "get_observation_space")
def is_observation(self, observation, i=None):
return self.launch(i, "is_observation", observation)
def get_observation_distribution(self, state, action, i=None):
return self.launch(i, "get_observation_distribution", state, action)
def get_observation(self, state, action, i=None):
return self.launch(i, "get_observation", state, action)
def get_enabled_events(self, memory, i=None):
return self.launch(i, "get_enabled_events", memory)
def is_enabled_event(self, event, memory, i=None):
return self.launch(i, "is_enabled_event", event, memory)
def get_action_space(self, i=None):
return self.launch(i, "get_action_space")
def is_action(self, event, i=None):
return self.launch(i, "is_action", event)
def get_applicable_actions(self, memory, i=None):
return self.launch(i, "get_applicable_actions", memory)
def is_applicable_action(self, action, memory, i=None):
return self.launch(i, "is_applicable_action", action, memory)
def step(self, action, i=None):
return self.launch(i, "step", action)
def sample(self, memory, action, i=None):
return self.launch(i, "sample", memory, action)
def get_next_state_distribution(self, memory, action, i=None):
return self.launch(i, "get_next_state_distribution", memory, action)
def get_next_state(self, memory, action, i=None):
return self.launch(i, "get_next_state", memory, action)
def get_transition_value(self, memory, action, next_state, i=None):
return self.launch(i, "get_transition_value", memory, action, next_state)
def is_transition_value_dependent_on_next_state(self, i=None):
return self.launch(i, "is_transition_value_dependent_on_next_state")
def get_goals(self, i=None):
return self.launch(i, "get_goals")
def is_goal(self, observation, i=None):
return self.launch(i, "is_goal", observation)
def is_terminal(self, state, i=None):
return self.launch(i, "is_terminal", state)
def check_value(self, value, i=None):
return self.launch(i, "check_value", value)
def render(self, memory, i=None):
return self.launch(i, "render", memory)
# Call a lambda function (usually involves the original domain)
def call(self, i, lambda_id, *args):
return self.launch(i, lambda_id, *args)
# The original sequential domain may have methods we don't know
def __getattr__(self, name):
def method(*args, i=None):
return self.launch(i, name, *args)
return method
# Bypass __getattr_.method() when serializing the class.
# Required on Windows when spawning the main process.
def __getstate__(self):
d = self.__dict__.copy()
del d["_temp_connections"] # we cannot serialize a file
return d
# Bypass __getattr_.method() when serializing the class.
# Required on Windows when spawning the main process.
def __setstate__(self, state):
self.__dict__ = state
# TODO: reopen the temp connection from _ipc_connections
def _launch_domain_server_(
domain_factory, lambdas, i, job_results, conn, init, cond, ipc_conn, logger
):
domain = domain_factory()
if ipc_conn is not None:
pusher = Push0()
pusher.dial(ipc_conn)
with cond:
init.value = True
cond.notify_all() # inform the parent process that we are ready to process requests
while True:
job = conn.recv()
job_results[i] = None
if job is None:
if ipc_conn is not None:
pusher.close()
conn.close()
break
else:
try:
if isinstance(job[0], str): # job[0] is a domain class' method
r = getattr(domain, job[0])(*job[1])
else: # job[0] is a lambda function
r = lambdas[job[0]](domain, *job[1])
job_results[i] = r
if ipc_conn is not None:
pusher.send(b"0") # send success
conn.send("0") # send success
except Exception as e:
logger.error(rf"/!\ Unable to perform job {job[0]}: {e}")
if ipc_conn is not None:
pusher.send(str(e).encode(encoding="UTF-8")) # send error message
else:
conn.send(str(e)) # send failure (!= 0)
class PipeParallelDomain(ParallelDomain):
"""This class can be used to create and launch n domains in separate processes.
Each domain listens for incoming domain requests.
Each request can indicate which domain should serve it, otherwise the first available
domain i is chosen and its id is returned to the incoming request.
"""
def __init__(
self, domain_factory, lambdas=None, nb_domains=os.cpu_count(), ipc_notify=False
):
super().__init__(domain_factory, lambdas, nb_domains, ipc_notify)
self._manager = mp.Manager()
self._waiting_jobs = [None] * nb_domains
self._job_results = self._manager.list([None for i in range(nb_domains)])
logger.info(rf"Using {nb_domains} parallel piped domains")
def get_proc_connections(self):
return self._waiting_jobs
def launch(self, i, function, *args):
if not any(self._processes):
self._launch_processes()
try:
mi = self.wake_up_domain(i)
self._waiting_jobs[mi].send((function, args))
return mi
except Exception as e:
if isinstance(function, str):
logger.error(rf"/!\ Unable to launch job {function}: {e}")
else:
logger.error(rf"/!\ Unable to launch job lambdas[{function}]: {e}")
def get_result(self, i):
self._waiting_jobs[i].recv()
r = self._job_results[i]
self._job_results[i] = None
self._active_domains[i] = False
return r
def _launch_processes(self):
for i in range(len(self._job_results)):
self.open_ipc_connection(i)
pparent, pchild = mp.Pipe()
self._waiting_jobs[i] = pparent
self._processes[i] = mp.Process(
target=_launch_domain_server_,
args=[
self._domain_factory,
self._lambdas,
i,
self._job_results,
pchild,
self._initializations[i],
self._conditions[i],
self._ipc_connections[i] if self._ipc_notify else None,
logger,
],
)
self._processes[i].start()
# Waits for all jobs to be launched and waiting each for requests
for i in range(len(self._job_results)):
with self._conditions[i]:
self._conditions[i].wait_for(
lambda: bool(self._initializations[i].value) == True
)
def close(self):
for i in range(len(self._job_results)):
self._initializations[i].value = False
self._waiting_jobs[i].send(None)
self._processes[i].join()
self._processes[i].close()
self._waiting_jobs[i].close()
self._processes[i] = None
self.close_ipc_connection(i)
def _shm_launch_domain_server_(
domain_factory,
lambdas,
i,
shm_proxy,
shm_registers,
shm_types,
shm_sizes,
rsize,
shm_arrays,
shm_lambdas,
shm_names,
shm_params,
init,
activation,
done,
cond,
ipc_conn,
logger,
):
domain = domain_factory()
if ipc_conn is not None:
pusher = Push0()
pusher.dial(ipc_conn)
with cond:
init.value = True
cond.notify_all() # inform the parent process that we are ready to process requests
def get_string(s):
for i, c in enumerate(s):
if c == b"\x00":
return s[:i].decode()
return s.decode()
while True:
with cond:
cond.wait_for(lambda: bool(activation.value) == True)
activation.value = False
if int(shm_lambdas[i].value) == -1 and shm_names[i][0] == b"\x00":
if ipc_conn is not None:
pusher.close()
break
else:
try:
job_args = []
for p in shm_params[i]:
if p >= 0:
sz = shm_sizes[shm_types[p].__name__]
if sz > 1:
si = (i * rsize) + p
job_args.append(
shm_proxy.decode(
shm_types[p], shm_arrays[si : (si + sz)]
)
)
else:
job_args.append(
shm_proxy.decode(
shm_types[p], shm_arrays[(i * rsize) + p]
)
)
else:
break # no more args
if (
int(shm_lambdas[i].value) == -1
): # we are working with a domain class' method
result = getattr(domain, get_string(shm_names[i]))(*job_args)
else: # we are working with a lambda function
result = lambdas[int(shm_lambdas[i].value)](domain, *job_args)
shm_params[i][:] = [-1] * len(shm_params[i])
if type(result) is not tuple:
result = (result,)
if result[0] is not None:
type_counters = {}
for j, r in enumerate(result):
res_name = type(r).__name__
(start, end) = shm_registers[res_name]
if res_name in type_counters:
type_counters[res_name] += 1
k = type_counters[res_name]
if k >= end:
raise IndexError(
"""No more available register for type {}.
Please increase the number of registers
for that type.""".format(
res_name
)
)
else:
type_counters[res_name] = start
k = start
shm_params[i][j] = k
sz = shm_sizes[res_name]
if sz > 1:
si = (i * rsize) + k
shm_proxy.encode(r, shm_arrays[si : (si + sz)])
else:
shm_proxy.encode(r, shm_arrays[(i * rsize) + k])
if ipc_conn is not None:
pusher.send(b"0") # send success
except Exception as e:
if int(shm_lambdas[i].value) == -1:
logger.error(
rf"/!\ Unable to perform job {get_string(shm_names[i])}: {e}"
)
else:
logger.error(
rf"/!\ Unable to perform job {int(shm_lambdas[i].value)}: {e}"
)
if ipc_conn is not None:
pusher.send(str(e).encode(encoding="UTF-8")) # send error message
with cond:
done.value = True
cond.notify_all() # send finished status (no success nor failure information)
class ShmParallelDomain(ParallelDomain):
"""This class can be used to create and launch n domains in separate processes
with shared memory between the Python processes.
Each domain listens for incoming domain requests.
Each request can indicate which domain should serve it, otherwise the first available
domain is chosen and its id is returned to the incoming request.
"""
def __init__(
self,
domain_factory,
shm_proxy,
lambdas=None,
nb_domains=os.cpu_count(),
ipc_notify=False,
):
super().__init__(domain_factory, lambdas, nb_domains, ipc_notify)
self._activations = [mp.Value("b", False, lock=True) for i in range(nb_domains)]
self._dones = [mp.Value("b", False, lock=True) for i in range(nb_domains)]
self._shm_proxy = shm_proxy
self._shm_registers = (
{}
) # Maps from registered method parameter types to vectorized array ranges
self._shm_types = {} # Maps from register index to type
self._shm_sizes = (
{}
) # Maps from register method parameter types to number of arrays encoding each type
self._shm_arrays = [] # Methods' vectorized parameters
self._rsize = 0 # Total size of the register (updated below)
self._shm_lambdas = [None] * nb_domains # Vectorized lambdas' ids
self._shm_names = [None] * nb_domains # Vectorized methods' names
self._shm_params = [
None
] * nb_domains # Indices of methods' vectorized parameters
for i in range(nb_domains):
j = 0
for r in shm_proxy.register():
for k in range(r[1]):
m = shm_proxy.initialize(r[0])
if type(m) == list or type(m) == tuple:
if (
i == 0 and k == 0
): # do it once for all the domains and redundant initializers
self._shm_sizes[r[0].__name__] = len(m)
self._shm_registers[r[0].__name__] = (
j,
j + (r[1] * len(m)),
)
self._shm_types.update(
{
kk: r[0]
for kk in range(j, j + (r[1] * len(m)), len(m))
}
)
self._rsize += r[1] * len(m)
self._shm_arrays.extend(m)
j += len(m)
else:
if (
i == 0 and k == 0
): # do it once for all the domains and redundant initializers
self._shm_sizes[r[0].__name__] = 1
self._shm_registers[r[0].__name__] = (j, j + r[1])
self._shm_types.update(
{kk: r[0] for kk in range(j, j + r[1])}
)
self._rsize += r[1]
self._shm_arrays.append(m)
j += 1
self._shm_lambdas[i] = mp.Value("i", -1, lock=True)
self._shm_names[i] = mp.Array("c", bytearray(100))
self._shm_params[i] = mp.Array(
"i", [-1] * sum(r[1] for r in shm_proxy.register())
)
logger.info(rf"Using {nb_domains} parallel shared memory domains")
def get_proc_connections(self):
return (self._activations, self._conditions)
def launch(self, i, function, *args):
if not any(self._processes):
self._launch_processes()
try:
mi = self.wake_up_domain(i)
if isinstance(function, str): # function is a domain class' method
self._shm_lambdas[mi].value = -1
self._shm_names[mi][:] = bytearray(
function, encoding="utf-8"
) + bytearray(len(self._shm_names[mi]) - len(function))
else: # function is a lambda id
self._shm_lambdas[mi].value = int(function)
self._shm_names[mi][:] = bytearray(
len(self._shm_names[mi])
) # reset with null bytes
self._shm_params[mi][:] = [-1] * len(self._shm_params[mi])
type_counters = {}
for j, a in enumerate(args):
arg_name = type(a).__name__
(start, end) = self._shm_registers[arg_name]
if arg_name in type_counters:
type_counters[arg_name] += self._shm_sizes[arg_name]
k = type_counters[arg_name]
if k >= end:
raise IndexError(
"""No more available register for type {}.
Please increase the number of registers
for that type.""".format(
arg_name
)
)
else:
type_counters[arg_name] = start
k = start
self._shm_params[mi][j] = k
sz = self._shm_sizes[arg_name]
if sz > 1:
si = (mi * self._rsize) + k
self._shm_proxy.encode(a, self._shm_arrays[si : (si + sz)])
else:
self._shm_proxy.encode(a, self._shm_arrays[(mi * self._rsize) + k])
with self._conditions[mi]:
self._activations[mi].value = True
self._conditions[mi].notify_all()
return mi
except Exception as e:
if isinstance(function, str):
logger.error(rf"/!\ Unable to launch job {function}: {e}")
else:
logger.error(rf"/!\ Unable to launch job lambdas[{function}]: {e}")
def get_result(self, i):
with self._conditions[i]:
self._conditions[i].wait_for(lambda: bool(self._dones[i].value) == True)
self._dones[i].value = False
results = []
for r in self._shm_params[i]:
if r >= 0:
sz = self._shm_sizes[self._shm_types[r].__name__]
if sz > 1:
si = (i * self._rsize) + r
results.append(
self._shm_proxy.decode(
self._shm_types[r], self._shm_arrays[si : (si + sz)]
)
)
else:
results.append(
self._shm_proxy.decode(
self._shm_types[r], self._shm_arrays[(i * self._rsize) + r]
)
)
else:
break # no more params
self._active_domains[i] = False
return results if len(results) > 1 else results[0] if len(results) > 0 else None
def _launch_processes(self):
for i in range(len(self._processes)):
self.open_ipc_connection(i)
self._processes[i] = mp.Process(
target=_shm_launch_domain_server_,
args=[
self._domain_factory,
self._lambdas,
i,
self._shm_proxy.copy(),
dict(self._shm_registers),
dict(self._shm_types),
dict(self._shm_sizes),
self._rsize,
list(self._shm_arrays),
list(self._shm_lambdas),
list(self._shm_names),
list(self._shm_params),
self._initializations[i],
self._activations[i],
self._dones[i],
self._conditions[i],
self._ipc_connections[i] if self._ipc_notify else None,
logger,
],
)
self._processes[i].start()
# Waits for all jobs to be launched and waiting each for requests
for i in range(len(self._processes)):
with self._conditions[i]:
self._conditions[i].wait_for(
lambda: bool(self._initializations[i].value) == True
)
def close(self):
for i in range(len(self._processes)):
self._initializations[i].value = False
self._shm_lambdas[i].value = -1
self._shm_names[i][:] = bytearray(
len(self._shm_names[i])
) # reset with null bytes
self._shm_params[i][:] = [-1] * len(self._shm_params[i])
with self._conditions[i]:
self._activations[i].value = True
self._conditions[i].notify_all()
self._processes[i].join()
self._processes[i].close()
self._processes[i] = None
self.close_ipc_connection(i) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/domains.py | domains.py |
"""This module contains base classes for quickly building domains."""
from __future__ import annotations
import logging
import os
import tempfile
from typing import Callable, NewType, Optional
# Following import is required to make Enum objects serializable
# (useful when multiprocessing and pickling domains that use Enum classes)
import dill
from pathos.helpers import mp
from pynng import Push0
dill.settings["byref"] = True
from skdecide.builders.domain.agent import MultiAgent, SingleAgent
from skdecide.builders.domain.concurrency import Parallel, Sequential
from skdecide.builders.domain.dynamics import (
DeterministicTransitions,
EnumerableTransitions,
Environment,
Simulation,
)
from skdecide.builders.domain.events import Actions, Events
from skdecide.builders.domain.goals import Goals
from skdecide.builders.domain.initialization import (
DeterministicInitialized,
Initializable,
UncertainInitialized,
)
from skdecide.builders.domain.memory import History, Markovian
from skdecide.builders.domain.observability import (
FullyObservable,
PartiallyObservable,
TransformedObservable,
)
from skdecide.builders.domain.value import PositiveCosts, Rewards
from skdecide.core import autocast_all
if (
False
): # trick to avoid circular import & IDE error ("Unresolved reference 'Solver'")
from skdecide.solvers import Solver
__all__ = [
"Domain",
"RLDomain",
"MultiAgentRLDomain",
"StatelessSimulatorDomain",
"MDPDomain",
"POMDPDomain",
"GoalMDPDomain",
"GoalPOMDPDomain",
"DeterministicPlanningDomain",
]
logger = logging.getLogger("skdecide.domains")
logger.setLevel(logging.INFO)
if not len(logger.handlers):
ch = logging.StreamHandler()
# create formatter and add it to the handlers
formatter = logging.Formatter(
"%(asctime)s | %(name)s | %(levelname)s | %(message)s"
)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
logger.propagate = False
# MAIN BASE CLASS
class Domain(
MultiAgent, Parallel, Environment, Events, History, PartiallyObservable, Rewards
):
"""This is the highest level domain class (inheriting top-level class for each mandatory domain characteristic).
This helper class can be used as the main base class for domains.
Typical use:
```python
class D(Domain, ...)
```
with "..." replaced when needed by a number of classes from following domain characteristics (the ones in
parentheses are optional):
- **agent**: MultiAgent -> SingleAgent
- **concurrency**: Parallel -> Sequential
- **(constraints)**: Constrained
- **dynamics**: Environment -> Simulation -> UncertainTransitions -> EnumerableTransitions
-> DeterministicTransitions
- **events**: Events -> Actions -> UnrestrictedActions
- **(goals)**: Goals
- **(initialization)**: Initializable -> UncertainInitialized -> DeterministicInitialized
- **memory**: History -> FiniteHistory -> Markovian -> Memoryless
- **observability**: PartiallyObservable -> TransformedObservable -> FullyObservable
- **(renderability)**: Renderable
- **value**: Rewards -> PositiveCosts
"""
T_state = NewType("T_state", object)
T_observation = NewType("T_observation", object)
T_event = NewType("T_event", object)
T_value = NewType("T_value", object)
T_predicate = NewType("T_predicate", object)
T_info = NewType("T_info", object)
@classmethod
def solve_with(
cls,
solver: Solver,
domain_factory: Optional[Callable[[], Domain]] = None,
load_path: Optional[str] = None,
) -> Solver:
"""Solve the domain with a new or loaded solver and return it auto-cast to the level of the domain.
By default, #Solver.check_domain() provides some boilerplate code and internally
calls #Solver._check_domain_additional() (which returns True by default but can be overridden to define
specific checks in addition to the "domain requirements"). The boilerplate code automatically checks whether all
domain requirements are met.
# Parameters
solver: The solver.
domain_factory: A callable with no argument returning the domain to solve (factory is the domain class if None).
load_path: The path to restore the solver state from (if None, the solving process will be launched instead).
# Returns
The new solver (auto-cast to the level of the domain).
"""
if domain_factory is None:
domain_factory = cls
if load_path is not None:
# TODO: avoid repeating this code somehow (identical in solver.solve(...))? Is factory necessary (vs cls)?
def cast_domain_factory():
domain = domain_factory()
autocast_all(domain, domain, solver.T_domain)
return domain
solver.load(load_path, cast_domain_factory)
else:
solver.solve(domain_factory)
autocast_all(solver, solver.T_domain, cls)
return solver
# ALTERNATE BASE CLASSES (for typical combinations)
class RLDomain(
Domain,
SingleAgent,
Sequential,
Environment,
Actions,
Initializable,
Markovian,
TransformedObservable,
Rewards,
):
"""This is a typical Reinforcement Learning domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- Environment
- Actions
- Initializable
- Markovian
- TransformedObservable
- Rewards
Typical use:
```python
class D(RLDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class MultiAgentRLDomain(
Domain,
MultiAgent,
Sequential,
Environment,
Actions,
Initializable,
Markovian,
TransformedObservable,
Rewards,
):
"""This is a typical multi-agent Reinforcement Learning domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- MultiAgent
- Sequential
- Environment
- Actions
- Initializable
- Markovian
- TransformedObservable
- Rewards
Typical use:
```python
class D(RLDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class StatelessSimulatorDomain(
Domain,
SingleAgent,
Sequential,
Simulation,
Actions,
Markovian,
TransformedObservable,
Rewards,
):
"""This is a typical stateless simulator domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- Simulation
- Actions
- Markovian
- TransformedObservable
- Rewards
Typical use:
```python
class D(StatelessSimulatorDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class MDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
DeterministicInitialized,
Markovian,
FullyObservable,
Rewards,
):
"""This is a typical Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- DeterministicInitialized
- Markovian
- FullyObservable
- Rewards
Typical use:
```python
class D(MDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class POMDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
UncertainInitialized,
Markovian,
PartiallyObservable,
Rewards,
):
"""This is a typical Partially Observable Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- UncertainInitialized
- Markovian
- PartiallyObservable
- Rewards
Typical use:
```python
class D(POMDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class GoalMDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
Goals,
DeterministicInitialized,
Markovian,
FullyObservable,
PositiveCosts,
):
"""This is a typical Goal Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- Goals
- DeterministicInitialized
- Markovian
- FullyObservable
- PositiveCosts
Typical use:
```python
class D(GoalMDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class GoalPOMDPDomain(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
Goals,
UncertainInitialized,
Markovian,
PartiallyObservable,
PositiveCosts,
):
"""This is a typical Goal Partially Observable Markov Decision Process domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- EnumerableTransitions
- Actions
- Goals
- UncertainInitialized
- Markovian
- PartiallyObservable
- PositiveCosts
Typical use:
```python
class D(GoalPOMDPDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class DeterministicPlanningDomain(
Domain,
SingleAgent,
Sequential,
DeterministicTransitions,
Actions,
Goals,
DeterministicInitialized,
Markovian,
FullyObservable,
PositiveCosts,
):
"""This is a typical deterministic planning domain class.
This helper class can be used as an alternate base class for domains, inheriting the following:
- Domain
- SingleAgent
- Sequential
- DeterministicTransitions
- Actions
- Goals
- DeterministicInitialized
- Markovian
- FullyObservable
- PositiveCosts
Typical use:
```python
class D(DeterministicPlanningDomain)
```
!!! tip
It is also possible to refine any alternate base class, like for instance:
```python
class D(RLDomain, FullyObservable)
```
"""
pass
class ParallelDomain:
"""Base class for creating and launching n domains in separate processes.
Each domain listens for incoming domain requests.
Each request can indicate which domain should serve it, otherwise the first available
domain i is chosen and its id is returned to the incoming request.
"""
def __init__(
self, domain_factory, lambdas=None, nb_domains=os.cpu_count(), ipc_notify=False
):
self._domain_factory = domain_factory
self._lambdas = lambdas
self._active_domains = mp.Array(
"b", [False for i in range(nb_domains)], lock=True
)
self._initializations = [
mp.Value("b", False, lock=True) for i in range(nb_domains)
]
self._conditions = [mp.Condition() for i in range(nb_domains)]
self._temp_connections = [None] * nb_domains
self._ipc_connections = [None] * nb_domains
self._processes = [None] * nb_domains
self._ipc_notify = ipc_notify
def open_ipc_connection(self, i):
self._temp_connections[i] = tempfile.NamedTemporaryFile(delete=True)
self._ipc_connections[i] = "ipc://" + self._temp_connections[i].name + ".ipc"
def close_ipc_connection(self, i):
self._temp_connections[i].close()
self._ipc_connections[i] = None
def _launch_processes(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def __enter__(self):
self._launch_processes()
return self
def __exit__(self, type, value, tb):
self.close()
def launch(self, i, name, *args):
raise NotImplementedError
def get_proc_connections(self): # process connections for use in python
raise NotImplementedError
def get_ipc_connections(self): # inter-process connections for use with C++
return self._ipc_connections
def get_parallel_capacity(self):
return self.nb_domains()
def nb_domains(self):
return len(self._processes)
def wake_up_domain(self, i=None):
if i is None:
while True:
for j, v in enumerate(self._active_domains):
if not v:
self._active_domains[j] = True
return j
else:
self._active_domains[i] = True
return i
def reset(self, i=None):
return self.launch(i, "reset")
def get_initial_state_distribution(self, i=None):
return self.launch(i, "get_initial_state_distribution")
def get_initial_state(self, i=None):
return self.launch(i, "get_initial_state")
def get_observation_space(self, i=None):
return self.launch(i, "get_observation_space")
def is_observation(self, observation, i=None):
return self.launch(i, "is_observation", observation)
def get_observation_distribution(self, state, action, i=None):
return self.launch(i, "get_observation_distribution", state, action)
def get_observation(self, state, action, i=None):
return self.launch(i, "get_observation", state, action)
def get_enabled_events(self, memory, i=None):
return self.launch(i, "get_enabled_events", memory)
def is_enabled_event(self, event, memory, i=None):
return self.launch(i, "is_enabled_event", event, memory)
def get_action_space(self, i=None):
return self.launch(i, "get_action_space")
def is_action(self, event, i=None):
return self.launch(i, "is_action", event)
def get_applicable_actions(self, memory, i=None):
return self.launch(i, "get_applicable_actions", memory)
def is_applicable_action(self, action, memory, i=None):
return self.launch(i, "is_applicable_action", action, memory)
def step(self, action, i=None):
return self.launch(i, "step", action)
def sample(self, memory, action, i=None):
return self.launch(i, "sample", memory, action)
def get_next_state_distribution(self, memory, action, i=None):
return self.launch(i, "get_next_state_distribution", memory, action)
def get_next_state(self, memory, action, i=None):
return self.launch(i, "get_next_state", memory, action)
def get_transition_value(self, memory, action, next_state, i=None):
return self.launch(i, "get_transition_value", memory, action, next_state)
def is_transition_value_dependent_on_next_state(self, i=None):
return self.launch(i, "is_transition_value_dependent_on_next_state")
def get_goals(self, i=None):
return self.launch(i, "get_goals")
def is_goal(self, observation, i=None):
return self.launch(i, "is_goal", observation)
def is_terminal(self, state, i=None):
return self.launch(i, "is_terminal", state)
def check_value(self, value, i=None):
return self.launch(i, "check_value", value)
def render(self, memory, i=None):
return self.launch(i, "render", memory)
# Call a lambda function (usually involves the original domain)
def call(self, i, lambda_id, *args):
return self.launch(i, lambda_id, *args)
# The original sequential domain may have methods we don't know
def __getattr__(self, name):
def method(*args, i=None):
return self.launch(i, name, *args)
return method
# Bypass __getattr_.method() when serializing the class.
# Required on Windows when spawning the main process.
def __getstate__(self):
d = self.__dict__.copy()
del d["_temp_connections"] # we cannot serialize a file
return d
# Bypass __getattr_.method() when serializing the class.
# Required on Windows when spawning the main process.
def __setstate__(self, state):
self.__dict__ = state
# TODO: reopen the temp connection from _ipc_connections
def _launch_domain_server_(
domain_factory, lambdas, i, job_results, conn, init, cond, ipc_conn, logger
):
domain = domain_factory()
if ipc_conn is not None:
pusher = Push0()
pusher.dial(ipc_conn)
with cond:
init.value = True
cond.notify_all() # inform the parent process that we are ready to process requests
while True:
job = conn.recv()
job_results[i] = None
if job is None:
if ipc_conn is not None:
pusher.close()
conn.close()
break
else:
try:
if isinstance(job[0], str): # job[0] is a domain class' method
r = getattr(domain, job[0])(*job[1])
else: # job[0] is a lambda function
r = lambdas[job[0]](domain, *job[1])
job_results[i] = r
if ipc_conn is not None:
pusher.send(b"0") # send success
conn.send("0") # send success
except Exception as e:
logger.error(rf"/!\ Unable to perform job {job[0]}: {e}")
if ipc_conn is not None:
pusher.send(str(e).encode(encoding="UTF-8")) # send error message
else:
conn.send(str(e)) # send failure (!= 0)
class PipeParallelDomain(ParallelDomain):
"""This class can be used to create and launch n domains in separate processes.
Each domain listens for incoming domain requests.
Each request can indicate which domain should serve it, otherwise the first available
domain i is chosen and its id is returned to the incoming request.
"""
def __init__(
self, domain_factory, lambdas=None, nb_domains=os.cpu_count(), ipc_notify=False
):
super().__init__(domain_factory, lambdas, nb_domains, ipc_notify)
self._manager = mp.Manager()
self._waiting_jobs = [None] * nb_domains
self._job_results = self._manager.list([None for i in range(nb_domains)])
logger.info(rf"Using {nb_domains} parallel piped domains")
def get_proc_connections(self):
return self._waiting_jobs
def launch(self, i, function, *args):
if not any(self._processes):
self._launch_processes()
try:
mi = self.wake_up_domain(i)
self._waiting_jobs[mi].send((function, args))
return mi
except Exception as e:
if isinstance(function, str):
logger.error(rf"/!\ Unable to launch job {function}: {e}")
else:
logger.error(rf"/!\ Unable to launch job lambdas[{function}]: {e}")
def get_result(self, i):
self._waiting_jobs[i].recv()
r = self._job_results[i]
self._job_results[i] = None
self._active_domains[i] = False
return r
def _launch_processes(self):
for i in range(len(self._job_results)):
self.open_ipc_connection(i)
pparent, pchild = mp.Pipe()
self._waiting_jobs[i] = pparent
self._processes[i] = mp.Process(
target=_launch_domain_server_,
args=[
self._domain_factory,
self._lambdas,
i,
self._job_results,
pchild,
self._initializations[i],
self._conditions[i],
self._ipc_connections[i] if self._ipc_notify else None,
logger,
],
)
self._processes[i].start()
# Waits for all jobs to be launched and waiting each for requests
for i in range(len(self._job_results)):
with self._conditions[i]:
self._conditions[i].wait_for(
lambda: bool(self._initializations[i].value) == True
)
def close(self):
for i in range(len(self._job_results)):
self._initializations[i].value = False
self._waiting_jobs[i].send(None)
self._processes[i].join()
self._processes[i].close()
self._waiting_jobs[i].close()
self._processes[i] = None
self.close_ipc_connection(i)
def _shm_launch_domain_server_(
domain_factory,
lambdas,
i,
shm_proxy,
shm_registers,
shm_types,
shm_sizes,
rsize,
shm_arrays,
shm_lambdas,
shm_names,
shm_params,
init,
activation,
done,
cond,
ipc_conn,
logger,
):
domain = domain_factory()
if ipc_conn is not None:
pusher = Push0()
pusher.dial(ipc_conn)
with cond:
init.value = True
cond.notify_all() # inform the parent process that we are ready to process requests
def get_string(s):
for i, c in enumerate(s):
if c == b"\x00":
return s[:i].decode()
return s.decode()
while True:
with cond:
cond.wait_for(lambda: bool(activation.value) == True)
activation.value = False
if int(shm_lambdas[i].value) == -1 and shm_names[i][0] == b"\x00":
if ipc_conn is not None:
pusher.close()
break
else:
try:
job_args = []
for p in shm_params[i]:
if p >= 0:
sz = shm_sizes[shm_types[p].__name__]
if sz > 1:
si = (i * rsize) + p
job_args.append(
shm_proxy.decode(
shm_types[p], shm_arrays[si : (si + sz)]
)
)
else:
job_args.append(
shm_proxy.decode(
shm_types[p], shm_arrays[(i * rsize) + p]
)
)
else:
break # no more args
if (
int(shm_lambdas[i].value) == -1
): # we are working with a domain class' method
result = getattr(domain, get_string(shm_names[i]))(*job_args)
else: # we are working with a lambda function
result = lambdas[int(shm_lambdas[i].value)](domain, *job_args)
shm_params[i][:] = [-1] * len(shm_params[i])
if type(result) is not tuple:
result = (result,)
if result[0] is not None:
type_counters = {}
for j, r in enumerate(result):
res_name = type(r).__name__
(start, end) = shm_registers[res_name]
if res_name in type_counters:
type_counters[res_name] += 1
k = type_counters[res_name]
if k >= end:
raise IndexError(
"""No more available register for type {}.
Please increase the number of registers
for that type.""".format(
res_name
)
)
else:
type_counters[res_name] = start
k = start
shm_params[i][j] = k
sz = shm_sizes[res_name]
if sz > 1:
si = (i * rsize) + k
shm_proxy.encode(r, shm_arrays[si : (si + sz)])
else:
shm_proxy.encode(r, shm_arrays[(i * rsize) + k])
if ipc_conn is not None:
pusher.send(b"0") # send success
except Exception as e:
if int(shm_lambdas[i].value) == -1:
logger.error(
rf"/!\ Unable to perform job {get_string(shm_names[i])}: {e}"
)
else:
logger.error(
rf"/!\ Unable to perform job {int(shm_lambdas[i].value)}: {e}"
)
if ipc_conn is not None:
pusher.send(str(e).encode(encoding="UTF-8")) # send error message
with cond:
done.value = True
cond.notify_all() # send finished status (no success nor failure information)
class ShmParallelDomain(ParallelDomain):
"""This class can be used to create and launch n domains in separate processes
with shared memory between the Python processes.
Each domain listens for incoming domain requests.
Each request can indicate which domain should serve it, otherwise the first available
domain is chosen and its id is returned to the incoming request.
"""
def __init__(
self,
domain_factory,
shm_proxy,
lambdas=None,
nb_domains=os.cpu_count(),
ipc_notify=False,
):
super().__init__(domain_factory, lambdas, nb_domains, ipc_notify)
self._activations = [mp.Value("b", False, lock=True) for i in range(nb_domains)]
self._dones = [mp.Value("b", False, lock=True) for i in range(nb_domains)]
self._shm_proxy = shm_proxy
self._shm_registers = (
{}
) # Maps from registered method parameter types to vectorized array ranges
self._shm_types = {} # Maps from register index to type
self._shm_sizes = (
{}
) # Maps from register method parameter types to number of arrays encoding each type
self._shm_arrays = [] # Methods' vectorized parameters
self._rsize = 0 # Total size of the register (updated below)
self._shm_lambdas = [None] * nb_domains # Vectorized lambdas' ids
self._shm_names = [None] * nb_domains # Vectorized methods' names
self._shm_params = [
None
] * nb_domains # Indices of methods' vectorized parameters
for i in range(nb_domains):
j = 0
for r in shm_proxy.register():
for k in range(r[1]):
m = shm_proxy.initialize(r[0])
if type(m) == list or type(m) == tuple:
if (
i == 0 and k == 0
): # do it once for all the domains and redundant initializers
self._shm_sizes[r[0].__name__] = len(m)
self._shm_registers[r[0].__name__] = (
j,
j + (r[1] * len(m)),
)
self._shm_types.update(
{
kk: r[0]
for kk in range(j, j + (r[1] * len(m)), len(m))
}
)
self._rsize += r[1] * len(m)
self._shm_arrays.extend(m)
j += len(m)
else:
if (
i == 0 and k == 0
): # do it once for all the domains and redundant initializers
self._shm_sizes[r[0].__name__] = 1
self._shm_registers[r[0].__name__] = (j, j + r[1])
self._shm_types.update(
{kk: r[0] for kk in range(j, j + r[1])}
)
self._rsize += r[1]
self._shm_arrays.append(m)
j += 1
self._shm_lambdas[i] = mp.Value("i", -1, lock=True)
self._shm_names[i] = mp.Array("c", bytearray(100))
self._shm_params[i] = mp.Array(
"i", [-1] * sum(r[1] for r in shm_proxy.register())
)
logger.info(rf"Using {nb_domains} parallel shared memory domains")
def get_proc_connections(self):
return (self._activations, self._conditions)
def launch(self, i, function, *args):
if not any(self._processes):
self._launch_processes()
try:
mi = self.wake_up_domain(i)
if isinstance(function, str): # function is a domain class' method
self._shm_lambdas[mi].value = -1
self._shm_names[mi][:] = bytearray(
function, encoding="utf-8"
) + bytearray(len(self._shm_names[mi]) - len(function))
else: # function is a lambda id
self._shm_lambdas[mi].value = int(function)
self._shm_names[mi][:] = bytearray(
len(self._shm_names[mi])
) # reset with null bytes
self._shm_params[mi][:] = [-1] * len(self._shm_params[mi])
type_counters = {}
for j, a in enumerate(args):
arg_name = type(a).__name__
(start, end) = self._shm_registers[arg_name]
if arg_name in type_counters:
type_counters[arg_name] += self._shm_sizes[arg_name]
k = type_counters[arg_name]
if k >= end:
raise IndexError(
"""No more available register for type {}.
Please increase the number of registers
for that type.""".format(
arg_name
)
)
else:
type_counters[arg_name] = start
k = start
self._shm_params[mi][j] = k
sz = self._shm_sizes[arg_name]
if sz > 1:
si = (mi * self._rsize) + k
self._shm_proxy.encode(a, self._shm_arrays[si : (si + sz)])
else:
self._shm_proxy.encode(a, self._shm_arrays[(mi * self._rsize) + k])
with self._conditions[mi]:
self._activations[mi].value = True
self._conditions[mi].notify_all()
return mi
except Exception as e:
if isinstance(function, str):
logger.error(rf"/!\ Unable to launch job {function}: {e}")
else:
logger.error(rf"/!\ Unable to launch job lambdas[{function}]: {e}")
def get_result(self, i):
with self._conditions[i]:
self._conditions[i].wait_for(lambda: bool(self._dones[i].value) == True)
self._dones[i].value = False
results = []
for r in self._shm_params[i]:
if r >= 0:
sz = self._shm_sizes[self._shm_types[r].__name__]
if sz > 1:
si = (i * self._rsize) + r
results.append(
self._shm_proxy.decode(
self._shm_types[r], self._shm_arrays[si : (si + sz)]
)
)
else:
results.append(
self._shm_proxy.decode(
self._shm_types[r], self._shm_arrays[(i * self._rsize) + r]
)
)
else:
break # no more params
self._active_domains[i] = False
return results if len(results) > 1 else results[0] if len(results) > 0 else None
def _launch_processes(self):
for i in range(len(self._processes)):
self.open_ipc_connection(i)
self._processes[i] = mp.Process(
target=_shm_launch_domain_server_,
args=[
self._domain_factory,
self._lambdas,
i,
self._shm_proxy.copy(),
dict(self._shm_registers),
dict(self._shm_types),
dict(self._shm_sizes),
self._rsize,
list(self._shm_arrays),
list(self._shm_lambdas),
list(self._shm_names),
list(self._shm_params),
self._initializations[i],
self._activations[i],
self._dones[i],
self._conditions[i],
self._ipc_connections[i] if self._ipc_notify else None,
logger,
],
)
self._processes[i].start()
# Waits for all jobs to be launched and waiting each for requests
for i in range(len(self._processes)):
with self._conditions[i]:
self._conditions[i].wait_for(
lambda: bool(self._initializations[i].value) == True
)
def close(self):
for i in range(len(self._processes)):
self._initializations[i].value = False
self._shm_lambdas[i].value = -1
self._shm_names[i][:] = bytearray(
len(self._shm_names[i])
) # reset with null bytes
self._shm_params[i][:] = [-1] * len(self._shm_params[i])
with self._conditions[i]:
self._activations[i].value = True
self._conditions[i].notify_all()
self._processes[i].join()
self._processes[i].close()
self._processes[i] = None
self.close_ipc_connection(i) | 0.904049 | 0.572932 |
from __future__ import annotations
from skdecide.core import D, Value, autocastable
__all__ = ["Rewards", "PositiveCosts"]
class Rewards:
"""A domain must inherit this class if it sends rewards (positive and/or negative)."""
@autocastable
def check_value(self, value: Value[D.T_value]) -> bool:
"""Check that a value is compliant with its reward specification.
!!! tip
This function returns always True by default because any kind of reward should be accepted at this level.
# Parameters
value: The value to check.
# Returns
True if the value is compliant (False otherwise).
"""
return self._check_value(value)
def _check_value(self, value: Value[D.T_value]) -> bool:
"""Check that a value is compliant with its reward specification.
!!! tip
This function returns always True by default because any kind of reward should be accepted at this level.
# Parameters
value: The value to check.
# Returns
True if the value is compliant (False otherwise).
"""
return True
class PositiveCosts(Rewards):
"""A domain must inherit this class if it sends only positive costs (i.e. negative rewards).
Having only positive costs is a required assumption for certain solvers to work, such as classical planners.
"""
def _check_value(self, value: Value[D.T_value]) -> bool:
"""Check that a value is compliant with its cost specification (must be positive).
!!! tip
This function calls #PositiveCost._is_positive() to determine if a value is positive (can be overridden for
advanced value types).
# Parameters
value: The value to check.
# Returns
True if the value is compliant (False otherwise).
"""
return self._is_positive(value.cost)
def _is_positive(self, cost: D.T_value) -> bool:
"""Determine if a value is positive (can be overridden for advanced value types).
# Parameters
cost: The cost to evaluate.
# Returns
True if the cost is positive (False otherwise).
"""
return cost >= 0 | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/value.py | value.py |
from __future__ import annotations
from skdecide.core import D, Value, autocastable
__all__ = ["Rewards", "PositiveCosts"]
class Rewards:
"""A domain must inherit this class if it sends rewards (positive and/or negative)."""
@autocastable
def check_value(self, value: Value[D.T_value]) -> bool:
"""Check that a value is compliant with its reward specification.
!!! tip
This function returns always True by default because any kind of reward should be accepted at this level.
# Parameters
value: The value to check.
# Returns
True if the value is compliant (False otherwise).
"""
return self._check_value(value)
def _check_value(self, value: Value[D.T_value]) -> bool:
"""Check that a value is compliant with its reward specification.
!!! tip
This function returns always True by default because any kind of reward should be accepted at this level.
# Parameters
value: The value to check.
# Returns
True if the value is compliant (False otherwise).
"""
return True
class PositiveCosts(Rewards):
"""A domain must inherit this class if it sends only positive costs (i.e. negative rewards).
Having only positive costs is a required assumption for certain solvers to work, such as classical planners.
"""
def _check_value(self, value: Value[D.T_value]) -> bool:
"""Check that a value is compliant with its cost specification (must be positive).
!!! tip
This function calls #PositiveCost._is_positive() to determine if a value is positive (can be overridden for
advanced value types).
# Parameters
value: The value to check.
# Returns
True if the value is compliant (False otherwise).
"""
return self._is_positive(value.cost)
def _is_positive(self, cost: D.T_value) -> bool:
"""Determine if a value is positive (can be overridden for advanced value types).
# Parameters
cost: The cost to evaluate.
# Returns
True if the cost is positive (False otherwise).
"""
return cost >= 0 | 0.930836 | 0.498352 |
from __future__ import annotations
import functools
from skdecide.core import D, Distribution, SingleValueDistribution, autocastable
__all__ = ["Initializable", "UncertainInitialized", "DeterministicInitialized"]
class Initializable:
"""A domain must inherit this class if it can be initialized."""
@autocastable
def reset(self) -> D.T_agent[D.T_observation]:
"""Reset the state of the environment and return an initial observation.
By default, #Initializable.reset() provides some boilerplate code and internally calls #Initializable._reset()
(which returns an initial state). The boilerplate code automatically stores the initial state into the #_memory
attribute and samples a corresponding observation.
# Returns
An initial observation.
"""
return self._reset()
def _reset(self) -> D.T_agent[D.T_observation]:
"""Reset the state of the environment and return an initial observation.
By default, #Initializable._reset() provides some boilerplate code and internally
calls #Initializable._state_reset() (which returns an initial state). The boilerplate code automatically stores
the initial state into the #_memory attribute and samples a corresponding observation.
# Returns
An initial observation.
"""
initial_state = self._state_reset()
self._memory = self._init_memory(initial_state)
initial_observation = self._get_observation_distribution(initial_state).sample()
return initial_observation
def _state_reset(self) -> D.T_state:
"""Reset the state of the environment and return an initial state.
This is a helper function called by default from #Initializable._reset(). It focuses on the state level, as
opposed to the observation one for the latter.
# Returns
An initial state.
"""
raise NotImplementedError
class UncertainInitialized(Initializable):
"""A domain must inherit this class if its states are initialized according to a probability distribution known as
white-box."""
def _state_reset(self) -> D.T_state:
initial_state = self._get_initial_state_distribution().sample()
return initial_state
@autocastable
def get_initial_state_distribution(self) -> Distribution[D.T_state]:
"""Get the (cached) probability distribution of initial states.
By default, #UncertainInitialized.get_initial_state_distribution() internally
calls #UncertainInitialized._get_initial_state_distribution_() the first time and automatically caches its value
to make future calls more efficient (since the initial state distribution is assumed to be constant).
# Returns
The probability distribution of initial states.
"""
return self._get_initial_state_distribution()
@functools.lru_cache()
def _get_initial_state_distribution(self) -> Distribution[D.T_state]:
"""Get the (cached) probability distribution of initial states.
By default, #UncertainInitialized._get_initial_state_distribution() internally
calls #UncertainInitialized._get_initial_state_distribution_() the first time and automatically caches its value
to make future calls more efficient (since the initial state distribution is assumed to be constant).
# Returns
The probability distribution of initial states.
"""
return self._get_initial_state_distribution_()
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
"""Get the probability distribution of initial states.
This is a helper function called by default from #UncertainInitialized._get_initial_state_distribution(), the
difference being that the result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The probability distribution of initial states.
"""
raise NotImplementedError
class DeterministicInitialized(UncertainInitialized):
"""A domain must inherit this class if it has a deterministic initial state known as white-box."""
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
return SingleValueDistribution(self._get_initial_state())
@autocastable
def get_initial_state(self) -> D.T_state:
"""Get the (cached) initial state.
By default, #DeterministicInitialized.get_initial_state() internally
calls #DeterministicInitialized._get_initial_state_() the first time and automatically caches its value to make
future calls more efficient (since the initial state is assumed to be constant).
# Returns
The initial state.
"""
return self._get_initial_state()
@functools.lru_cache()
def _get_initial_state(self) -> D.T_state:
"""Get the (cached) initial state.
By default, #DeterministicInitialized._get_initial_state() internally
calls #DeterministicInitialized._get_initial_state_() the first time and automatically caches its value to make
future calls more efficient (since the initial state is assumed to be constant).
# Returns
The initial state.
"""
return self._get_initial_state_()
def _get_initial_state_(self) -> D.T_state:
"""Get the initial state.
This is a helper function called by default from #DeterministicInitialized._get_initial_state(), the difference
being that the result is not cached here.
# Returns
The initial state.
"""
raise NotImplementedError | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/initialization.py | initialization.py |
from __future__ import annotations
import functools
from skdecide.core import D, Distribution, SingleValueDistribution, autocastable
__all__ = ["Initializable", "UncertainInitialized", "DeterministicInitialized"]
class Initializable:
"""A domain must inherit this class if it can be initialized."""
@autocastable
def reset(self) -> D.T_agent[D.T_observation]:
"""Reset the state of the environment and return an initial observation.
By default, #Initializable.reset() provides some boilerplate code and internally calls #Initializable._reset()
(which returns an initial state). The boilerplate code automatically stores the initial state into the #_memory
attribute and samples a corresponding observation.
# Returns
An initial observation.
"""
return self._reset()
def _reset(self) -> D.T_agent[D.T_observation]:
"""Reset the state of the environment and return an initial observation.
By default, #Initializable._reset() provides some boilerplate code and internally
calls #Initializable._state_reset() (which returns an initial state). The boilerplate code automatically stores
the initial state into the #_memory attribute and samples a corresponding observation.
# Returns
An initial observation.
"""
initial_state = self._state_reset()
self._memory = self._init_memory(initial_state)
initial_observation = self._get_observation_distribution(initial_state).sample()
return initial_observation
def _state_reset(self) -> D.T_state:
"""Reset the state of the environment and return an initial state.
This is a helper function called by default from #Initializable._reset(). It focuses on the state level, as
opposed to the observation one for the latter.
# Returns
An initial state.
"""
raise NotImplementedError
class UncertainInitialized(Initializable):
"""A domain must inherit this class if its states are initialized according to a probability distribution known as
white-box."""
def _state_reset(self) -> D.T_state:
initial_state = self._get_initial_state_distribution().sample()
return initial_state
@autocastable
def get_initial_state_distribution(self) -> Distribution[D.T_state]:
"""Get the (cached) probability distribution of initial states.
By default, #UncertainInitialized.get_initial_state_distribution() internally
calls #UncertainInitialized._get_initial_state_distribution_() the first time and automatically caches its value
to make future calls more efficient (since the initial state distribution is assumed to be constant).
# Returns
The probability distribution of initial states.
"""
return self._get_initial_state_distribution()
@functools.lru_cache()
def _get_initial_state_distribution(self) -> Distribution[D.T_state]:
"""Get the (cached) probability distribution of initial states.
By default, #UncertainInitialized._get_initial_state_distribution() internally
calls #UncertainInitialized._get_initial_state_distribution_() the first time and automatically caches its value
to make future calls more efficient (since the initial state distribution is assumed to be constant).
# Returns
The probability distribution of initial states.
"""
return self._get_initial_state_distribution_()
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
"""Get the probability distribution of initial states.
This is a helper function called by default from #UncertainInitialized._get_initial_state_distribution(), the
difference being that the result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The probability distribution of initial states.
"""
raise NotImplementedError
class DeterministicInitialized(UncertainInitialized):
"""A domain must inherit this class if it has a deterministic initial state known as white-box."""
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
return SingleValueDistribution(self._get_initial_state())
@autocastable
def get_initial_state(self) -> D.T_state:
"""Get the (cached) initial state.
By default, #DeterministicInitialized.get_initial_state() internally
calls #DeterministicInitialized._get_initial_state_() the first time and automatically caches its value to make
future calls more efficient (since the initial state is assumed to be constant).
# Returns
The initial state.
"""
return self._get_initial_state()
@functools.lru_cache()
def _get_initial_state(self) -> D.T_state:
"""Get the (cached) initial state.
By default, #DeterministicInitialized._get_initial_state() internally
calls #DeterministicInitialized._get_initial_state_() the first time and automatically caches its value to make
future calls more efficient (since the initial state is assumed to be constant).
# Returns
The initial state.
"""
return self._get_initial_state_()
def _get_initial_state_(self) -> D.T_state:
"""Get the initial state.
This is a helper function called by default from #DeterministicInitialized._get_initial_state(), the difference
being that the result is not cached here.
# Returns
The initial state.
"""
raise NotImplementedError | 0.933363 | 0.682818 |
from __future__ import annotations
import functools
from typing import Optional, Union
from skdecide.core import D, Memory
__all__ = ["History", "FiniteHistory", "Markovian", "Memoryless"]
class History:
"""A domain must inherit this class if its full state history must be stored to compute its dynamics (non-Markovian
domain)."""
_memory: D.T_memory[D.T_state]
T_memory = Memory
def _init_memory(self, state: Optional[D.T_state] = None) -> D.T_memory[D.T_state]:
"""Initialize memory (possibly with a state) according to its specification and return it.
This function is automatically called by #Initializable._reset() to reinitialize the internal memory whenever
the domain is used as an environment.
# Parameters
state: An optional state to initialize the memory with (typically the initial state).
# Returns
The new initialized memory.
"""
content = [state] if state is not None else []
return Memory(content, maxlen=self._get_memory_maxlen())
def _get_memory_maxlen(self) -> Optional[int]:
"""Get the memory max length (or None if unbounded).
!!! tip
This function returns always None by default because the memory length is unbounded at this level.
# Returns
The memory max length (or None if unbounded).
"""
return None
class FiniteHistory(History):
"""A domain must inherit this class if the last N states must be stored to compute its dynamics (Markovian
domain of order N).
N is specified by the return value of the #FiniteHistory._get_memory_maxlen() function.
"""
T_memory = Memory
@functools.lru_cache()
def _get_memory_maxlen(self) -> int:
"""Get the (cached) memory max length.
By default, #FiniteHistory._get_memory_maxlen() internally calls #FiniteHistory._get_memory_maxlen_() the first
time and automatically caches its value to make future calls more efficient (since the memory max length is
assumed to be constant).
# Returns
The memory max length.
"""
return self._get_memory_maxlen_()
def _get_memory_maxlen_(self) -> int:
"""Get the memory max length.
This is a helper function called by default from #FiniteHistory._get_memory_maxlen(), the difference being that
the result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The memory max length.
"""
raise NotImplementedError
class Markovian(FiniteHistory):
"""A domain must inherit this class if only its last state must be stored to compute its dynamics (pure Markovian
domain)."""
T_memory = Union
def _init_memory(self, state: Optional[D.T_state] = None) -> D.T_memory[D.T_state]:
return state
def _get_memory_maxlen_(self) -> int:
return 1
class Memoryless(Markovian):
"""A domain must inherit this class if it does not require any previous state(s) to be stored to compute its
dynamics.
A dice roll simulator is an example of memoryless domain (next states are independent of previous ones).
!!! tip
Whenever an existing domain (environment, simulator...) needs to be wrapped instead of implemented fully in
scikit-decide (e.g. compiled ATARI games), Memoryless can be used because the domain memory (if any) would
be handled externally.
"""
T_memory = Union
def _init_memory(self, state: Optional[D.T_state] = None) -> D.T_memory[D.T_state]:
return None
def _get_memory_maxlen_(self) -> int:
return 0 | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/memory.py | memory.py |
from __future__ import annotations
import functools
from typing import Optional, Union
from skdecide.core import D, Memory
__all__ = ["History", "FiniteHistory", "Markovian", "Memoryless"]
class History:
"""A domain must inherit this class if its full state history must be stored to compute its dynamics (non-Markovian
domain)."""
_memory: D.T_memory[D.T_state]
T_memory = Memory
def _init_memory(self, state: Optional[D.T_state] = None) -> D.T_memory[D.T_state]:
"""Initialize memory (possibly with a state) according to its specification and return it.
This function is automatically called by #Initializable._reset() to reinitialize the internal memory whenever
the domain is used as an environment.
# Parameters
state: An optional state to initialize the memory with (typically the initial state).
# Returns
The new initialized memory.
"""
content = [state] if state is not None else []
return Memory(content, maxlen=self._get_memory_maxlen())
def _get_memory_maxlen(self) -> Optional[int]:
"""Get the memory max length (or None if unbounded).
!!! tip
This function returns always None by default because the memory length is unbounded at this level.
# Returns
The memory max length (or None if unbounded).
"""
return None
class FiniteHistory(History):
"""A domain must inherit this class if the last N states must be stored to compute its dynamics (Markovian
domain of order N).
N is specified by the return value of the #FiniteHistory._get_memory_maxlen() function.
"""
T_memory = Memory
@functools.lru_cache()
def _get_memory_maxlen(self) -> int:
"""Get the (cached) memory max length.
By default, #FiniteHistory._get_memory_maxlen() internally calls #FiniteHistory._get_memory_maxlen_() the first
time and automatically caches its value to make future calls more efficient (since the memory max length is
assumed to be constant).
# Returns
The memory max length.
"""
return self._get_memory_maxlen_()
def _get_memory_maxlen_(self) -> int:
"""Get the memory max length.
This is a helper function called by default from #FiniteHistory._get_memory_maxlen(), the difference being that
the result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The memory max length.
"""
raise NotImplementedError
class Markovian(FiniteHistory):
"""A domain must inherit this class if only its last state must be stored to compute its dynamics (pure Markovian
domain)."""
T_memory = Union
def _init_memory(self, state: Optional[D.T_state] = None) -> D.T_memory[D.T_state]:
return state
def _get_memory_maxlen_(self) -> int:
return 1
class Memoryless(Markovian):
"""A domain must inherit this class if it does not require any previous state(s) to be stored to compute its
dynamics.
A dice roll simulator is an example of memoryless domain (next states are independent of previous ones).
!!! tip
Whenever an existing domain (environment, simulator...) needs to be wrapped instead of implemented fully in
scikit-decide (e.g. compiled ATARI games), Memoryless can be used because the domain memory (if any) would
be handled externally.
"""
T_memory = Union
def _init_memory(self, state: Optional[D.T_state] = None) -> D.T_memory[D.T_state]:
return None
def _get_memory_maxlen_(self) -> int:
return 0 | 0.94688 | 0.39946 |
from __future__ import annotations
import functools
from typing import Optional
from skdecide.core import (
D,
DiscreteDistribution,
Distribution,
EnvironmentOutcome,
SingleValueDistribution,
TransitionOutcome,
Value,
autocastable,
)
__all__ = [
"Environment",
"Simulation",
"UncertainTransitions",
"EnumerableTransitions",
"DeterministicTransitions",
]
class Environment:
"""A domain must inherit this class if agents interact with it like a black-box environment.
Black-box environment examples include: the real world, compiled ATARI games, etc.
!!! tip
Environment domains are typically stateful: they must keep the current state or history in their memory to
compute next steps (automatically done by default in the #_memory attribute).
"""
@autocastable
def step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Run one step of the environment's dynamics.
By default, #Environment.step() provides some boilerplate code and internally calls #Environment._step() (which
returns a transition outcome). The boilerplate code automatically stores next state into the #_memory attribute
and samples a corresponding observation.
!!! tip
Whenever an existing environment needs to be wrapped instead of implemented fully in scikit-decide (e.g. compiled
ATARI games), it is recommended to overwrite #Environment.step() to call the external environment and not
use the #Environment._step() helper function.
!!! warning
Before calling #Environment.step() the first time or when the end of an episode is
reached, #Initializable.reset() must be called to reset the environment's state.
# Parameters
action: The action taken in the current memory (state or history) triggering the transition.
# Returns
The environment outcome of this step.
"""
return self._step(action)
def _step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Run one step of the environment's dynamics.
By default, #Environment._step() provides some boilerplate code and internally
calls #Environment._state_step() (which returns a transition outcome). The boilerplate code automatically stores
next state into the #_memory attribute and samples a corresponding observation.
!!! tip
Whenever an existing environment needs to be wrapped instead of implemented fully in scikit-decide (e.g. compiled
ATARI games), it is recommended to overwrite #Environment._step() to call the external environment and not
use the #Environment._state_step() helper function.
!!! warning
Before calling #Environment._step() the first time or when the end of an episode is
reached, #Initializable._reset() must be called to reset the environment's state.
# Parameters
action: The action taken in the current memory (state or history) triggering the transition.
# Returns
The environment outcome of this step.
"""
transition_outcome = self._state_step(action)
next_state = transition_outcome.state
observation = self._get_observation_distribution(next_state, action).sample()
if self._get_memory_maxlen() == 1:
self._memory = next_state
elif self._get_memory_maxlen() > 1:
self._memory.append(next_state)
return EnvironmentOutcome(
observation,
transition_outcome.value,
transition_outcome.termination,
transition_outcome.info,
)
def _state_step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Compute one step of the transition's dynamics.
This is a helper function called by default from #Environment._step(). It focuses on the state level, as opposed
to the observation one for the latter.
# Parameters
action: The action taken in the current memory (state or history) triggering the transition.
# Returns
The transition outcome of this step.
"""
raise NotImplementedError
class Simulation(Environment):
"""A domain must inherit this class if agents interact with it like a simulation.
Compared to pure environment domains, simulation ones have the additional ability to sample transitions from any
given state.
!!! tip
Simulation domains are typically stateless: they do not need to store the current state or history in memory
since it is usually passed as parameter of their functions. By default, they only become stateful whenever they
are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
def _state_step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
return self._state_sample(self._memory, action)
@autocastable
def set_memory(self, memory: D.T_memory[D.T_state]) -> None:
"""Set internal memory attribute #_memory to given one.
This can be useful to set a specific "starting point" before doing a rollout with successive #Environment.step()
calls.
# Parameters
memory: The memory to set internally.
# Example
```python
# Set simulation_domain memory to my_state (assuming Markovian domain)
simulation_domain.set_memory(my_state)
# Start a 100-steps rollout from here (applying my_action at every step)
for _ in range(100):
simulation_domain.step(my_action)
```
"""
return self._set_memory(memory)
def _set_memory(self, memory: D.T_memory[D.T_state]) -> None:
"""Set internal memory attribute #_memory to given one.
This can be useful to set a specific "starting point" before doing a rollout with
successive #Environment._step() calls.
# Parameters
memory: The memory to set internally.
# Example
```python
# Set simulation_domain memory to my_state (assuming Markovian domain)
simulation_domain._set_memory(my_state)
# Start a 100-steps rollout from here (applying my_action at every step)
for _ in range(100):
simulation_domain._step(my_action)
```
"""
self._memory = memory
@autocastable
def sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Sample one transition of the simulator's dynamics.
By default, #Simulation.sample() provides some boilerplate code and internally calls #Simulation._sample()
(which returns a transition outcome). The boilerplate code automatically samples an observation corresponding to
the sampled next state.
!!! tip
Whenever an existing simulator needs to be wrapped instead of implemented fully in scikit-decide (e.g. a
simulator), it is recommended to overwrite #Simulation.sample() to call the external simulator and not use
the #Simulation._sample() helper function.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The environment outcome of the sampled transition.
"""
return self._sample(memory, action)
def _sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Sample one transition of the simulator's dynamics.
By default, #Simulation._sample() provides some boilerplate code and internally
calls #Simulation._state_sample() (which returns a transition outcome). The boilerplate code automatically
samples an observation corresponding to the sampled next state.
!!! tip
Whenever an existing simulator needs to be wrapped instead of implemented fully in scikit-decide (e.g. a
simulator), it is recommended to overwrite #Simulation._sample() to call the external simulator and not use
the #Simulation._state_sample() helper function.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The environment outcome of the sampled transition.
"""
transition_outcome = self._state_sample(memory, action)
next_state = transition_outcome.state
observation = self._get_observation_distribution(next_state, action).sample()
return EnvironmentOutcome(
observation,
transition_outcome.value,
transition_outcome.termination,
transition_outcome.info,
)
def _state_sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Compute one sample of the transition's dynamics.
This is a helper function called by default from #Simulation._sample(). It focuses on the state level, as
opposed to the observation one for the latter.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The transition outcome of the sampled transition.
"""
raise NotImplementedError
class UncertainTransitions(Simulation):
"""A domain must inherit this class if its dynamics is uncertain and provided as a white-box model.
Compared to pure simulation domains, uncertain transition ones provide in addition the full probability distribution
of next states given a memory and action.
!!! tip
Uncertain transition domains are typically stateless: they do not need to store the current state or history in
memory since it is usually passed as parameter of their functions. By default, they only become stateful
whenever they are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
def _state_sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
next_state = self._get_next_state_distribution(memory, action).sample()
value = self._get_transition_value(memory, action, next_state)
# Termination could be inferred using get_next_state_distribution based on next_state,
# but would introduce multiple constraints on class definitions
termination = self._is_terminal(next_state)
return TransitionOutcome(next_state, value, termination, None)
@autocastable
def get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> Distribution[D.T_state]:
"""Get the probability distribution of next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The probability distribution of next state.
"""
return self._get_next_state_distribution(memory, action)
def _get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> Distribution[D.T_state]:
"""Get the probability distribution of next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The probability distribution of next state.
"""
raise NotImplementedError
@autocastable
def get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
"""Get the value (reward or cost) of a transition.
The transition to consider is defined by the function parameters.
!!! tip
If this function never depends on the next_state parameter for its computation, it is recommended to
indicate it by overriding #UncertainTransitions._is_transition_value_dependent_on_next_state_() to return
False. This information can then be exploited by solvers to avoid computing next state to evaluate a
transition value (more efficient).
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
next_state: The next state in which the transition ends (if needed for the computation).
# Returns
The transition value (reward or cost).
"""
return self._get_transition_value(memory, action, next_state)
def _get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
"""Get the value (reward or cost) of a transition.
The transition to consider is defined by the function parameters.
!!! tip
If this function never depends on the next_state parameter for its computation, it is recommended to
indicate it by overriding #UncertainTransitions._is_transition_value_dependent_on_next_state_() to return
False. This information can then be exploited by solvers to avoid computing next state to evaluate a
transition value (more efficient).
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
next_state: The next state in which the transition ends (if needed for the computation).
# Returns
The transition value (reward or cost).
"""
raise NotImplementedError
@autocastable
def is_transition_value_dependent_on_next_state(self) -> bool:
"""Indicate whether get_transition_value() requires the next_state parameter for its computation (cached).
By default, #UncertainTransitions.is_transition_value_dependent_on_next_state() internally
calls #UncertainTransitions._is_transition_value_dependent_on_next_state_() the first time and automatically
caches its value to make future calls more efficient (since the returned value is assumed to be constant).
# Returns
True if the transition value computation depends on next_state (False otherwise).
"""
return self._is_transition_value_dependent_on_next_state()
@functools.lru_cache()
def _is_transition_value_dependent_on_next_state(self) -> bool:
"""Indicate whether _get_transition_value() requires the next_state parameter for its computation (cached).
By default, #UncertainTransitions._is_transition_value_dependent_on_next_state() internally
calls #UncertainTransitions._is_transition_value_dependent_on_next_state_() the first time and automatically
caches its value to make future calls more efficient (since the returned value is assumed to be constant).
# Returns
True if the transition value computation depends on next_state (False otherwise).
"""
return self._is_transition_value_dependent_on_next_state_()
def _is_transition_value_dependent_on_next_state_(self) -> bool:
"""Indicate whether _get_transition_value() requires the next_state parameter for its computation.
This is a helper function called by default
from #UncertainTransitions._is_transition_value_dependent_on_next_state(), the difference being that the result
is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
True if the transition value computation depends on next_state (False otherwise).
"""
return True
@autocastable
def is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
"""Indicate whether a state is terminal.
A terminal state is a state with no outgoing transition (except to itself with value 0).
# Parameters
state: The state to consider.
# Returns
True if the state is terminal (False otherwise).
"""
return self._is_terminal(state)
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
"""Indicate whether a state is terminal.
A terminal state is a state with no outgoing transition (except to itself with value 0).
# Parameters
state: The state to consider.
# Returns
True if the state is terminal (False otherwise).
"""
raise NotImplementedError
class EnumerableTransitions(UncertainTransitions):
"""A domain must inherit this class if its dynamics is uncertain (with enumerable transitions) and provided as a
white-box model.
Compared to pure uncertain transition domains, enumerable transition ones guarantee that all probability
distributions of next state are discrete.
!!! tip
Enumerable transition domains are typically stateless: they do not need to store the current state or history in
memory since it is usually passed as parameter of their functions. By default, they only become stateful
whenever they are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
@autocastable
def get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> DiscreteDistribution[D.T_state]:
"""Get the discrete probability distribution of next state given a memory and action.
!!! tip
In the Markovian case (memory only holds last state $s$), given an action $a$, this function can
be mathematically represented by $P(S'|s, a)$, where $S'$ is the next state random variable.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The discrete probability distribution of next state.
"""
return self._get_next_state_distribution(memory, action)
def _get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> DiscreteDistribution[D.T_state]:
"""Get the discrete probability distribution of next state given a memory and action.
!!! tip
In the Markovian case (memory only holds last state $s$), given an action $a$, this function can
be mathematically represented by $P(S'|s, a)$, where $S'$ is the next state random variable.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The discrete probability distribution of next state.
"""
raise NotImplementedError
class DeterministicTransitions(EnumerableTransitions):
"""A domain must inherit this class if its dynamics is deterministic and provided as a white-box model.
Compared to pure enumerable transition domains, deterministic transition ones guarantee that there is only one next
state for a given source memory (state or history) and action.
!!! tip
Deterministic transition domains are typically stateless: they do not need to store the current state or history
in memory since it is usually passed as parameter of their functions. By default, they only become stateful
whenever they are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
def _get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> SingleValueDistribution[D.T_state]:
return SingleValueDistribution(self._get_next_state(memory, action))
@autocastable
def get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
"""Get the next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The deterministic next state.
"""
return self._get_next_state(memory, action)
def _get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
"""Get the next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The deterministic next state.
"""
raise NotImplementedError | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/dynamics.py | dynamics.py |
from __future__ import annotations
import functools
from typing import Optional
from skdecide.core import (
D,
DiscreteDistribution,
Distribution,
EnvironmentOutcome,
SingleValueDistribution,
TransitionOutcome,
Value,
autocastable,
)
__all__ = [
"Environment",
"Simulation",
"UncertainTransitions",
"EnumerableTransitions",
"DeterministicTransitions",
]
class Environment:
"""A domain must inherit this class if agents interact with it like a black-box environment.
Black-box environment examples include: the real world, compiled ATARI games, etc.
!!! tip
Environment domains are typically stateful: they must keep the current state or history in their memory to
compute next steps (automatically done by default in the #_memory attribute).
"""
@autocastable
def step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Run one step of the environment's dynamics.
By default, #Environment.step() provides some boilerplate code and internally calls #Environment._step() (which
returns a transition outcome). The boilerplate code automatically stores next state into the #_memory attribute
and samples a corresponding observation.
!!! tip
Whenever an existing environment needs to be wrapped instead of implemented fully in scikit-decide (e.g. compiled
ATARI games), it is recommended to overwrite #Environment.step() to call the external environment and not
use the #Environment._step() helper function.
!!! warning
Before calling #Environment.step() the first time or when the end of an episode is
reached, #Initializable.reset() must be called to reset the environment's state.
# Parameters
action: The action taken in the current memory (state or history) triggering the transition.
# Returns
The environment outcome of this step.
"""
return self._step(action)
def _step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Run one step of the environment's dynamics.
By default, #Environment._step() provides some boilerplate code and internally
calls #Environment._state_step() (which returns a transition outcome). The boilerplate code automatically stores
next state into the #_memory attribute and samples a corresponding observation.
!!! tip
Whenever an existing environment needs to be wrapped instead of implemented fully in scikit-decide (e.g. compiled
ATARI games), it is recommended to overwrite #Environment._step() to call the external environment and not
use the #Environment._state_step() helper function.
!!! warning
Before calling #Environment._step() the first time or when the end of an episode is
reached, #Initializable._reset() must be called to reset the environment's state.
# Parameters
action: The action taken in the current memory (state or history) triggering the transition.
# Returns
The environment outcome of this step.
"""
transition_outcome = self._state_step(action)
next_state = transition_outcome.state
observation = self._get_observation_distribution(next_state, action).sample()
if self._get_memory_maxlen() == 1:
self._memory = next_state
elif self._get_memory_maxlen() > 1:
self._memory.append(next_state)
return EnvironmentOutcome(
observation,
transition_outcome.value,
transition_outcome.termination,
transition_outcome.info,
)
def _state_step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Compute one step of the transition's dynamics.
This is a helper function called by default from #Environment._step(). It focuses on the state level, as opposed
to the observation one for the latter.
# Parameters
action: The action taken in the current memory (state or history) triggering the transition.
# Returns
The transition outcome of this step.
"""
raise NotImplementedError
class Simulation(Environment):
"""A domain must inherit this class if agents interact with it like a simulation.
Compared to pure environment domains, simulation ones have the additional ability to sample transitions from any
given state.
!!! tip
Simulation domains are typically stateless: they do not need to store the current state or history in memory
since it is usually passed as parameter of their functions. By default, they only become stateful whenever they
are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
def _state_step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
return self._state_sample(self._memory, action)
@autocastable
def set_memory(self, memory: D.T_memory[D.T_state]) -> None:
"""Set internal memory attribute #_memory to given one.
This can be useful to set a specific "starting point" before doing a rollout with successive #Environment.step()
calls.
# Parameters
memory: The memory to set internally.
# Example
```python
# Set simulation_domain memory to my_state (assuming Markovian domain)
simulation_domain.set_memory(my_state)
# Start a 100-steps rollout from here (applying my_action at every step)
for _ in range(100):
simulation_domain.step(my_action)
```
"""
return self._set_memory(memory)
def _set_memory(self, memory: D.T_memory[D.T_state]) -> None:
"""Set internal memory attribute #_memory to given one.
This can be useful to set a specific "starting point" before doing a rollout with
successive #Environment._step() calls.
# Parameters
memory: The memory to set internally.
# Example
```python
# Set simulation_domain memory to my_state (assuming Markovian domain)
simulation_domain._set_memory(my_state)
# Start a 100-steps rollout from here (applying my_action at every step)
for _ in range(100):
simulation_domain._step(my_action)
```
"""
self._memory = memory
@autocastable
def sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Sample one transition of the simulator's dynamics.
By default, #Simulation.sample() provides some boilerplate code and internally calls #Simulation._sample()
(which returns a transition outcome). The boilerplate code automatically samples an observation corresponding to
the sampled next state.
!!! tip
Whenever an existing simulator needs to be wrapped instead of implemented fully in scikit-decide (e.g. a
simulator), it is recommended to overwrite #Simulation.sample() to call the external simulator and not use
the #Simulation._sample() helper function.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The environment outcome of the sampled transition.
"""
return self._sample(memory, action)
def _sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> EnvironmentOutcome[
D.T_agent[D.T_observation],
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Sample one transition of the simulator's dynamics.
By default, #Simulation._sample() provides some boilerplate code and internally
calls #Simulation._state_sample() (which returns a transition outcome). The boilerplate code automatically
samples an observation corresponding to the sampled next state.
!!! tip
Whenever an existing simulator needs to be wrapped instead of implemented fully in scikit-decide (e.g. a
simulator), it is recommended to overwrite #Simulation._sample() to call the external simulator and not use
the #Simulation._state_sample() helper function.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The environment outcome of the sampled transition.
"""
transition_outcome = self._state_sample(memory, action)
next_state = transition_outcome.state
observation = self._get_observation_distribution(next_state, action).sample()
return EnvironmentOutcome(
observation,
transition_outcome.value,
transition_outcome.termination,
transition_outcome.info,
)
def _state_sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
"""Compute one sample of the transition's dynamics.
This is a helper function called by default from #Simulation._sample(). It focuses on the state level, as
opposed to the observation one for the latter.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The transition outcome of the sampled transition.
"""
raise NotImplementedError
class UncertainTransitions(Simulation):
"""A domain must inherit this class if its dynamics is uncertain and provided as a white-box model.
Compared to pure simulation domains, uncertain transition ones provide in addition the full probability distribution
of next states given a memory and action.
!!! tip
Uncertain transition domains are typically stateless: they do not need to store the current state or history in
memory since it is usually passed as parameter of their functions. By default, they only become stateful
whenever they are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
def _state_sample(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
next_state = self._get_next_state_distribution(memory, action).sample()
value = self._get_transition_value(memory, action, next_state)
# Termination could be inferred using get_next_state_distribution based on next_state,
# but would introduce multiple constraints on class definitions
termination = self._is_terminal(next_state)
return TransitionOutcome(next_state, value, termination, None)
@autocastable
def get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> Distribution[D.T_state]:
"""Get the probability distribution of next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The probability distribution of next state.
"""
return self._get_next_state_distribution(memory, action)
def _get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> Distribution[D.T_state]:
"""Get the probability distribution of next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The probability distribution of next state.
"""
raise NotImplementedError
@autocastable
def get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
"""Get the value (reward or cost) of a transition.
The transition to consider is defined by the function parameters.
!!! tip
If this function never depends on the next_state parameter for its computation, it is recommended to
indicate it by overriding #UncertainTransitions._is_transition_value_dependent_on_next_state_() to return
False. This information can then be exploited by solvers to avoid computing next state to evaluate a
transition value (more efficient).
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
next_state: The next state in which the transition ends (if needed for the computation).
# Returns
The transition value (reward or cost).
"""
return self._get_transition_value(memory, action, next_state)
def _get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
"""Get the value (reward or cost) of a transition.
The transition to consider is defined by the function parameters.
!!! tip
If this function never depends on the next_state parameter for its computation, it is recommended to
indicate it by overriding #UncertainTransitions._is_transition_value_dependent_on_next_state_() to return
False. This information can then be exploited by solvers to avoid computing next state to evaluate a
transition value (more efficient).
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
next_state: The next state in which the transition ends (if needed for the computation).
# Returns
The transition value (reward or cost).
"""
raise NotImplementedError
@autocastable
def is_transition_value_dependent_on_next_state(self) -> bool:
"""Indicate whether get_transition_value() requires the next_state parameter for its computation (cached).
By default, #UncertainTransitions.is_transition_value_dependent_on_next_state() internally
calls #UncertainTransitions._is_transition_value_dependent_on_next_state_() the first time and automatically
caches its value to make future calls more efficient (since the returned value is assumed to be constant).
# Returns
True if the transition value computation depends on next_state (False otherwise).
"""
return self._is_transition_value_dependent_on_next_state()
@functools.lru_cache()
def _is_transition_value_dependent_on_next_state(self) -> bool:
"""Indicate whether _get_transition_value() requires the next_state parameter for its computation (cached).
By default, #UncertainTransitions._is_transition_value_dependent_on_next_state() internally
calls #UncertainTransitions._is_transition_value_dependent_on_next_state_() the first time and automatically
caches its value to make future calls more efficient (since the returned value is assumed to be constant).
# Returns
True if the transition value computation depends on next_state (False otherwise).
"""
return self._is_transition_value_dependent_on_next_state_()
def _is_transition_value_dependent_on_next_state_(self) -> bool:
"""Indicate whether _get_transition_value() requires the next_state parameter for its computation.
This is a helper function called by default
from #UncertainTransitions._is_transition_value_dependent_on_next_state(), the difference being that the result
is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
True if the transition value computation depends on next_state (False otherwise).
"""
return True
@autocastable
def is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
"""Indicate whether a state is terminal.
A terminal state is a state with no outgoing transition (except to itself with value 0).
# Parameters
state: The state to consider.
# Returns
True if the state is terminal (False otherwise).
"""
return self._is_terminal(state)
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
"""Indicate whether a state is terminal.
A terminal state is a state with no outgoing transition (except to itself with value 0).
# Parameters
state: The state to consider.
# Returns
True if the state is terminal (False otherwise).
"""
raise NotImplementedError
class EnumerableTransitions(UncertainTransitions):
"""A domain must inherit this class if its dynamics is uncertain (with enumerable transitions) and provided as a
white-box model.
Compared to pure uncertain transition domains, enumerable transition ones guarantee that all probability
distributions of next state are discrete.
!!! tip
Enumerable transition domains are typically stateless: they do not need to store the current state or history in
memory since it is usually passed as parameter of their functions. By default, they only become stateful
whenever they are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
@autocastable
def get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> DiscreteDistribution[D.T_state]:
"""Get the discrete probability distribution of next state given a memory and action.
!!! tip
In the Markovian case (memory only holds last state $s$), given an action $a$, this function can
be mathematically represented by $P(S'|s, a)$, where $S'$ is the next state random variable.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The discrete probability distribution of next state.
"""
return self._get_next_state_distribution(memory, action)
def _get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> DiscreteDistribution[D.T_state]:
"""Get the discrete probability distribution of next state given a memory and action.
!!! tip
In the Markovian case (memory only holds last state $s$), given an action $a$, this function can
be mathematically represented by $P(S'|s, a)$, where $S'$ is the next state random variable.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The discrete probability distribution of next state.
"""
raise NotImplementedError
class DeterministicTransitions(EnumerableTransitions):
"""A domain must inherit this class if its dynamics is deterministic and provided as a white-box model.
Compared to pure enumerable transition domains, deterministic transition ones guarantee that there is only one next
state for a given source memory (state or history) and action.
!!! tip
Deterministic transition domains are typically stateless: they do not need to store the current state or history
in memory since it is usually passed as parameter of their functions. By default, they only become stateful
whenever they are used as environments (e.g. via #Initializable.reset() and #Environment.step() functions).
"""
def _get_next_state_distribution(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> SingleValueDistribution[D.T_state]:
return SingleValueDistribution(self._get_next_state(memory, action))
@autocastable
def get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
"""Get the next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The deterministic next state.
"""
return self._get_next_state(memory, action)
def _get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
"""Get the next state given a memory and action.
# Parameters
memory: The source memory (state or history) of the transition.
action: The action taken in the given memory (state or history) triggering the transition.
# Returns
The deterministic next state.
"""
raise NotImplementedError | 0.947442 | 0.725235 |
from __future__ import annotations
import functools
from typing import Union
from skdecide.core import D, Space, autocastable
__all__ = ["Goals"]
class Goals:
"""A domain must inherit this class if it has formalized goals."""
@autocastable
def get_goals(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) domain goals space (finite or infinite set).
By default, #Goals.get_goals() internally calls #Goals._get_goals_() the first time and automatically caches its
value to make future calls more efficient (since the goals space is assumed to be constant).
!!! warning
Goal states are assumed to be fully observable (i.e. observation = state) so that there is never uncertainty
about whether the goal has been reached or not. This assumption guarantees that any policy that does not
reach the goal with certainty incurs in infinite expected cost. - *Geffner, 2013: A Concise Introduction to
Models and Methods for Automated Planning*
# Returns
The goals space.
"""
return self._get_goals()
@functools.lru_cache()
def _get_goals(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) domain goals space (finite or infinite set).
By default, #Goals._get_goals() internally calls #Goals._get_goals_() the first time and automatically caches
its value to make future calls more efficient (since the goals space is assumed to be constant).
!!! warning
Goal states are assumed to be fully observable (i.e. observation = state) so that there is never uncertainty
about whether the goal has been reached or not. This assumption guarantees that any policy that does not
reach the goal with certainty incurs in infinite expected cost. - *Geffner, 2013: A Concise Introduction to
Models and Methods for Automated Planning*
# Returns
The goals space.
"""
return self._get_goals_()
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the domain goals space (finite or infinite set).
This is a helper function called by default from #Goals._get_goals(), the difference being that the result is
not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The goals space.
"""
raise NotImplementedError
@autocastable
def is_goal(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_predicate]:
"""Indicate whether an observation belongs to the goals.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
goals space provided by #Goals.get_goals(), but it can be overridden for faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation is a goal (False otherwise).
"""
return self._is_goal(observation)
def _is_goal(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_predicate]:
"""Indicate whether an observation belongs to the goals.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
goals space provided by #Goals._get_goals(), but it can be overridden for faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation is a goal (False otherwise).
"""
goals = self._get_goals()
if self.T_agent == Union:
return goals.contains(observation)
else: # StrDict
return {k: goals[k].contains(v) for k, v in observation.items()} | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/goals.py | goals.py |
from __future__ import annotations
import functools
from typing import Union
from skdecide.core import D, Space, autocastable
__all__ = ["Goals"]
class Goals:
"""A domain must inherit this class if it has formalized goals."""
@autocastable
def get_goals(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) domain goals space (finite or infinite set).
By default, #Goals.get_goals() internally calls #Goals._get_goals_() the first time and automatically caches its
value to make future calls more efficient (since the goals space is assumed to be constant).
!!! warning
Goal states are assumed to be fully observable (i.e. observation = state) so that there is never uncertainty
about whether the goal has been reached or not. This assumption guarantees that any policy that does not
reach the goal with certainty incurs in infinite expected cost. - *Geffner, 2013: A Concise Introduction to
Models and Methods for Automated Planning*
# Returns
The goals space.
"""
return self._get_goals()
@functools.lru_cache()
def _get_goals(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) domain goals space (finite or infinite set).
By default, #Goals._get_goals() internally calls #Goals._get_goals_() the first time and automatically caches
its value to make future calls more efficient (since the goals space is assumed to be constant).
!!! warning
Goal states are assumed to be fully observable (i.e. observation = state) so that there is never uncertainty
about whether the goal has been reached or not. This assumption guarantees that any policy that does not
reach the goal with certainty incurs in infinite expected cost. - *Geffner, 2013: A Concise Introduction to
Models and Methods for Automated Planning*
# Returns
The goals space.
"""
return self._get_goals_()
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the domain goals space (finite or infinite set).
This is a helper function called by default from #Goals._get_goals(), the difference being that the result is
not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The goals space.
"""
raise NotImplementedError
@autocastable
def is_goal(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_predicate]:
"""Indicate whether an observation belongs to the goals.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
goals space provided by #Goals.get_goals(), but it can be overridden for faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation is a goal (False otherwise).
"""
return self._is_goal(observation)
def _is_goal(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_predicate]:
"""Indicate whether an observation belongs to the goals.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
goals space provided by #Goals._get_goals(), but it can be overridden for faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation is a goal (False otherwise).
"""
goals = self._get_goals()
if self.T_agent == Union:
return goals.contains(observation)
else: # StrDict
return {k: goals[k].contains(v) for k, v in observation.items()} | 0.951605 | 0.457016 |
from __future__ import annotations
from typing import Any, Optional
from skdecide.core import D, autocastable
__all__ = ["Renderable"]
class Renderable:
"""A domain must inherit this class if it can be rendered with any kind of visualization."""
@autocastable
def render(
self, memory: Optional[D.T_memory[D.T_state]] = None, **kwargs: Any
) -> Any:
"""Compute a visual render of the given memory (state or history), or the internal one if omitted.
By default, #Renderable.render() provides some boilerplate code and internally calls #Renderable._render(). The
boilerplate code automatically passes the #_memory attribute instead of the memory parameter whenever the latter
is None.
# Parameters
memory: The memory to consider (if None, the internal memory attribute #_memory is used instead).
# Returns
A render (e.g. image) or nothing (if the function handles the display directly).
"""
return self._render(memory, **kwargs)
def _render(
self, memory: Optional[D.T_memory[D.T_state]] = None, **kwargs: Any
) -> Any:
"""Compute a visual render of the given memory (state or history), or the internal one if omitted.
By default, #Renderable._render() provides some boilerplate code and internally
calls #Renderable._render_from(). The boilerplate code automatically passes the #_memory attribute instead of
the memory parameter whenever the latter is None.
# Parameters
memory: The memory to consider (if None, the internal memory attribute #_memory is used instead).
# Returns
A render (e.g. image) or nothing (if the function handles the display directly).
"""
if memory is None:
memory = self._memory
return self._render_from(memory, **kwargs)
def _render_from(self, memory: D.T_memory[D.T_state], **kwargs: Any) -> Any:
"""Compute a visual render of the given memory (state or history).
This is a helper function called by default from #Renderable._render(), the difference being that the
memory parameter is mandatory here.
# Parameters
memory: The memory to consider.
# Returns
A render (e.g. image) or nothing (if the function handles the display directly).
"""
raise NotImplementedError | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/renderability.py | renderability.py |
from __future__ import annotations
from typing import Any, Optional
from skdecide.core import D, autocastable
__all__ = ["Renderable"]
class Renderable:
"""A domain must inherit this class if it can be rendered with any kind of visualization."""
@autocastable
def render(
self, memory: Optional[D.T_memory[D.T_state]] = None, **kwargs: Any
) -> Any:
"""Compute a visual render of the given memory (state or history), or the internal one if omitted.
By default, #Renderable.render() provides some boilerplate code and internally calls #Renderable._render(). The
boilerplate code automatically passes the #_memory attribute instead of the memory parameter whenever the latter
is None.
# Parameters
memory: The memory to consider (if None, the internal memory attribute #_memory is used instead).
# Returns
A render (e.g. image) or nothing (if the function handles the display directly).
"""
return self._render(memory, **kwargs)
def _render(
self, memory: Optional[D.T_memory[D.T_state]] = None, **kwargs: Any
) -> Any:
"""Compute a visual render of the given memory (state or history), or the internal one if omitted.
By default, #Renderable._render() provides some boilerplate code and internally
calls #Renderable._render_from(). The boilerplate code automatically passes the #_memory attribute instead of
the memory parameter whenever the latter is None.
# Parameters
memory: The memory to consider (if None, the internal memory attribute #_memory is used instead).
# Returns
A render (e.g. image) or nothing (if the function handles the display directly).
"""
if memory is None:
memory = self._memory
return self._render_from(memory, **kwargs)
def _render_from(self, memory: D.T_memory[D.T_state], **kwargs: Any) -> Any:
"""Compute a visual render of the given memory (state or history).
This is a helper function called by default from #Renderable._render(), the difference being that the
memory parameter is mandatory here.
# Parameters
memory: The memory to consider.
# Returns
A render (e.g. image) or nothing (if the function handles the display directly).
"""
raise NotImplementedError | 0.945362 | 0.528716 |
from __future__ import annotations
import functools
from typing import List
from skdecide.core import Constraint, D, autocastable
__all__ = ["Constrained"]
class Constrained:
"""A domain must inherit this class if it has constraints."""
@autocastable
def get_constraints(
self,
) -> List[
Constraint[
D.T_memory[D.T_state], D.T_agent[D.T_concurrency[D.T_event]], D.T_state
]
]:
"""Get the (cached) domain constraints.
By default, #Constrained.get_constraints() internally calls #Constrained._get_constraints_() the first time and
automatically caches its value to make future calls more efficient (since the list of constraints is assumed to
be constant).
# Returns
The list of constraints.
"""
return self._get_constraints()
@functools.lru_cache()
def _get_constraints(
self,
) -> List[
Constraint[
D.T_memory[D.T_state], D.T_agent[D.T_concurrency[D.T_event]], D.T_state
]
]:
"""Get the (cached) domain constraints.
By default, #Constrained._get_constraints() internally calls #Constrained._get_constraints_() the first time and
automatically caches its value to make future calls more efficient (since the list of constraints is assumed to
be constant).
# Returns
The list of constraints.
"""
return self._get_constraints_()
def _get_constraints_(
self,
) -> List[
Constraint[
D.T_memory[D.T_state], D.T_agent[D.T_concurrency[D.T_event]], D.T_state
]
]:
"""Get the domain constraints.
This is a helper function called by default from #Constrained.get_constraints(), the difference being that the
result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The list of constraints.
"""
raise NotImplementedError | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/constraints.py | constraints.py |
from __future__ import annotations
import functools
from typing import List
from skdecide.core import Constraint, D, autocastable
__all__ = ["Constrained"]
class Constrained:
"""A domain must inherit this class if it has constraints."""
@autocastable
def get_constraints(
self,
) -> List[
Constraint[
D.T_memory[D.T_state], D.T_agent[D.T_concurrency[D.T_event]], D.T_state
]
]:
"""Get the (cached) domain constraints.
By default, #Constrained.get_constraints() internally calls #Constrained._get_constraints_() the first time and
automatically caches its value to make future calls more efficient (since the list of constraints is assumed to
be constant).
# Returns
The list of constraints.
"""
return self._get_constraints()
@functools.lru_cache()
def _get_constraints(
self,
) -> List[
Constraint[
D.T_memory[D.T_state], D.T_agent[D.T_concurrency[D.T_event]], D.T_state
]
]:
"""Get the (cached) domain constraints.
By default, #Constrained._get_constraints() internally calls #Constrained._get_constraints_() the first time and
automatically caches its value to make future calls more efficient (since the list of constraints is assumed to
be constant).
# Returns
The list of constraints.
"""
return self._get_constraints_()
def _get_constraints_(
self,
) -> List[
Constraint[
D.T_memory[D.T_state], D.T_agent[D.T_concurrency[D.T_event]], D.T_state
]
]:
"""Get the domain constraints.
This is a helper function called by default from #Constrained.get_constraints(), the difference being that the
result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The list of constraints.
"""
raise NotImplementedError | 0.911695 | 0.377369 |
from __future__ import annotations
import functools
from typing import Optional, Union
from skdecide.core import D, Distribution, SingleValueDistribution, Space, autocastable
__all__ = ["PartiallyObservable", "TransformedObservable", "FullyObservable"]
class PartiallyObservable:
"""A domain must inherit this class if it is partially observable.
"Partially observable" means that the observation provided to the agent is computed from (but generally not equal
to) the internal state of the domain. Additionally, according to literature, a partially observable domain must
provide the probability distribution of the observation given a state and action.
"""
@autocastable
def get_observation_space(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) observation space (finite or infinite set).
By default, #PartiallyObservable.get_observation_space() internally
calls #PartiallyObservable._get_observation_space_() the first time and automatically caches its value to make
future calls more efficient (since the observation space is assumed to be constant).
# Returns
The observation space.
"""
return self._get_observation_space()
@functools.lru_cache()
def _get_observation_space(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) observation space (finite or infinite set).
By default, #PartiallyObservable._get_observation_space() internally
calls #PartiallyObservable._get_observation_space_() the first time and automatically caches its value to make
future calls more efficient (since the observation space is assumed to be constant).
# Returns
The observation space.
"""
return self._get_observation_space_()
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the observation space (finite or infinite set).
This is a helper function called by default from #PartiallyObservable._get_observation_space(), the difference
being that the result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The observation space.
"""
raise NotImplementedError
@autocastable
def is_observation(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check that an observation indeed belongs to the domain observation space.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
observation space provided by #PartiallyObservable.get_observation_space(), but it can be overridden for
faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation belongs to the domain observation space (False otherwise).
"""
return self._is_observation(observation)
def _is_observation(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check that an observation indeed belongs to the domain observation space.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
observation space provided by #PartiallyObservable._get_observation_space(), but it can be overridden for
faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation belongs to the domain observation space (False otherwise).
"""
observation_space = self._get_observation_space()
if self.T_agent == Union:
return observation_space.contains(observation)
else: # StrDict
return all(observation_space[k].contains(v) for k, v in observation.items())
@autocastable
def get_observation_distribution(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> Distribution[D.T_agent[D.T_observation]]:
"""Get the probability distribution of the observation given a state and action.
In mathematical terms (discrete case), given an action $a$, this function represents: $P(O|s, a)$,
where $O$ is the random variable of the observation.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
return self._get_observation_distribution(state, action)
def _get_observation_distribution(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> Distribution[D.T_agent[D.T_observation]]:
"""Get the probability distribution of the observation given a state and action.
In mathematical terms (discrete case), given an action $a$, this function represents: $P(O|s, a)$,
where $O$ is the random variable of the observation.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
raise NotImplementedError
class TransformedObservable(PartiallyObservable):
"""A domain must inherit this class if it is transformed observable.
"Transformed observable" means that the observation provided to the agent is deterministically computed from (but
generally not equal to) the internal state of the domain.
"""
def _get_observation_distribution(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> Distribution[D.T_agent[D.T_observation]]:
return SingleValueDistribution(self._get_observation(state, action))
@autocastable
def get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
"""Get the deterministic observation given a state and action.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
return self._get_observation(state, action)
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
"""Get the deterministic observation given a state and action.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
raise NotImplementedError
class FullyObservable(TransformedObservable):
"""A domain must inherit this class if it is fully observable.
"Fully observable" means that the observation provided to the agent is equal to the internal state of the domain.
!!! warning
In the case of fully observable domains, make sure that the observation type D.T_observation is equal to the
state type D.T_state.
"""
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
return state | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/observability.py | observability.py |
from __future__ import annotations
import functools
from typing import Optional, Union
from skdecide.core import D, Distribution, SingleValueDistribution, Space, autocastable
__all__ = ["PartiallyObservable", "TransformedObservable", "FullyObservable"]
class PartiallyObservable:
"""A domain must inherit this class if it is partially observable.
"Partially observable" means that the observation provided to the agent is computed from (but generally not equal
to) the internal state of the domain. Additionally, according to literature, a partially observable domain must
provide the probability distribution of the observation given a state and action.
"""
@autocastable
def get_observation_space(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) observation space (finite or infinite set).
By default, #PartiallyObservable.get_observation_space() internally
calls #PartiallyObservable._get_observation_space_() the first time and automatically caches its value to make
future calls more efficient (since the observation space is assumed to be constant).
# Returns
The observation space.
"""
return self._get_observation_space()
@functools.lru_cache()
def _get_observation_space(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the (cached) observation space (finite or infinite set).
By default, #PartiallyObservable._get_observation_space() internally
calls #PartiallyObservable._get_observation_space_() the first time and automatically caches its value to make
future calls more efficient (since the observation space is assumed to be constant).
# Returns
The observation space.
"""
return self._get_observation_space_()
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
"""Get the observation space (finite or infinite set).
This is a helper function called by default from #PartiallyObservable._get_observation_space(), the difference
being that the result is not cached here.
!!! tip
The underscore at the end of this function's name is a convention to remind that its result should be
constant.
# Returns
The observation space.
"""
raise NotImplementedError
@autocastable
def is_observation(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check that an observation indeed belongs to the domain observation space.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
observation space provided by #PartiallyObservable.get_observation_space(), but it can be overridden for
faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation belongs to the domain observation space (False otherwise).
"""
return self._is_observation(observation)
def _is_observation(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check that an observation indeed belongs to the domain observation space.
!!! tip
By default, this function is implemented using the #skdecide.core.Space.contains() function on the domain
observation space provided by #PartiallyObservable._get_observation_space(), but it can be overridden for
faster implementations.
# Parameters
observation: The observation to consider.
# Returns
True if the observation belongs to the domain observation space (False otherwise).
"""
observation_space = self._get_observation_space()
if self.T_agent == Union:
return observation_space.contains(observation)
else: # StrDict
return all(observation_space[k].contains(v) for k, v in observation.items())
@autocastable
def get_observation_distribution(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> Distribution[D.T_agent[D.T_observation]]:
"""Get the probability distribution of the observation given a state and action.
In mathematical terms (discrete case), given an action $a$, this function represents: $P(O|s, a)$,
where $O$ is the random variable of the observation.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
return self._get_observation_distribution(state, action)
def _get_observation_distribution(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> Distribution[D.T_agent[D.T_observation]]:
"""Get the probability distribution of the observation given a state and action.
In mathematical terms (discrete case), given an action $a$, this function represents: $P(O|s, a)$,
where $O$ is the random variable of the observation.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
raise NotImplementedError
class TransformedObservable(PartiallyObservable):
"""A domain must inherit this class if it is transformed observable.
"Transformed observable" means that the observation provided to the agent is deterministically computed from (but
generally not equal to) the internal state of the domain.
"""
def _get_observation_distribution(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> Distribution[D.T_agent[D.T_observation]]:
return SingleValueDistribution(self._get_observation(state, action))
@autocastable
def get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
"""Get the deterministic observation given a state and action.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
return self._get_observation(state, action)
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
"""Get the deterministic observation given a state and action.
# Parameters
state: The state to be observed.
action: The last applied action (or None if the state is an initial state).
# Returns
The probability distribution of the observation.
"""
raise NotImplementedError
class FullyObservable(TransformedObservable):
"""A domain must inherit this class if it is fully observable.
"Fully observable" means that the observation provided to the agent is equal to the internal state of the domain.
!!! warning
In the case of fully observable domains, make sure that the observation type D.T_observation is equal to the
state type D.T_state.
"""
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
return state | 0.965576 | 0.744122 |
from __future__ import annotations
from enum import Enum
from typing import Dict, List, Optional, Union
__all__ = [
"UncertainResourceAvailabilityChanges",
"DeterministicResourceAvailabilityChanges",
"WithoutResourceAvailabilityChange",
]
class UncertainResourceAvailabilityChanges:
"""A domain must inherit this class if the availability of its resource vary in an uncertain way over time."""
def _sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
raise NotImplementedError
def sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
return self._sample_quantity_resource(resource=resource, time=time, **kwargs)
def check_unique_resource_names(
self,
) -> bool: # TODO: How to enforce a call to this function when initialising a domain ?
"""Return True if there are no duplicates in resource names across both resource types
and resource units name lists."""
list1 = self.get_resource_types_names() + self.get_resource_units_names()
list2 = list(set(list1))
check_1 = len(list1) == len(list2) # no duplicated names
check_2 = len(list2) > 0 # at least one resource
return check_1 and check_2
class DeterministicResourceAvailabilityChanges(UncertainResourceAvailabilityChanges):
"""A domain must inherit this class if the availability of its resource vary in a deterministic way over time."""
def _get_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Return the resource availability (int) for the given resource
(either resource type or resource unit) at the given time."""
raise NotImplementedError
def get_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Return the resource availability (int) for the given resource
(either resource type or resource unit) at the given time."""
return self._get_quantity_resource(resource=resource, time=time, **kwargs)
def _sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
return self.get_quantity_resource(resource, time, **kwargs)
class WithoutResourceAvailabilityChange(DeterministicResourceAvailabilityChanges):
"""A domain must inherit this class if the availability of its resource does not vary over time."""
def _get_original_quantity_resource(self, resource: str, **kwargs) -> int:
"""Return the resource availability (int) for the given resource (either resource type or resource unit)."""
raise NotImplementedError
def get_original_quantity_resource(self, resource: str, **kwargs) -> int:
"""Return the resource availability (int) for the given resource (either resource type or resource unit)."""
return self._get_original_quantity_resource(resource=resource, **kwargs)
def _get_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Return the resource availability (int) for the given resource
(either resource type or resource unit) at the given time."""
return self.get_original_quantity_resource(resource)
def _sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
return self.get_original_quantity_resource(resource) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/resource_availability.py | resource_availability.py |
from __future__ import annotations
from enum import Enum
from typing import Dict, List, Optional, Union
__all__ = [
"UncertainResourceAvailabilityChanges",
"DeterministicResourceAvailabilityChanges",
"WithoutResourceAvailabilityChange",
]
class UncertainResourceAvailabilityChanges:
"""A domain must inherit this class if the availability of its resource vary in an uncertain way over time."""
def _sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
raise NotImplementedError
def sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
return self._sample_quantity_resource(resource=resource, time=time, **kwargs)
def check_unique_resource_names(
self,
) -> bool: # TODO: How to enforce a call to this function when initialising a domain ?
"""Return True if there are no duplicates in resource names across both resource types
and resource units name lists."""
list1 = self.get_resource_types_names() + self.get_resource_units_names()
list2 = list(set(list1))
check_1 = len(list1) == len(list2) # no duplicated names
check_2 = len(list2) > 0 # at least one resource
return check_1 and check_2
class DeterministicResourceAvailabilityChanges(UncertainResourceAvailabilityChanges):
"""A domain must inherit this class if the availability of its resource vary in a deterministic way over time."""
def _get_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Return the resource availability (int) for the given resource
(either resource type or resource unit) at the given time."""
raise NotImplementedError
def get_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Return the resource availability (int) for the given resource
(either resource type or resource unit) at the given time."""
return self._get_quantity_resource(resource=resource, time=time, **kwargs)
def _sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
return self.get_quantity_resource(resource, time, **kwargs)
class WithoutResourceAvailabilityChange(DeterministicResourceAvailabilityChanges):
"""A domain must inherit this class if the availability of its resource does not vary over time."""
def _get_original_quantity_resource(self, resource: str, **kwargs) -> int:
"""Return the resource availability (int) for the given resource (either resource type or resource unit)."""
raise NotImplementedError
def get_original_quantity_resource(self, resource: str, **kwargs) -> int:
"""Return the resource availability (int) for the given resource (either resource type or resource unit)."""
return self._get_original_quantity_resource(resource=resource, **kwargs)
def _get_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Return the resource availability (int) for the given resource
(either resource type or resource unit) at the given time."""
return self.get_original_quantity_resource(resource)
def _sample_quantity_resource(self, resource: str, time: int, **kwargs) -> int:
"""Sample an amount of resource availability (int) for the given resource
(either resource type or resource unit) at the given time. This number should be the sum of the number of
resource available at time t and the number of resource of this type consumed so far)."""
return self.get_original_quantity_resource(resource) | 0.89282 | 0.245548 |
from __future__ import annotations
from typing import Dict
__all__ = [
"TimeWindow",
"ClassicTimeWindow",
"StartFromOnlyTimeWindow",
"StartBeforeOnlyTimeWindow",
"EndFromOnlyTimeWindow",
"EndBeforeOnlyTimeWindow",
"StartTimeWindow",
"EndTimeWindow",
"EmptyTimeWindow",
"WithTimeWindow",
"WithoutTimeWindow",
]
class TimeWindow:
"""Defines a time window with earliest start, latest start, earliest end and latest end only."""
def __init__(
self,
earliest_start: int,
latest_start: int,
earliest_end: int,
latest_end: int,
max_horizon: int,
) -> None:
self.earliest_start = earliest_start
self.latest_start = latest_start
self.earliest_end = earliest_end
self.latest_end = latest_end
class ClassicTimeWindow(TimeWindow):
"""Defines a time window with earliest start and latest end only."""
def __init__(self, earliest_start: int, latest_end: int, max_horizon: int) -> None:
self.earliest_start = earliest_start
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = latest_end
class StartFromOnlyTimeWindow(TimeWindow):
"""Defines a time window with an earliest start only."""
def __init__(self, earliest_start: int, max_horizon: int) -> None:
self.earliest_start = earliest_start
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = max_horizon
class StartBeforeOnlyTimeWindow(TimeWindow):
"""Defines a time window with an latest start only."""
def __init__(self, latest_start: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = latest_start
self.earliest_end = 0
self.latest_end = max_horizon
class EndFromOnlyTimeWindow(TimeWindow):
"""Defines a time window with an earliest end only."""
def __init__(self, earliest_end: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = earliest_end
self.latest_end = max_horizon
class EndBeforeOnlyTimeWindow(TimeWindow):
"""Defines a time window with a latest end only."""
def __init__(self, latest_end: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = latest_end
class StartTimeWindow(TimeWindow):
"""Defines a time window with an earliest start and a latest start only."""
def __init__(
self, earliest_start: int, latest_start: int, max_horizon: int
) -> None:
self.earliest_start = earliest_start
self.latest_start = latest_start
self.earliest_end = 0
self.latest_end = max_horizon
class EndTimeWindow(TimeWindow):
"""Defines a time window with an earliest end and a latest end only."""
def __init__(self, earliest_end: int, latest_end: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = earliest_end
self.latest_end = latest_end
class EmptyTimeWindow(TimeWindow):
"""Defines an empty time window."""
def __init__(self, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = max_horizon
class WithTimeWindow:
"""A domain must inherit this class if some tasks have time windows defined."""
def get_time_window(self) -> Dict[int, TimeWindow]:
"""
Return a dictionary where the key is the id of a task (int)
and the value is a TimeWindow object.
Note that the max time horizon needs to be provided to the TimeWindow constructors
e.g.
{
1: TimeWindow(10, 15, 20, 30, self.get_max_horizon())
2: EmptyTimeWindow(self.get_max_horizon())
3: EndTimeWindow(20, 25, self.get_max_horizon())
4: EndBeforeOnlyTimeWindow(40, self.get_max_horizon())
}
# Returns
A dictionary of TimeWindow objects.
"""
return self._get_time_window()
def _get_time_window(self) -> Dict[int, TimeWindow]:
"""
Return a dictionary where the key is the id of a task (int)
and the value is a TimeWindow object.
Note that the max time horizon needs to be provided to the TimeWindow constructors
e.g.
{
1: TimeWindow(10, 15, 20, 30, self.get_max_horizon())
2: EmptyTimeWindow(self.get_max_horizon())
3: EndTimeWindow(20, 25, self.get_max_horizon())
4: EndBeforeOnlyTimeWindow(40, self.get_max_horizon())
}
# Returns
A dictionary of TimeWindow objects.
"""
raise NotImplementedError
class WithoutTimeWindow(WithTimeWindow):
"""A domain must inherit this class if none of the tasks have restrictions on start times or end times."""
def _get_time_window(self) -> Dict[int, TimeWindow]:
"""
Return a dictionary where the key is the id of a task (int)
and the value is a dictionary of EmptyTimeWindow object.
# Returns
A dictionary of TimeWindow objects.
"""
ids = self.get_tasks_ids()
the_dict = {}
for id in ids:
the_dict[id] = EmptyTimeWindow(self.get_max_horizon())
return the_dict | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/time_windows.py | time_windows.py |
from __future__ import annotations
from typing import Dict
__all__ = [
"TimeWindow",
"ClassicTimeWindow",
"StartFromOnlyTimeWindow",
"StartBeforeOnlyTimeWindow",
"EndFromOnlyTimeWindow",
"EndBeforeOnlyTimeWindow",
"StartTimeWindow",
"EndTimeWindow",
"EmptyTimeWindow",
"WithTimeWindow",
"WithoutTimeWindow",
]
class TimeWindow:
"""Defines a time window with earliest start, latest start, earliest end and latest end only."""
def __init__(
self,
earliest_start: int,
latest_start: int,
earliest_end: int,
latest_end: int,
max_horizon: int,
) -> None:
self.earliest_start = earliest_start
self.latest_start = latest_start
self.earliest_end = earliest_end
self.latest_end = latest_end
class ClassicTimeWindow(TimeWindow):
"""Defines a time window with earliest start and latest end only."""
def __init__(self, earliest_start: int, latest_end: int, max_horizon: int) -> None:
self.earliest_start = earliest_start
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = latest_end
class StartFromOnlyTimeWindow(TimeWindow):
"""Defines a time window with an earliest start only."""
def __init__(self, earliest_start: int, max_horizon: int) -> None:
self.earliest_start = earliest_start
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = max_horizon
class StartBeforeOnlyTimeWindow(TimeWindow):
"""Defines a time window with an latest start only."""
def __init__(self, latest_start: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = latest_start
self.earliest_end = 0
self.latest_end = max_horizon
class EndFromOnlyTimeWindow(TimeWindow):
"""Defines a time window with an earliest end only."""
def __init__(self, earliest_end: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = earliest_end
self.latest_end = max_horizon
class EndBeforeOnlyTimeWindow(TimeWindow):
"""Defines a time window with a latest end only."""
def __init__(self, latest_end: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = latest_end
class StartTimeWindow(TimeWindow):
"""Defines a time window with an earliest start and a latest start only."""
def __init__(
self, earliest_start: int, latest_start: int, max_horizon: int
) -> None:
self.earliest_start = earliest_start
self.latest_start = latest_start
self.earliest_end = 0
self.latest_end = max_horizon
class EndTimeWindow(TimeWindow):
"""Defines a time window with an earliest end and a latest end only."""
def __init__(self, earliest_end: int, latest_end: int, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = earliest_end
self.latest_end = latest_end
class EmptyTimeWindow(TimeWindow):
"""Defines an empty time window."""
def __init__(self, max_horizon: int) -> None:
self.earliest_start = 0
self.latest_start = max_horizon
self.earliest_end = 0
self.latest_end = max_horizon
class WithTimeWindow:
"""A domain must inherit this class if some tasks have time windows defined."""
def get_time_window(self) -> Dict[int, TimeWindow]:
"""
Return a dictionary where the key is the id of a task (int)
and the value is a TimeWindow object.
Note that the max time horizon needs to be provided to the TimeWindow constructors
e.g.
{
1: TimeWindow(10, 15, 20, 30, self.get_max_horizon())
2: EmptyTimeWindow(self.get_max_horizon())
3: EndTimeWindow(20, 25, self.get_max_horizon())
4: EndBeforeOnlyTimeWindow(40, self.get_max_horizon())
}
# Returns
A dictionary of TimeWindow objects.
"""
return self._get_time_window()
def _get_time_window(self) -> Dict[int, TimeWindow]:
"""
Return a dictionary where the key is the id of a task (int)
and the value is a TimeWindow object.
Note that the max time horizon needs to be provided to the TimeWindow constructors
e.g.
{
1: TimeWindow(10, 15, 20, 30, self.get_max_horizon())
2: EmptyTimeWindow(self.get_max_horizon())
3: EndTimeWindow(20, 25, self.get_max_horizon())
4: EndBeforeOnlyTimeWindow(40, self.get_max_horizon())
}
# Returns
A dictionary of TimeWindow objects.
"""
raise NotImplementedError
class WithoutTimeWindow(WithTimeWindow):
"""A domain must inherit this class if none of the tasks have restrictions on start times or end times."""
def _get_time_window(self) -> Dict[int, TimeWindow]:
"""
Return a dictionary where the key is the id of a task (int)
and the value is a dictionary of EmptyTimeWindow object.
# Returns
A dictionary of TimeWindow objects.
"""
ids = self.get_tasks_ids()
the_dict = {}
for id in ids:
the_dict[id] = EmptyTimeWindow(self.get_max_horizon())
return the_dict | 0.957338 | 0.345547 |
from __future__ import annotations
from typing import Dict, Optional
from skdecide.core import DiscreteDistribution, Distribution
__all__ = [
"SimulatedTaskDuration",
"UncertainMultivariateTaskDuration",
"UncertainUnivariateTaskDuration",
"UncertainBoundedTaskDuration",
"UniformBoundedTaskDuration",
"EnumerableTaskDuration",
"DeterministicTaskDuration",
]
class SimulatedTaskDuration:
"""A domain must inherit this class if the task duration requires sampling from a simulation."""
# TODO, this can be challenged.. for uncertain domain (with adistribution, you want to sample a different value each time.
# that 's why i override this sample_task_duration in below level.
def sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Sample, store and return task duration for the given task in the given mode."""
if task not in self.sampled_durations:
self.sampled_durations[task] = {}
if mode not in self.sampled_durations[task]:
self.sampled_durations[task][mode] = {}
if progress_from not in self.sampled_durations[task][mode]:
self.sampled_durations[task][mode][
progress_from
] = self._sample_task_duration(task, mode, progress_from)
return self.sampled_durations[task][mode][progress_from]
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode."""
raise NotImplementedError
def get_latest_sampled_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
):
if task in self.sampled_durations:
if mode in self.sampled_durations[task]:
if progress_from in self.sampled_durations[task][mode]:
return self.sampled_durations[task][mode][progress_from]
return self.sample_task_duration(task, mode, progress_from)
# TODO: Can we currently model multivariate distribution with the Distribution object ?
class UncertainMultivariateTaskDuration(SimulatedTaskDuration):
"""A domain must inherit this class if the task duration is uncertain and follows a know multivariate
distribution."""
def sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying multiivariate distribution."""
return self._sample_task_duration(
task=task, mode=mode, progress_from=progress_from
)
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying multiivariate distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> Distribution:
"""Return the multivariate Distribution of the duration of the given task in the given mode.
Multivariate seetings need to be provided."""
return self._get_task_duration_distribution(
task, mode, progress_from, multivariate_settings
)
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> Distribution:
"""Return the multivariate Distribution of the duration of the given task in the given mode.
Multivariate seetings need to be provided."""
raise NotImplementedError
class UncertainUnivariateTaskDuration(UncertainMultivariateTaskDuration):
"""A domain must inherit this class if the task duration is uncertain and follows a know univariate distribution."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying univariate distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> Distribution: # TODO, problem here i think
"""Return the univariate Distribution of the duration of the given task in the given mode."""
raise NotImplementedError
class UncertainBoundedTaskDuration(UncertainUnivariateTaskDuration):
"""A domain must inherit this class if the task duration is known to be between a lower and upper bound
and follows a known distribution between these bounds."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying univariate bounded distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> DiscreteDistribution:
"""Return the Distribution of the duration of the given task in the given mode.
The distribution returns values beween the defined lower and upper bounds."""
raise NotImplementedError
def get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
return self._get_task_duration_upper_bound(task, mode, progress_from)
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
raise NotImplementedError
def get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
return self._get_task_duration_lower_bound(task, mode, progress_from)
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
raise NotImplementedError
class UniformBoundedTaskDuration(UncertainBoundedTaskDuration):
"""A domain must inherit this class if the task duration is known to be between a lower and upper bound
and follows a uniform distribution between these bounds."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying univariate uniform bounded distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> DiscreteDistribution:
"""Return the Distribution of the duration of the given task in the given mode.
The distribution is uniform between the defined lower and upper bounds."""
lb = self.get_task_duration_lower_bound(task, mode)
ub = self.get_task_duration_upper_bound(task, mode)
n_vals = ub - lb + 1
p = 1.0 / float(n_vals)
values = [(x, p) for x in range(lb, ub + 1)]
return DiscreteDistribution(values)
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
raise NotImplementedError
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
raise NotImplementedError
class EnumerableTaskDuration(UncertainBoundedTaskDuration):
"""A domain must inherit this class if the task duration for each task is enumerable."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> DiscreteDistribution:
"""Return the Distribution of the duration of the given task in the given mode.
as an Enumerable."""
raise NotImplementedError
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
duration_vals = [
x[0] for x in self.get_task_duration_distribution(task, mode).get_values()
]
return max(duration_vals)
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
duration_vals = [
x[0] for x in self.get_task_duration_distribution(task, mode).get_values()
]
return min(duration_vals)
class DeterministicTaskDuration(EnumerableTaskDuration):
"""A domain must inherit this class if the task durations are known and deterministic."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode."""
return self.get_task_duration(task, mode, progress_from)
def get_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the fixed deterministic task duration of the given task in the given mode."""
return self._get_task_duration(task, mode, progress_from)
def _get_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the fixed deterministic task duration of the given task in the given mode."""
raise NotImplementedError
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
):
"""Return the Distribution of the duration of the given task in the given mode.
Because the duration is deterministic, the distribution always returns the same duration."""
return DiscreteDistribution([(self.get_task_duration(task, mode), 1)])
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
return self.get_task_duration(task, mode)
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
return self.get_task_duration(task, mode) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/task_duration.py | task_duration.py |
from __future__ import annotations
from typing import Dict, Optional
from skdecide.core import DiscreteDistribution, Distribution
__all__ = [
"SimulatedTaskDuration",
"UncertainMultivariateTaskDuration",
"UncertainUnivariateTaskDuration",
"UncertainBoundedTaskDuration",
"UniformBoundedTaskDuration",
"EnumerableTaskDuration",
"DeterministicTaskDuration",
]
class SimulatedTaskDuration:
"""A domain must inherit this class if the task duration requires sampling from a simulation."""
# TODO, this can be challenged.. for uncertain domain (with adistribution, you want to sample a different value each time.
# that 's why i override this sample_task_duration in below level.
def sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Sample, store and return task duration for the given task in the given mode."""
if task not in self.sampled_durations:
self.sampled_durations[task] = {}
if mode not in self.sampled_durations[task]:
self.sampled_durations[task][mode] = {}
if progress_from not in self.sampled_durations[task][mode]:
self.sampled_durations[task][mode][
progress_from
] = self._sample_task_duration(task, mode, progress_from)
return self.sampled_durations[task][mode][progress_from]
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode."""
raise NotImplementedError
def get_latest_sampled_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
):
if task in self.sampled_durations:
if mode in self.sampled_durations[task]:
if progress_from in self.sampled_durations[task][mode]:
return self.sampled_durations[task][mode][progress_from]
return self.sample_task_duration(task, mode, progress_from)
# TODO: Can we currently model multivariate distribution with the Distribution object ?
class UncertainMultivariateTaskDuration(SimulatedTaskDuration):
"""A domain must inherit this class if the task duration is uncertain and follows a know multivariate
distribution."""
def sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying multiivariate distribution."""
return self._sample_task_duration(
task=task, mode=mode, progress_from=progress_from
)
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying multiivariate distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> Distribution:
"""Return the multivariate Distribution of the duration of the given task in the given mode.
Multivariate seetings need to be provided."""
return self._get_task_duration_distribution(
task, mode, progress_from, multivariate_settings
)
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> Distribution:
"""Return the multivariate Distribution of the duration of the given task in the given mode.
Multivariate seetings need to be provided."""
raise NotImplementedError
class UncertainUnivariateTaskDuration(UncertainMultivariateTaskDuration):
"""A domain must inherit this class if the task duration is uncertain and follows a know univariate distribution."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying univariate distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> Distribution: # TODO, problem here i think
"""Return the univariate Distribution of the duration of the given task in the given mode."""
raise NotImplementedError
class UncertainBoundedTaskDuration(UncertainUnivariateTaskDuration):
"""A domain must inherit this class if the task duration is known to be between a lower and upper bound
and follows a known distribution between these bounds."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying univariate bounded distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> DiscreteDistribution:
"""Return the Distribution of the duration of the given task in the given mode.
The distribution returns values beween the defined lower and upper bounds."""
raise NotImplementedError
def get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
return self._get_task_duration_upper_bound(task, mode, progress_from)
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
raise NotImplementedError
def get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
return self._get_task_duration_lower_bound(task, mode, progress_from)
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
raise NotImplementedError
class UniformBoundedTaskDuration(UncertainBoundedTaskDuration):
"""A domain must inherit this class if the task duration is known to be between a lower and upper bound
and follows a uniform distribution between these bounds."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode,
sampled from the underlying univariate uniform bounded distribution."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> DiscreteDistribution:
"""Return the Distribution of the duration of the given task in the given mode.
The distribution is uniform between the defined lower and upper bounds."""
lb = self.get_task_duration_lower_bound(task, mode)
ub = self.get_task_duration_upper_bound(task, mode)
n_vals = ub - lb + 1
p = 1.0 / float(n_vals)
values = [(x, p) for x in range(lb, ub + 1)]
return DiscreteDistribution(values)
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
raise NotImplementedError
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
raise NotImplementedError
class EnumerableTaskDuration(UncertainBoundedTaskDuration):
"""A domain must inherit this class if the task duration for each task is enumerable."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode."""
return self.get_task_duration_distribution(task, mode).sample()
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
) -> DiscreteDistribution:
"""Return the Distribution of the duration of the given task in the given mode.
as an Enumerable."""
raise NotImplementedError
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
duration_vals = [
x[0] for x in self.get_task_duration_distribution(task, mode).get_values()
]
return max(duration_vals)
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
duration_vals = [
x[0] for x in self.get_task_duration_distribution(task, mode).get_values()
]
return min(duration_vals)
class DeterministicTaskDuration(EnumerableTaskDuration):
"""A domain must inherit this class if the task durations are known and deterministic."""
def _sample_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return a task duration for the given task in the given mode."""
return self.get_task_duration(task, mode, progress_from)
def get_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the fixed deterministic task duration of the given task in the given mode."""
return self._get_task_duration(task, mode, progress_from)
def _get_task_duration(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the fixed deterministic task duration of the given task in the given mode."""
raise NotImplementedError
def _get_task_duration_distribution(
self,
task: int,
mode: Optional[int] = 1,
progress_from: Optional[float] = 0.0,
multivariate_settings: Optional[Dict[str, int]] = None,
):
"""Return the Distribution of the duration of the given task in the given mode.
Because the duration is deterministic, the distribution always returns the same duration."""
return DiscreteDistribution([(self.get_task_duration(task, mode), 1)])
def _get_task_duration_upper_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the upper bound for the task duration of the given task in the given mode."""
return self.get_task_duration(task, mode)
def _get_task_duration_lower_bound(
self, task: int, mode: Optional[int] = 1, progress_from: Optional[float] = 0.0
) -> int:
"""Return the lower bound for the task duration of the given task in the given mode."""
return self.get_task_duration(task, mode) | 0.860925 | 0.385751 |
from __future__ import annotations
from typing import Dict
from skdecide.builders.domain.scheduling.scheduling_domains_modelling import State
__all__ = ["MixedRenewable", "RenewableOnly"]
class MixedRenewable:
"""A domain must inherit this class if the resource available are non-renewable and renewable."""
def get_resource_renewability(self) -> Dict[str, bool]:
"""
Return a dictionary where the key is a resource name (string)
and the value whether this resource is renewable (True) or not (False)."""
return self._get_resource_renewability()
def _get_resource_renewability(self) -> Dict[str, bool]:
"""
Return a dictionary where the key is a resource name (string)
and the value whether this resource is renewable (True) or not (False)."""
raise NotImplementedError
def is_renewable(self, resource: str):
return self.get_resource_renewability()[resource]
def all_tasks_possible(self, state: State) -> bool:
"""Return a True is for each task there is at least one mode in which the task can be executed, given the
resource configuration in the state provided as argument. Returns False otherwise.
If this function returns False, the scheduling problem is unsolvable from this state.
This is to cope with the use of non-renable resources that may lead to state from which a
task will not be possible anymore."""
resource_types_names = self.get_resource_types_names()
resource_not_renewable = set(
res
for res, renewable in self.get_resource_renewability().items()
if res in resource_types_names and not renewable
)
modes_details = self.get_tasks_modes()
remaining_tasks = (
state.task_ids.difference(state.tasks_complete)
.difference(state.tasks_progress)
.difference(state.tasks_unsatisfiable)
)
for task_id in remaining_tasks:
for mode_consumption in modes_details[task_id].values():
for res in resource_not_renewable:
need = mode_consumption.get_resource_need(res)
avail = state.resource_availability[res] - state.resource_used[res]
if avail - need < 0:
break
else:
# The else-clause runs if loop completes normally, which means
# that we found a mode for which all resources are available, and
# we can exit from the loop on modes.
break
else:
# This task is not possible
return False
return True
class RenewableOnly(MixedRenewable):
"""A domain must inherit this class if the resource available are ALL renewable."""
def _get_resource_renewability(self) -> Dict[str, bool]:
"""Return a dictionary where the key is a resource name (string)
and the value whether this resource is renewable (True) or not (False)."""
names = (
self.get_resource_types_names() + self.get_resource_units_names()
) # comes from resource_handling...
renewability = {}
for name in names:
renewability[name] = True
return renewability
def is_renewable(self, resource: str):
return True | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/resource_renewability.py | resource_renewability.py |
from __future__ import annotations
from typing import Dict
from skdecide.builders.domain.scheduling.scheduling_domains_modelling import State
__all__ = ["MixedRenewable", "RenewableOnly"]
class MixedRenewable:
"""A domain must inherit this class if the resource available are non-renewable and renewable."""
def get_resource_renewability(self) -> Dict[str, bool]:
"""
Return a dictionary where the key is a resource name (string)
and the value whether this resource is renewable (True) or not (False)."""
return self._get_resource_renewability()
def _get_resource_renewability(self) -> Dict[str, bool]:
"""
Return a dictionary where the key is a resource name (string)
and the value whether this resource is renewable (True) or not (False)."""
raise NotImplementedError
def is_renewable(self, resource: str):
return self.get_resource_renewability()[resource]
def all_tasks_possible(self, state: State) -> bool:
"""Return a True is for each task there is at least one mode in which the task can be executed, given the
resource configuration in the state provided as argument. Returns False otherwise.
If this function returns False, the scheduling problem is unsolvable from this state.
This is to cope with the use of non-renable resources that may lead to state from which a
task will not be possible anymore."""
resource_types_names = self.get_resource_types_names()
resource_not_renewable = set(
res
for res, renewable in self.get_resource_renewability().items()
if res in resource_types_names and not renewable
)
modes_details = self.get_tasks_modes()
remaining_tasks = (
state.task_ids.difference(state.tasks_complete)
.difference(state.tasks_progress)
.difference(state.tasks_unsatisfiable)
)
for task_id in remaining_tasks:
for mode_consumption in modes_details[task_id].values():
for res in resource_not_renewable:
need = mode_consumption.get_resource_need(res)
avail = state.resource_availability[res] - state.resource_used[res]
if avail - need < 0:
break
else:
# The else-clause runs if loop completes normally, which means
# that we found a mode for which all resources are available, and
# we can exit from the loop on modes.
break
else:
# This task is not possible
return False
return True
class RenewableOnly(MixedRenewable):
"""A domain must inherit this class if the resource available are ALL renewable."""
def _get_resource_renewability(self) -> Dict[str, bool]:
"""Return a dictionary where the key is a resource name (string)
and the value whether this resource is renewable (True) or not (False)."""
names = (
self.get_resource_types_names() + self.get_resource_units_names()
) # comes from resource_handling...
renewability = {}
for name in names:
renewability[name] = True
return renewability
def is_renewable(self, resource: str):
return True | 0.888777 | 0.32662 |
from __future__ import annotations
from typing import Dict, List
__all__ = [
"WithModeCosts",
"WithoutModeCosts",
"WithResourceCosts",
"WithoutResourceCosts",
]
class WithModeCosts:
"""A domain must inherit this class if there are some mode costs to consider."""
def _get_mode_costs(
self,
) -> Dict[
int, Dict[int, float]
]: # TODO: To be handled by domain (in transition cost)
"""
Return a nested dictionary where the first key is the id of a task (int), the second key the id of a mode
and the value indicates the cost of execution the task in the mode."""
raise NotImplementedError
def get_mode_costs(
self,
) -> Dict[
int, Dict[int, float]
]: # TODO: To be handled by domain (in transition cost)
"""
Return a nested dictionary where the first key is the id of a task (int), the second key the id of a mode
and the value indicates the cost of execution the task in the mode."""
return self._get_mode_costs()
class WithoutModeCosts(WithModeCosts):
"""A domain must inherit this class if there are no mode cost to consider."""
def _get_mode_costs(self) -> Dict[int, Dict[int, float]]:
cost_dict = {}
for task_id, modes in self.get_tasks_modes().items():
cost_dict[task_id] = {mode_id: 0.0 for mode_id in modes}
return cost_dict
class WithResourceCosts:
"""A domain must inherit this class if there are some resource costs to consider."""
def _get_resource_cost_per_time_unit(
self,
) -> Dict[str, float]: # TODO: To be handled by domain (in transition cost)
"""
Return a dictionary where the key is the name of a resource (str)
and the value indicates the cost of using this resource per time unit."""
raise NotImplementedError
def get_resource_cost_per_time_unit(
self,
) -> Dict[str, float]: # TODO: To be handled by domain (in transition cost)
"""
Return a dictionary where the key is the name of a resource (str)
and the value indicates the cost of using this resource per time unit."""
return self._get_resource_cost_per_time_unit()
class WithoutResourceCosts(WithResourceCosts):
"""A domain must inherit this class if there are no resource cost to consider."""
def _get_resource_cost_per_time_unit(self) -> Dict[str, float]:
cost_dict = {}
for res in self.get_resource_types_names():
cost_dict[res] = 0.0
for res in self.get_resource_units_names():
cost_dict[res] = 0.0
return cost_dict | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/resource_costs.py | resource_costs.py |
from __future__ import annotations
from typing import Dict, List
__all__ = [
"WithModeCosts",
"WithoutModeCosts",
"WithResourceCosts",
"WithoutResourceCosts",
]
class WithModeCosts:
"""A domain must inherit this class if there are some mode costs to consider."""
def _get_mode_costs(
self,
) -> Dict[
int, Dict[int, float]
]: # TODO: To be handled by domain (in transition cost)
"""
Return a nested dictionary where the first key is the id of a task (int), the second key the id of a mode
and the value indicates the cost of execution the task in the mode."""
raise NotImplementedError
def get_mode_costs(
self,
) -> Dict[
int, Dict[int, float]
]: # TODO: To be handled by domain (in transition cost)
"""
Return a nested dictionary where the first key is the id of a task (int), the second key the id of a mode
and the value indicates the cost of execution the task in the mode."""
return self._get_mode_costs()
class WithoutModeCosts(WithModeCosts):
"""A domain must inherit this class if there are no mode cost to consider."""
def _get_mode_costs(self) -> Dict[int, Dict[int, float]]:
cost_dict = {}
for task_id, modes in self.get_tasks_modes().items():
cost_dict[task_id] = {mode_id: 0.0 for mode_id in modes}
return cost_dict
class WithResourceCosts:
"""A domain must inherit this class if there are some resource costs to consider."""
def _get_resource_cost_per_time_unit(
self,
) -> Dict[str, float]: # TODO: To be handled by domain (in transition cost)
"""
Return a dictionary where the key is the name of a resource (str)
and the value indicates the cost of using this resource per time unit."""
raise NotImplementedError
def get_resource_cost_per_time_unit(
self,
) -> Dict[str, float]: # TODO: To be handled by domain (in transition cost)
"""
Return a dictionary where the key is the name of a resource (str)
and the value indicates the cost of using this resource per time unit."""
return self._get_resource_cost_per_time_unit()
class WithoutResourceCosts(WithResourceCosts):
"""A domain must inherit this class if there are no resource cost to consider."""
def _get_resource_cost_per_time_unit(self) -> Dict[str, float]:
cost_dict = {}
for res in self.get_resource_types_names():
cost_dict[res] = 0.0
for res in self.get_resource_units_names():
cost_dict[res] = 0.0
return cost_dict | 0.745306 | 0.420481 |
from __future__ import annotations
from typing import Any, Dict, List
__all__ = [
"WithResourceTypes",
"WithoutResourceTypes",
"WithResourceUnits",
"SingleResourceUnit",
"WithoutResourceUnit",
]
class WithResourceTypes:
"""A domain must inherit this class if some of its resources are resource types."""
def get_resource_types_names(self) -> List[str]:
"""Return the names (string) of all resource types as a list."""
return self._get_resource_types_names()
def _get_resource_types_names(self) -> List[str]:
"""Return the names (string) of all resource types as a list."""
raise NotImplementedError
class WithoutResourceTypes(WithResourceTypes):
"""A domain must inherit this class if it only uses resource types."""
def _get_resource_types_names(self) -> List[str]:
"""Return the names (string) of all resource types as a list."""
return []
class WithResourceUnits:
"""A domain must inherit this class if some of its resources are resource units."""
def get_resource_units_names(self) -> List[str]:
"""Return the names (string) of all resource units as a list."""
return self._get_resource_units_names()
def _get_resource_units_names(self) -> List[str]:
"""Return the names (string) of all resource units as a list."""
raise NotImplementedError
def get_resource_type_for_unit(self) -> Dict[str, str]:
"""Return a dictionary where the key is a resource unit name and the value a resource type name.
An empty dictionary can be used if there are no resource unit matching a resource type."""
return self._get_resource_type_for_unit()
def _get_resource_type_for_unit(self) -> Dict[str, str]:
"""Return a dictionary where the key is a resource unit name and the value a resource type name.
An empty dictionary can be used if there are no resource unit matching a resource type."""
raise NotImplementedError
class SingleResourceUnit(WithResourceUnits):
"""A domain must inherit this class if there is no allocation to be done (i.e. there is a single resource)."""
def _get_resource_units_names(self) -> List[str]:
return ["single_resource"]
def _get_resource_type_for_unit(self) -> Dict[str, str]:
return {}
class WithoutResourceUnit(SingleResourceUnit):
"""A domain must inherit this class if it only uses resource types."""
def _get_resource_units_names(self) -> List[str]:
return []
def _get_resource_type_for_unit(self) -> Dict[str, str]:
return {} | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/resource_type.py | resource_type.py |
from __future__ import annotations
from typing import Any, Dict, List
__all__ = [
"WithResourceTypes",
"WithoutResourceTypes",
"WithResourceUnits",
"SingleResourceUnit",
"WithoutResourceUnit",
]
class WithResourceTypes:
"""A domain must inherit this class if some of its resources are resource types."""
def get_resource_types_names(self) -> List[str]:
"""Return the names (string) of all resource types as a list."""
return self._get_resource_types_names()
def _get_resource_types_names(self) -> List[str]:
"""Return the names (string) of all resource types as a list."""
raise NotImplementedError
class WithoutResourceTypes(WithResourceTypes):
"""A domain must inherit this class if it only uses resource types."""
def _get_resource_types_names(self) -> List[str]:
"""Return the names (string) of all resource types as a list."""
return []
class WithResourceUnits:
"""A domain must inherit this class if some of its resources are resource units."""
def get_resource_units_names(self) -> List[str]:
"""Return the names (string) of all resource units as a list."""
return self._get_resource_units_names()
def _get_resource_units_names(self) -> List[str]:
"""Return the names (string) of all resource units as a list."""
raise NotImplementedError
def get_resource_type_for_unit(self) -> Dict[str, str]:
"""Return a dictionary where the key is a resource unit name and the value a resource type name.
An empty dictionary can be used if there are no resource unit matching a resource type."""
return self._get_resource_type_for_unit()
def _get_resource_type_for_unit(self) -> Dict[str, str]:
"""Return a dictionary where the key is a resource unit name and the value a resource type name.
An empty dictionary can be used if there are no resource unit matching a resource type."""
raise NotImplementedError
class SingleResourceUnit(WithResourceUnits):
"""A domain must inherit this class if there is no allocation to be done (i.e. there is a single resource)."""
def _get_resource_units_names(self) -> List[str]:
return ["single_resource"]
def _get_resource_type_for_unit(self) -> Dict[str, str]:
return {}
class WithoutResourceUnit(SingleResourceUnit):
"""A domain must inherit this class if it only uses resource types."""
def _get_resource_units_names(self) -> List[str]:
return []
def _get_resource_type_for_unit(self) -> Dict[str, str]:
return {} | 0.939255 | 0.240579 |
from __future__ import annotations
from typing import Dict
__all__ = [
"TimeLag",
"MinimumOnlyTimeLag",
"MaximumOnlyTimeLag",
"WithTimeLag",
"WithoutTimeLag",
]
class TimeLag:
"""Defines a time lag with both a minimum time lag and maximum time lag."""
def __init__(self, minimum_time_lag, maximum_time_lags):
self.minimum_time_lag = minimum_time_lag
self.maximum_time_lags = maximum_time_lags
class MinimumOnlyTimeLag(TimeLag):
"""Defines a minimum time lag."""
def __init__(self, minimum_time_lag):
self.minimum_time_lag = minimum_time_lag
self.maximum_time_lags = self.get_max_horizon()
class MaximumOnlyTimeLag(TimeLag):
"""Defines a maximum time lag."""
def __init__(self, maximum_time_lags):
self.minimum_time_lag = 0
self.maximum_time_lags = maximum_time_lags
class WithTimeLag:
"""A domain must inherit this class if there are minimum and maximum time lags between some of its tasks."""
def get_time_lags(self) -> Dict[int, Dict[int, TimeLag]]:
"""
Return nested dictionaries where the first key is the id of a task (int)
and the second key is the id of another task (int).
The value is a TimeLag object containing the MINIMUM and MAXIMUM time (int) that needs to separate the end
of the first task to the start of the second task.
e.g.
{
12:{
15: TimeLag(5, 10),
16: TimeLag(5, 20),
17: MinimumOnlyTimeLag(5),
18: MaximumOnlyTimeLag(15),
}
}
# Returns
A dictionary of TimeLag objects.
"""
return self._get_time_lags()
def _get_time_lags(self) -> Dict[int, Dict[int, TimeLag]]:
"""
Return nested dictionaries where the first key is the id of a task (int)
and the second key is the id of another task (int).
The value is a TimeLag object containing the MINIMUM and MAXIMUM time (int) that needs to separate the end
of the first task to the start of the second task.
e.g.
{
12:{
15: TimeLag(5, 10),
16: TimeLag(5, 20),
17: MinimumOnlyTimeLag(5),
18: MaximumOnlyTimeLag(15),
}
}
# Returns
A dictionary of TimeLag objects.
"""
raise NotImplementedError
class WithoutTimeLag(WithTimeLag):
"""A domain must inherit this class if there is no required time lag between its tasks."""
def _get_time_lags(self) -> Dict[int, Dict[int, TimeLag]]:
"""
Return nested dictionaries where the first key is the id of a task (int)
and the second key is the id of another task (int).
The value is a TimeLag object containing the MINIMUM and MAXIMUM time (int) that needs to separate the end
of the first task to the start of the second task."""
return {} | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/time_lag.py | time_lag.py |
from __future__ import annotations
from typing import Dict
__all__ = [
"TimeLag",
"MinimumOnlyTimeLag",
"MaximumOnlyTimeLag",
"WithTimeLag",
"WithoutTimeLag",
]
class TimeLag:
"""Defines a time lag with both a minimum time lag and maximum time lag."""
def __init__(self, minimum_time_lag, maximum_time_lags):
self.minimum_time_lag = minimum_time_lag
self.maximum_time_lags = maximum_time_lags
class MinimumOnlyTimeLag(TimeLag):
"""Defines a minimum time lag."""
def __init__(self, minimum_time_lag):
self.minimum_time_lag = minimum_time_lag
self.maximum_time_lags = self.get_max_horizon()
class MaximumOnlyTimeLag(TimeLag):
"""Defines a maximum time lag."""
def __init__(self, maximum_time_lags):
self.minimum_time_lag = 0
self.maximum_time_lags = maximum_time_lags
class WithTimeLag:
"""A domain must inherit this class if there are minimum and maximum time lags between some of its tasks."""
def get_time_lags(self) -> Dict[int, Dict[int, TimeLag]]:
"""
Return nested dictionaries where the first key is the id of a task (int)
and the second key is the id of another task (int).
The value is a TimeLag object containing the MINIMUM and MAXIMUM time (int) that needs to separate the end
of the first task to the start of the second task.
e.g.
{
12:{
15: TimeLag(5, 10),
16: TimeLag(5, 20),
17: MinimumOnlyTimeLag(5),
18: MaximumOnlyTimeLag(15),
}
}
# Returns
A dictionary of TimeLag objects.
"""
return self._get_time_lags()
def _get_time_lags(self) -> Dict[int, Dict[int, TimeLag]]:
"""
Return nested dictionaries where the first key is the id of a task (int)
and the second key is the id of another task (int).
The value is a TimeLag object containing the MINIMUM and MAXIMUM time (int) that needs to separate the end
of the first task to the start of the second task.
e.g.
{
12:{
15: TimeLag(5, 10),
16: TimeLag(5, 20),
17: MinimumOnlyTimeLag(5),
18: MaximumOnlyTimeLag(15),
}
}
# Returns
A dictionary of TimeLag objects.
"""
raise NotImplementedError
class WithoutTimeLag(WithTimeLag):
"""A domain must inherit this class if there is no required time lag between its tasks."""
def _get_time_lags(self) -> Dict[int, Dict[int, TimeLag]]:
"""
Return nested dictionaries where the first key is the id of a task (int)
and the second key is the id of another task (int).
The value is a TimeLag object containing the MINIMUM and MAXIMUM time (int) that needs to separate the end
of the first task to the start of the second task."""
return {} | 0.967194 | 0.50531 |
from __future__ import annotations
from typing import Any, Dict, Hashable, List, Tuple
import networkx as nx
class Graph:
def __init__(
self,
nodes: List[Tuple[Hashable, Dict[str, Any]]],
edges: List[Tuple[Hashable, Hashable, Dict[str, Any]]],
undirected=True,
):
self.nodes = nodes
self.edges = edges
self.undirected = undirected
self.neighbors_dict = {}
self.predecessors_dict = {}
self.edges_infos_dict = {}
self.nodes_infos_dict = {}
self.build_nodes_infos_dict()
self.build_edges()
self.nodes_name = sorted(self.nodes_infos_dict)
self.graph_nx = self.to_networkx()
def get_edges(self):
return self.edges_infos_dict.keys()
def get_nodes(self):
return self.nodes_name
def build_nodes_infos_dict(self):
for n, d in self.nodes:
self.nodes_infos_dict[n] = d
def build_edges(self):
for n1, n2, d in self.edges:
self.edges_infos_dict[(n1, n2)] = d
if n2 not in self.predecessors_dict:
self.predecessors_dict[n2] = set()
if n1 not in self.neighbors_dict:
self.neighbors_dict[n1] = set()
self.predecessors_dict[n2].add(n1)
self.neighbors_dict[n1].add(n2)
if self.undirected:
if n1 not in self.predecessors_dict:
self.predecessors_dict[n1] = set()
if n2 not in self.neighbors_dict:
self.neighbors_dict[n2] = set()
self.predecessors_dict[n1].add(n2)
self.neighbors_dict[n2].add(n1)
self.edges_infos_dict[(n2, n1)] = d
def get_neighbors(self, node):
return self.neighbors_dict.get(node, [])
def get_predecessors(self, node):
return self.predecessors_dict.get(node, [])
def get_attr_node(self, node, attr):
return self.nodes_infos_dict.get(node, {}).get(attr, None)
def get_attr_edge(self, node1, node2, attr):
return self.edges_infos_dict.get((node1, node2), {}).get(attr, None)
def to_networkx(self):
graph_nx = nx.DiGraph() if not self.undirected else nx.Graph()
graph_nx.add_nodes_from(self.nodes)
graph_nx.add_edges_from(self.edges)
return graph_nx
def check_loop(self):
try:
cycles = nx.find_cycle(self.graph_nx, orientation="original")
except:
cycles = None
return cycles
def precedessors_nodes(self, n):
return nx.algorithms.ancestors(self.graph_nx, n)
def ancestors_map(self):
return {
n: nx.algorithms.ancestors(self.graph_nx, n) for n in self.graph_nx.nodes()
}
def descendants_map(self):
return {
n: nx.algorithms.descendants(self.graph_nx, n)
for n in self.graph_nx.nodes()
}
def successors_map(self):
return {n: list(nx.neighbors(self.graph_nx, n)) for n in self.graph_nx.nodes()}
def predecessors_map(self):
return {n: list(self.graph_nx.predecessors(n)) for n in self.graph_nx.nodes()}
if __name__ == "__main__":
nodes = [(0, {"name": 0}), (1, {"name": 1})]
edges = [(0, 1, {"weight": 1.1}), (1, 0, {"weight": 2})]
graph = Graph(nodes, edges, False)
graph_nx = graph.to_networkx()
print(graph.get_attr_edge(0, 1, "weight"))
print(graph.get_attr_edge(1, 0, "weight"))
print(graph.get_attr_edge(0, 0, "weight")) # None
print(graph_nx.size())
print(nx.number_of_nodes(graph_nx), nx.number_of_edges(graph_nx))
print(graph_nx[0][1]["weight"])
print(graph_nx[1][0]["weight"]) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/graph_toolbox.py | graph_toolbox.py |
from __future__ import annotations
from typing import Any, Dict, Hashable, List, Tuple
import networkx as nx
class Graph:
def __init__(
self,
nodes: List[Tuple[Hashable, Dict[str, Any]]],
edges: List[Tuple[Hashable, Hashable, Dict[str, Any]]],
undirected=True,
):
self.nodes = nodes
self.edges = edges
self.undirected = undirected
self.neighbors_dict = {}
self.predecessors_dict = {}
self.edges_infos_dict = {}
self.nodes_infos_dict = {}
self.build_nodes_infos_dict()
self.build_edges()
self.nodes_name = sorted(self.nodes_infos_dict)
self.graph_nx = self.to_networkx()
def get_edges(self):
return self.edges_infos_dict.keys()
def get_nodes(self):
return self.nodes_name
def build_nodes_infos_dict(self):
for n, d in self.nodes:
self.nodes_infos_dict[n] = d
def build_edges(self):
for n1, n2, d in self.edges:
self.edges_infos_dict[(n1, n2)] = d
if n2 not in self.predecessors_dict:
self.predecessors_dict[n2] = set()
if n1 not in self.neighbors_dict:
self.neighbors_dict[n1] = set()
self.predecessors_dict[n2].add(n1)
self.neighbors_dict[n1].add(n2)
if self.undirected:
if n1 not in self.predecessors_dict:
self.predecessors_dict[n1] = set()
if n2 not in self.neighbors_dict:
self.neighbors_dict[n2] = set()
self.predecessors_dict[n1].add(n2)
self.neighbors_dict[n2].add(n1)
self.edges_infos_dict[(n2, n1)] = d
def get_neighbors(self, node):
return self.neighbors_dict.get(node, [])
def get_predecessors(self, node):
return self.predecessors_dict.get(node, [])
def get_attr_node(self, node, attr):
return self.nodes_infos_dict.get(node, {}).get(attr, None)
def get_attr_edge(self, node1, node2, attr):
return self.edges_infos_dict.get((node1, node2), {}).get(attr, None)
def to_networkx(self):
graph_nx = nx.DiGraph() if not self.undirected else nx.Graph()
graph_nx.add_nodes_from(self.nodes)
graph_nx.add_edges_from(self.edges)
return graph_nx
def check_loop(self):
try:
cycles = nx.find_cycle(self.graph_nx, orientation="original")
except:
cycles = None
return cycles
def precedessors_nodes(self, n):
return nx.algorithms.ancestors(self.graph_nx, n)
def ancestors_map(self):
return {
n: nx.algorithms.ancestors(self.graph_nx, n) for n in self.graph_nx.nodes()
}
def descendants_map(self):
return {
n: nx.algorithms.descendants(self.graph_nx, n)
for n in self.graph_nx.nodes()
}
def successors_map(self):
return {n: list(nx.neighbors(self.graph_nx, n)) for n in self.graph_nx.nodes()}
def predecessors_map(self):
return {n: list(self.graph_nx.predecessors(n)) for n in self.graph_nx.nodes()}
if __name__ == "__main__":
nodes = [(0, {"name": 0}), (1, {"name": 1})]
edges = [(0, 1, {"weight": 1.1}), (1, 0, {"weight": 2})]
graph = Graph(nodes, edges, False)
graph_nx = graph.to_networkx()
print(graph.get_attr_edge(0, 1, "weight"))
print(graph.get_attr_edge(1, 0, "weight"))
print(graph.get_attr_edge(0, 0, "weight")) # None
print(graph_nx.size())
print(nx.number_of_nodes(graph_nx), nx.number_of_edges(graph_nx))
print(graph_nx[0][1]["weight"])
print(graph_nx[1][0]["weight"]) | 0.823648 | 0.305024 |
from __future__ import annotations
from typing import Any, Dict, List, Set
__all__ = ["WithResourceSkills", "WithoutResourceSkills"]
class WithResourceSkills:
"""A domain must inherit this class if its resources (either resource types or resource units)
have different set of skills."""
def get_skills_names(self) -> Set[str]:
"""Return a list of all skill names as a list of str. Skill names are defined in the 2 dictionaries returned
by the get_all_resources_skills and get_all_tasks_skills functions."""
all_names = set()
skill_dict = self.get_all_resources_skills()
for key1 in skill_dict.keys():
for key2 in skill_dict[key1].keys():
all_names.add(key2)
skill_dict = self.get_all_tasks_skills()
for key1 in skill_dict.keys():
for mode in skill_dict[key1].keys():
for key2 in skill_dict[key1][mode].keys():
all_names.add(key2)
return all_names
def get_all_resources_skills(self) -> Dict[str, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a resource type or resource unit
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {unit: {skill: (detail of skill)}}"""
return self._get_all_resources_skills()
def _get_all_resources_skills(self) -> Dict[str, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a resource type or resource unit
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {unit: {skill: (detail of skill)}}"""
raise NotImplementedError
def get_skills_of_resource(self, resource: str) -> Dict[str, Any]:
"""Return the skills of a given resource"""
return self.get_all_resources_skills()[resource]
def get_all_tasks_skills(self) -> Dict[int, Dict[int, Dict[str, Any]]]:
"""Return a nested dictionary where the first key is the name of a task
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {task: {skill: (detail of skill)}}"""
return self._get_all_tasks_skills()
def _get_all_tasks_skills(self) -> Dict[int, Dict[int, Dict[str, Any]]]:
"""Return a nested dictionary where the first key is the name of a task
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {task: {skill: (detail of skill)}}"""
raise NotImplementedError
def get_skills_of_task(self, task: int, mode: int) -> Dict[str, Any]:
"""Return the skill requirements for a given task"""
return {
s: self.get_all_tasks_skills()[task][mode][s]
for s in self.get_all_tasks_skills()[task][mode]
if self.get_all_tasks_skills()[task][mode][s] > 0
}
def find_one_ressource_to_do_one_task(self, task: int, mode: int) -> List[str]:
"""
For the common case when it is possible to do the task by one resource unit.
For general case, it might just return no possible ressource unit.
"""
skill_of_task = self.get_skills_of_task(task, mode)
resources = []
if len(skill_of_task) == 0:
return [None]
for resource in self.get_all_resources_skills():
if all(
self.get_skills_of_resource(resource=resource).get(s, 0)
>= skill_of_task[s]
for s in skill_of_task
):
resources += [resource]
# print("Ressources ", resources, " can do the task")
return resources
def check_if_skills_are_fulfilled(
self, task: int, mode: int, resource_used: Dict[str, int]
):
skill_of_task = self.get_skills_of_task(task, mode)
if len(skill_of_task) == 0:
return True # No need of skills here.
skills = {s: 0 for s in skill_of_task}
for r in resource_used:
skill_of_ressource = self.get_skills_of_resource(resource=r)
for s in skill_of_ressource:
if s in skills:
skills[s] += skill_of_ressource[s]
# print("Ressource used : ", skills)
# print("Skills required", skill_of_task)
return all(skills[s] >= skill_of_task[s] for s in skill_of_task)
class WithoutResourceSkills(WithResourceSkills):
"""A domain must inherit this class if no resources skills have to be considered."""
def _get_all_resources_skills(self) -> Dict[str, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a resource type or resource unit
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {unit: {skill: (detail of skill)}}"""
return {}
def get_skills_of_resource(self, resource: str) -> Dict[str, Any]:
"""Return the skills of a given resource"""
return {}
def _get_all_tasks_skills(self) -> Dict[int, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a task
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {task: {skill: (detail of skill)}}"""
return {}
def get_skills_of_task(self, task: int, mode: int) -> Dict[str, Any]:
"""Return the skill requirements for a given task"""
return {}
def check_if_skills_are_fulfilled(
self, task: int, mode: int, resource_used: Dict[str, int]
):
return True | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/skills.py | skills.py |
from __future__ import annotations
from typing import Any, Dict, List, Set
__all__ = ["WithResourceSkills", "WithoutResourceSkills"]
class WithResourceSkills:
"""A domain must inherit this class if its resources (either resource types or resource units)
have different set of skills."""
def get_skills_names(self) -> Set[str]:
"""Return a list of all skill names as a list of str. Skill names are defined in the 2 dictionaries returned
by the get_all_resources_skills and get_all_tasks_skills functions."""
all_names = set()
skill_dict = self.get_all_resources_skills()
for key1 in skill_dict.keys():
for key2 in skill_dict[key1].keys():
all_names.add(key2)
skill_dict = self.get_all_tasks_skills()
for key1 in skill_dict.keys():
for mode in skill_dict[key1].keys():
for key2 in skill_dict[key1][mode].keys():
all_names.add(key2)
return all_names
def get_all_resources_skills(self) -> Dict[str, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a resource type or resource unit
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {unit: {skill: (detail of skill)}}"""
return self._get_all_resources_skills()
def _get_all_resources_skills(self) -> Dict[str, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a resource type or resource unit
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {unit: {skill: (detail of skill)}}"""
raise NotImplementedError
def get_skills_of_resource(self, resource: str) -> Dict[str, Any]:
"""Return the skills of a given resource"""
return self.get_all_resources_skills()[resource]
def get_all_tasks_skills(self) -> Dict[int, Dict[int, Dict[str, Any]]]:
"""Return a nested dictionary where the first key is the name of a task
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {task: {skill: (detail of skill)}}"""
return self._get_all_tasks_skills()
def _get_all_tasks_skills(self) -> Dict[int, Dict[int, Dict[str, Any]]]:
"""Return a nested dictionary where the first key is the name of a task
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {task: {skill: (detail of skill)}}"""
raise NotImplementedError
def get_skills_of_task(self, task: int, mode: int) -> Dict[str, Any]:
"""Return the skill requirements for a given task"""
return {
s: self.get_all_tasks_skills()[task][mode][s]
for s in self.get_all_tasks_skills()[task][mode]
if self.get_all_tasks_skills()[task][mode][s] > 0
}
def find_one_ressource_to_do_one_task(self, task: int, mode: int) -> List[str]:
"""
For the common case when it is possible to do the task by one resource unit.
For general case, it might just return no possible ressource unit.
"""
skill_of_task = self.get_skills_of_task(task, mode)
resources = []
if len(skill_of_task) == 0:
return [None]
for resource in self.get_all_resources_skills():
if all(
self.get_skills_of_resource(resource=resource).get(s, 0)
>= skill_of_task[s]
for s in skill_of_task
):
resources += [resource]
# print("Ressources ", resources, " can do the task")
return resources
def check_if_skills_are_fulfilled(
self, task: int, mode: int, resource_used: Dict[str, int]
):
skill_of_task = self.get_skills_of_task(task, mode)
if len(skill_of_task) == 0:
return True # No need of skills here.
skills = {s: 0 for s in skill_of_task}
for r in resource_used:
skill_of_ressource = self.get_skills_of_resource(resource=r)
for s in skill_of_ressource:
if s in skills:
skills[s] += skill_of_ressource[s]
# print("Ressource used : ", skills)
# print("Skills required", skill_of_task)
return all(skills[s] >= skill_of_task[s] for s in skill_of_task)
class WithoutResourceSkills(WithResourceSkills):
"""A domain must inherit this class if no resources skills have to be considered."""
def _get_all_resources_skills(self) -> Dict[str, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a resource type or resource unit
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {unit: {skill: (detail of skill)}}"""
return {}
def get_skills_of_resource(self, resource: str) -> Dict[str, Any]:
"""Return the skills of a given resource"""
return {}
def _get_all_tasks_skills(self) -> Dict[int, Dict[str, Any]]:
"""Return a nested dictionary where the first key is the name of a task
and the second key is the name of a skill. The value defines the details of the skill.
E.g. {task: {skill: (detail of skill)}}"""
return {}
def get_skills_of_task(self, task: int, mode: int) -> Dict[str, Any]:
"""Return the skill requirements for a given task"""
return {}
def check_if_skills_are_fulfilled(
self, task: int, mode: int, resource_used: Dict[str, int]
):
return True | 0.861436 | 0.378402 |
from __future__ import annotations
from typing import Dict, List
from skdecide.builders.domain.scheduling.graph_toolbox import Graph
from skdecide.builders.domain.scheduling.scheduling_domains_modelling import State
__all__ = ["WithPrecedence", "WithoutPrecedence"]
class WithPrecedence:
"""A domain must inherit this class if there exist some predecence constraints between tasks."""
def _get_successors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
raise NotImplementedError
def get_successors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
return self._get_successors()
def _get_successors_task(self, task_id: int) -> List[int]:
return self.get_successors()[task_id]
def get_successors_task(self, task_id: int) -> List[int]:
return self._get_successors_task(task_id=task_id)
def _get_predecessors(self) -> Dict[int, List[int]]:
"""Return the predecessors of the task. Successors are given as a list for a task given as a key."""
return self.graph.predecessors_map()
def get_predecessors(self) -> Dict[int, List[int]]:
"""Return the predecessors of the task. Successors are given as a list for a task given as a key."""
return self._get_predecessors()
def _get_predecessors_task(self, task_id: int) -> List[int]:
return self.get_predecessors()[task_id]
def get_predecessors_task(self, task_id: int) -> List[int]:
return self._get_predecessors_task(task_id=task_id)
def compute_graph(self):
task_ids = self.get_tasks_ids()
successors = self.get_successors()
mode_details = self.get_tasks_modes()
nodes = [
(
n,
{
mode: self.sample_task_duration(task=n, mode=mode)
for mode in mode_details[n]
},
)
for n in task_ids
]
edges = []
for n in successors:
for succ in successors[n]:
edges += [(n, succ, {})]
return Graph(nodes, edges, False)
def _task_modes_possible_to_launch(self, state: State):
mode_details = self.get_tasks_modes()
return [
(n, mode)
for n in state.tasks_remaining
for mode in mode_details[n]
if all(m in state.tasks_complete for m in self.ancestors[n])
]
def task_modes_possible_to_launch(self, state: State):
return self._task_modes_possible_to_launch(state=state)
def _task_possible_to_launch_precedence(self, state: State):
return [
n
for n in state.tasks_remaining
if all(m in state.tasks_complete for m in self.ancestors[n])
]
def task_possible_to_launch_precedence(self, state: State):
return self._task_possible_to_launch_precedence(state=state)
class WithoutPrecedence(WithPrecedence):
"""A domain must inherit this class if there are no predecence constraints between tasks."""
def _get_successors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
ids = self.get_tasks_ids()
succ = {}
for id in ids:
succ[id] = []
return succ
def _get_predecessors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
ids = self.get_tasks_ids()
prec = {}
for id in ids:
prec[id] = []
return prec | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/domain/scheduling/precedence.py | precedence.py |
from __future__ import annotations
from typing import Dict, List
from skdecide.builders.domain.scheduling.graph_toolbox import Graph
from skdecide.builders.domain.scheduling.scheduling_domains_modelling import State
__all__ = ["WithPrecedence", "WithoutPrecedence"]
class WithPrecedence:
"""A domain must inherit this class if there exist some predecence constraints between tasks."""
def _get_successors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
raise NotImplementedError
def get_successors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
return self._get_successors()
def _get_successors_task(self, task_id: int) -> List[int]:
return self.get_successors()[task_id]
def get_successors_task(self, task_id: int) -> List[int]:
return self._get_successors_task(task_id=task_id)
def _get_predecessors(self) -> Dict[int, List[int]]:
"""Return the predecessors of the task. Successors are given as a list for a task given as a key."""
return self.graph.predecessors_map()
def get_predecessors(self) -> Dict[int, List[int]]:
"""Return the predecessors of the task. Successors are given as a list for a task given as a key."""
return self._get_predecessors()
def _get_predecessors_task(self, task_id: int) -> List[int]:
return self.get_predecessors()[task_id]
def get_predecessors_task(self, task_id: int) -> List[int]:
return self._get_predecessors_task(task_id=task_id)
def compute_graph(self):
task_ids = self.get_tasks_ids()
successors = self.get_successors()
mode_details = self.get_tasks_modes()
nodes = [
(
n,
{
mode: self.sample_task_duration(task=n, mode=mode)
for mode in mode_details[n]
},
)
for n in task_ids
]
edges = []
for n in successors:
for succ in successors[n]:
edges += [(n, succ, {})]
return Graph(nodes, edges, False)
def _task_modes_possible_to_launch(self, state: State):
mode_details = self.get_tasks_modes()
return [
(n, mode)
for n in state.tasks_remaining
for mode in mode_details[n]
if all(m in state.tasks_complete for m in self.ancestors[n])
]
def task_modes_possible_to_launch(self, state: State):
return self._task_modes_possible_to_launch(state=state)
def _task_possible_to_launch_precedence(self, state: State):
return [
n
for n in state.tasks_remaining
if all(m in state.tasks_complete for m in self.ancestors[n])
]
def task_possible_to_launch_precedence(self, state: State):
return self._task_possible_to_launch_precedence(state=state)
class WithoutPrecedence(WithPrecedence):
"""A domain must inherit this class if there are no predecence constraints between tasks."""
def _get_successors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
ids = self.get_tasks_ids()
succ = {}
for id in ids:
succ[id] = []
return succ
def _get_predecessors(self) -> Dict[int, List[int]]:
"""Return the successors of the tasks. Successors are given as a list for a task given as a key."""
ids = self.get_tasks_ids()
prec = {}
for id in ids:
prec[id] = []
return prec | 0.919308 | 0.443841 |
from __future__ import annotations
from skdecide.core import D, autocastable
__all__ = ["Utilities", "QValues"]
class Utilities:
"""A solver must inherit this class if it can provide the utility function (i.e. value function)."""
@autocastable
def get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
"""Get the estimated on-policy utility of the given observation.
In mathematical terms, for a fully observable domain, this function estimates:
$$V^\\pi(s)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$ the initial state for the trajectories.
# Parameters
observation: The observation to consider.
# Returns
The estimated on-policy utility of the given observation.
"""
return self._get_utility(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
"""Get the estimated on-policy utility of the given observation.
In mathematical terms, for a fully observable domain, this function estimates:
$$V^\\pi(s)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$ the initial state for the trajectories.
# Parameters
observation: The observation to consider.
# Returns
The estimated on-policy utility of the given observation.
"""
raise NotImplementedError
class QValues(Utilities):
"""A solver must inherit this class if it can provide the Q function (i.e. action-value function)."""
@autocastable
def get_q_value(
self,
observation: D.T_agent[D.T_observation],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_value:
"""Get the estimated on-policy Q value of the given observation and action.
In mathematical terms, for a fully observable domain, this function estimates:
$$Q^\\pi(s,a)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s,a_0=a]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$/$a_0$ the initial state/action for the
trajectories.
# Parameters
observation: The observation to consider.
action: The action to consider.
# Returns
The estimated on-policy Q value of the given observation and action.
"""
return self._get_q_value(observation, action)
def _get_q_value(
self,
observation: D.T_agent[D.T_observation],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_value:
"""Get the estimated on-policy Q value of the given observation and action.
In mathematical terms, for a fully observable domain, this function estimates:
$$Q^\\pi(s,a)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s,a_0=a]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$/$a_0$ the initial state/action for the
trajectories.
# Parameters
observation: The observation to consider.
action: The action to consider.
# Returns
The estimated on-policy Q value of the given observation and action.
"""
raise NotImplementedError | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/solver/assessability.py | assessability.py |
from __future__ import annotations
from skdecide.core import D, autocastable
__all__ = ["Utilities", "QValues"]
class Utilities:
"""A solver must inherit this class if it can provide the utility function (i.e. value function)."""
@autocastable
def get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
"""Get the estimated on-policy utility of the given observation.
In mathematical terms, for a fully observable domain, this function estimates:
$$V^\\pi(s)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$ the initial state for the trajectories.
# Parameters
observation: The observation to consider.
# Returns
The estimated on-policy utility of the given observation.
"""
return self._get_utility(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
"""Get the estimated on-policy utility of the given observation.
In mathematical terms, for a fully observable domain, this function estimates:
$$V^\\pi(s)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$ the initial state for the trajectories.
# Parameters
observation: The observation to consider.
# Returns
The estimated on-policy utility of the given observation.
"""
raise NotImplementedError
class QValues(Utilities):
"""A solver must inherit this class if it can provide the Q function (i.e. action-value function)."""
@autocastable
def get_q_value(
self,
observation: D.T_agent[D.T_observation],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_value:
"""Get the estimated on-policy Q value of the given observation and action.
In mathematical terms, for a fully observable domain, this function estimates:
$$Q^\\pi(s,a)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s,a_0=a]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$/$a_0$ the initial state/action for the
trajectories.
# Parameters
observation: The observation to consider.
action: The action to consider.
# Returns
The estimated on-policy Q value of the given observation and action.
"""
return self._get_q_value(observation, action)
def _get_q_value(
self,
observation: D.T_agent[D.T_observation],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_value:
"""Get the estimated on-policy Q value of the given observation and action.
In mathematical terms, for a fully observable domain, this function estimates:
$$Q^\\pi(s,a)=\\underset{\\tau\\sim\\pi}{\\mathbb{E}}[R(\\tau)|s_0=s,a_0=a]$$
where $\\pi$ is the current policy, any $\\tau=(s_0,a_0, s_1, a_1, ...)$ represents a trajectory sampled from
the policy, $R(\\tau)$ is the return (cumulative reward) and $s_0$/$a_0$ the initial state/action for the
trajectories.
# Parameters
observation: The observation to consider.
action: The action to consider.
# Returns
The estimated on-policy Q value of the given observation and action.
"""
raise NotImplementedError | 0.950365 | 0.947088 |
from __future__ import annotations
from typing import Callable, List
from skdecide.domains import Domain, PipeParallelDomain, ShmParallelDomain
__all__ = ["ParallelSolver"]
class ParallelSolver:
"""A solver must inherit this class if it wants to call several cloned parallel domains in separate concurrent processes.
The solver is meant to be called either within a 'with' context statement, or to be cleaned up using the close() method.
"""
def __init__(
self,
domain_factory: Callable[[], Domain],
parallel: bool = False,
shared_memory_proxy=None,
):
"""Creates a parallelizable solver
# Parameters
domain_factory: A callable with no argument returning the domain to solve (factory is the domain class if None).
parallel: True if the solver is run in parallel mode.
shared_memory_proxy: Shared memory proxy to use if not None, otherwise run piped parallel domains.
"""
self._domain_factory = domain_factory
self._parallel = parallel
self._shared_memory_proxy = shared_memory_proxy
self._domain = None
self._lambdas = [] # to define in the inherited class!
self._ipc_notify = False # to define in the inherited class!
def _initialize(self):
"""Launches the parallel domains.
This method requires to have previously recorded the self._domain_factory (e.g. after calling _init_solve),
the set of lambda functions passed to the solver's constructor (e.g. heuristic lambda for heuristic-based solvers),
and whether the parallel domain jobs should notify their status via the IPC protocol (required when interacting with
other programming languages like C++)
"""
if self._parallel:
if self._shared_memory_proxy is None:
self._domain = PipeParallelDomain(
self._domain_factory,
lambdas=self._lambdas,
ipc_notify=self._ipc_notify,
)
else:
self._domain = ShmParallelDomain(
self._domain_factory,
self._shared_memory_proxy,
lambdas=self._lambdas,
ipc_notify=self._ipc_notify,
)
# Launch parallel domains before created the algorithm object
# otherwise spawning new processes (the default on Windows)
# will fail trying to pickle the C++ underlying algorithm
self._domain._launch_processes()
else:
self._domain = self._domain_factory()
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._domain is not None and self._parallel:
self._domain.close()
self._domain = None
def _cleanup(self):
self.close()
def get_domain(self):
"""
Returns the domain, optionally creating a parallel domain if not already created.
"""
if self._domain is None:
self._initialize()
return self._domain
def call_domain_method(self, name, *args):
"""Calls a parallel domain's method.
This is the only way to get a domain method for a parallel domain.
"""
if self._parallel:
process_id = getattr(self._domain, name)(*args)
return self._domain.get_result(process_id)
else:
return getattr(self._domain, name)(*args) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/solver/parallelability.py | parallelability.py |
from __future__ import annotations
from typing import Callable, List
from skdecide.domains import Domain, PipeParallelDomain, ShmParallelDomain
__all__ = ["ParallelSolver"]
class ParallelSolver:
"""A solver must inherit this class if it wants to call several cloned parallel domains in separate concurrent processes.
The solver is meant to be called either within a 'with' context statement, or to be cleaned up using the close() method.
"""
def __init__(
self,
domain_factory: Callable[[], Domain],
parallel: bool = False,
shared_memory_proxy=None,
):
"""Creates a parallelizable solver
# Parameters
domain_factory: A callable with no argument returning the domain to solve (factory is the domain class if None).
parallel: True if the solver is run in parallel mode.
shared_memory_proxy: Shared memory proxy to use if not None, otherwise run piped parallel domains.
"""
self._domain_factory = domain_factory
self._parallel = parallel
self._shared_memory_proxy = shared_memory_proxy
self._domain = None
self._lambdas = [] # to define in the inherited class!
self._ipc_notify = False # to define in the inherited class!
def _initialize(self):
"""Launches the parallel domains.
This method requires to have previously recorded the self._domain_factory (e.g. after calling _init_solve),
the set of lambda functions passed to the solver's constructor (e.g. heuristic lambda for heuristic-based solvers),
and whether the parallel domain jobs should notify their status via the IPC protocol (required when interacting with
other programming languages like C++)
"""
if self._parallel:
if self._shared_memory_proxy is None:
self._domain = PipeParallelDomain(
self._domain_factory,
lambdas=self._lambdas,
ipc_notify=self._ipc_notify,
)
else:
self._domain = ShmParallelDomain(
self._domain_factory,
self._shared_memory_proxy,
lambdas=self._lambdas,
ipc_notify=self._ipc_notify,
)
# Launch parallel domains before created the algorithm object
# otherwise spawning new processes (the default on Windows)
# will fail trying to pickle the C++ underlying algorithm
self._domain._launch_processes()
else:
self._domain = self._domain_factory()
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._domain is not None and self._parallel:
self._domain.close()
self._domain = None
def _cleanup(self):
self.close()
def get_domain(self):
"""
Returns the domain, optionally creating a parallel domain if not already created.
"""
if self._domain is None:
self._initialize()
return self._domain
def call_domain_method(self, name, *args):
"""Calls a parallel domain's method.
This is the only way to get a domain method for a parallel domain.
"""
if self._parallel:
process_id = getattr(self._domain, name)(*args)
return self._domain.get_result(process_id)
else:
return getattr(self._domain, name)(*args) | 0.91462 | 0.193738 |
from __future__ import annotations
from skdecide.core import D, Distribution, SingleValueDistribution, autocastable
__all__ = ["Policies", "UncertainPolicies", "DeterministicPolicies"]
class Policies:
"""A solver must inherit this class if it computes a stochastic policy as part of the solving process."""
@autocastable
def sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Sample an action for the given observation (from the solver's current policy).
# Parameters
observation: The observation for which an action must be sampled.
# Returns
The sampled action.
"""
return self._sample_action(observation)
def _sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Sample an action for the given observation (from the solver's current policy).
# Parameters
observation: The observation for which an action must be sampled.
# Returns
The sampled action.
"""
raise NotImplementedError
@autocastable
def is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check whether the solver's current policy is defined for the given observation.
# Parameters
observation: The observation to consider.
# Returns
True if the policy is defined for the given observation memory (False otherwise).
"""
return self._is_policy_defined_for(observation)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check whether the solver's current policy is defined for the given observation.
# Parameters
observation: The observation to consider.
# Returns
True if the policy is defined for the given observation memory (False otherwise).
"""
raise NotImplementedError
class UncertainPolicies(Policies):
"""A solver must inherit this class if it computes a stochastic policy (providing next action distribution
explicitly) as part of the solving process."""
def _sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
return self._get_next_action_distribution(observation).sample()
@autocastable
def get_next_action_distribution(
self, observation: D.T_agent[D.T_observation]
) -> Distribution[D.T_agent[D.T_concurrency[D.T_event]]]:
"""Get the probabilistic distribution of next action for the given observation (from the solver's current
policy).
# Parameters
observation: The observation to consider.
# Returns
The probabilistic distribution of next action.
"""
return self._get_next_action_distribution(observation)
def _get_next_action_distribution(
self, observation: D.T_agent[D.T_observation]
) -> Distribution[D.T_agent[D.T_concurrency[D.T_event]]]:
"""Get the probabilistic distribution of next action for the given observation (from the solver's current
policy).
# Parameters
observation: The observation to consider.
# Returns
The probabilistic distribution of next action.
"""
raise NotImplementedError
class DeterministicPolicies(UncertainPolicies):
"""A solver must inherit this class if it computes a deterministic policy as part of the solving process."""
def _get_next_action_distribution(
self, observation: D.T_agent[D.T_observation]
) -> Distribution[D.T_agent[D.T_concurrency[D.T_event]]]:
return SingleValueDistribution(self._get_next_action(observation))
@autocastable
def get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Get the next deterministic action (from the solver's current policy).
# Parameters
observation: The observation for which next action is requested.
# Returns
The next deterministic action.
"""
return self._get_next_action(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Get the next deterministic action (from the solver's current policy).
# Parameters
observation: The observation for which next action is requested.
# Returns
The next deterministic action.
"""
raise NotImplementedError | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/builders/solver/policy.py | policy.py |
from __future__ import annotations
from skdecide.core import D, Distribution, SingleValueDistribution, autocastable
__all__ = ["Policies", "UncertainPolicies", "DeterministicPolicies"]
class Policies:
"""A solver must inherit this class if it computes a stochastic policy as part of the solving process."""
@autocastable
def sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Sample an action for the given observation (from the solver's current policy).
# Parameters
observation: The observation for which an action must be sampled.
# Returns
The sampled action.
"""
return self._sample_action(observation)
def _sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Sample an action for the given observation (from the solver's current policy).
# Parameters
observation: The observation for which an action must be sampled.
# Returns
The sampled action.
"""
raise NotImplementedError
@autocastable
def is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check whether the solver's current policy is defined for the given observation.
# Parameters
observation: The observation to consider.
# Returns
True if the policy is defined for the given observation memory (False otherwise).
"""
return self._is_policy_defined_for(observation)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
"""Check whether the solver's current policy is defined for the given observation.
# Parameters
observation: The observation to consider.
# Returns
True if the policy is defined for the given observation memory (False otherwise).
"""
raise NotImplementedError
class UncertainPolicies(Policies):
"""A solver must inherit this class if it computes a stochastic policy (providing next action distribution
explicitly) as part of the solving process."""
def _sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
return self._get_next_action_distribution(observation).sample()
@autocastable
def get_next_action_distribution(
self, observation: D.T_agent[D.T_observation]
) -> Distribution[D.T_agent[D.T_concurrency[D.T_event]]]:
"""Get the probabilistic distribution of next action for the given observation (from the solver's current
policy).
# Parameters
observation: The observation to consider.
# Returns
The probabilistic distribution of next action.
"""
return self._get_next_action_distribution(observation)
def _get_next_action_distribution(
self, observation: D.T_agent[D.T_observation]
) -> Distribution[D.T_agent[D.T_concurrency[D.T_event]]]:
"""Get the probabilistic distribution of next action for the given observation (from the solver's current
policy).
# Parameters
observation: The observation to consider.
# Returns
The probabilistic distribution of next action.
"""
raise NotImplementedError
class DeterministicPolicies(UncertainPolicies):
"""A solver must inherit this class if it computes a deterministic policy as part of the solving process."""
def _get_next_action_distribution(
self, observation: D.T_agent[D.T_observation]
) -> Distribution[D.T_agent[D.T_concurrency[D.T_event]]]:
return SingleValueDistribution(self._get_next_action(observation))
@autocastable
def get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Get the next deterministic action (from the solver's current policy).
# Parameters
observation: The observation for which next action is requested.
# Returns
The next deterministic action.
"""
return self._get_next_action(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
"""Get the next deterministic action (from the solver's current policy).
# Parameters
observation: The observation for which next action is requested.
# Returns
The next deterministic action.
"""
raise NotImplementedError | 0.960324 | 0.778334 |
from __future__ import annotations
# Load rcpsp domains from psplib files.
# You need the discrete optimisation library to be able to use those.
from typing import Union
from skdecide.hub.domain.rcpsp.rcpsp_sk import MSRCPSP
def load_domain(file_path):
from discrete_optimization.rcpsp.rcpsp_model import (
MultiModeRCPSPModel,
SingleModeRCPSPModel,
)
from discrete_optimization.rcpsp.rcpsp_parser import parse_file
from skdecide.hub.domain.rcpsp.rcpsp_sk import MRCPSP, RCPSP
rcpsp_model: Union[SingleModeRCPSPModel, MultiModeRCPSPModel] = parse_file(
file_path
)
if isinstance(rcpsp_model, SingleModeRCPSPModel):
my_domain = RCPSP(
resource_names=rcpsp_model.resources_list,
task_ids=sorted(rcpsp_model.mode_details.keys()),
tasks_mode=rcpsp_model.mode_details,
successors=rcpsp_model.successors,
max_horizon=rcpsp_model.horizon,
resource_availability=rcpsp_model.resources,
resource_renewable={
r: r not in rcpsp_model.non_renewable_resources
for r in rcpsp_model.resources_list
},
)
elif isinstance(rcpsp_model, MultiModeRCPSPModel):
my_domain = MRCPSP(
resource_names=rcpsp_model.resources_list,
task_ids=sorted(rcpsp_model.mode_details.keys()),
tasks_mode=rcpsp_model.mode_details,
successors=rcpsp_model.successors,
max_horizon=rcpsp_model.horizon,
resource_availability=rcpsp_model.resources,
resource_renewable={
r: r not in rcpsp_model.non_renewable_resources
for r in rcpsp_model.resources_list
},
)
return my_domain
def load_multiskill_domain(file_path):
from discrete_optimization.rcpsp_multiskill.rcpsp_multiskill_parser import (
parse_file,
)
model_msrcpsp, new_tame_to_original_task_id = parse_file(
file_path, max_horizon=2000
)
resource_type_names = list(model_msrcpsp.resources_list)
resource_skills = {r: {} for r in resource_type_names}
resource_availability = {
r: model_msrcpsp.resources_availability[r][0]
for r in model_msrcpsp.resources_availability
}
resource_renewable = {
r: r not in model_msrcpsp.non_renewable_resources
for r in model_msrcpsp.resources_list
}
resource_unit_names = []
for employee in model_msrcpsp.employees:
resource_unit_names += ["employee-" + str(employee)]
resource_skills[resource_unit_names[-1]] = {}
resource_availability[resource_unit_names[-1]] = 1
resource_renewable[resource_unit_names[-1]] = True
for s in model_msrcpsp.employees[employee].dict_skill:
resource_skills[resource_unit_names[-1]][s] = (
model_msrcpsp.employees[employee].dict_skill[s].skill_value
)
return MSRCPSP(
skills_names=list(model_msrcpsp.skills_set),
resource_unit_names=resource_unit_names,
resource_type_names=resource_type_names,
resource_skills=resource_skills,
task_ids=sorted(model_msrcpsp.mode_details.keys()),
tasks_mode=model_msrcpsp.mode_details,
successors=model_msrcpsp.successors,
max_horizon=model_msrcpsp.horizon,
resource_availability=resource_availability,
resource_renewable=resource_renewable,
) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/domain/rcpsp/rcpsp_sk_parser.py | rcpsp_sk_parser.py |
from __future__ import annotations
# Load rcpsp domains from psplib files.
# You need the discrete optimisation library to be able to use those.
from typing import Union
from skdecide.hub.domain.rcpsp.rcpsp_sk import MSRCPSP
def load_domain(file_path):
from discrete_optimization.rcpsp.rcpsp_model import (
MultiModeRCPSPModel,
SingleModeRCPSPModel,
)
from discrete_optimization.rcpsp.rcpsp_parser import parse_file
from skdecide.hub.domain.rcpsp.rcpsp_sk import MRCPSP, RCPSP
rcpsp_model: Union[SingleModeRCPSPModel, MultiModeRCPSPModel] = parse_file(
file_path
)
if isinstance(rcpsp_model, SingleModeRCPSPModel):
my_domain = RCPSP(
resource_names=rcpsp_model.resources_list,
task_ids=sorted(rcpsp_model.mode_details.keys()),
tasks_mode=rcpsp_model.mode_details,
successors=rcpsp_model.successors,
max_horizon=rcpsp_model.horizon,
resource_availability=rcpsp_model.resources,
resource_renewable={
r: r not in rcpsp_model.non_renewable_resources
for r in rcpsp_model.resources_list
},
)
elif isinstance(rcpsp_model, MultiModeRCPSPModel):
my_domain = MRCPSP(
resource_names=rcpsp_model.resources_list,
task_ids=sorted(rcpsp_model.mode_details.keys()),
tasks_mode=rcpsp_model.mode_details,
successors=rcpsp_model.successors,
max_horizon=rcpsp_model.horizon,
resource_availability=rcpsp_model.resources,
resource_renewable={
r: r not in rcpsp_model.non_renewable_resources
for r in rcpsp_model.resources_list
},
)
return my_domain
def load_multiskill_domain(file_path):
from discrete_optimization.rcpsp_multiskill.rcpsp_multiskill_parser import (
parse_file,
)
model_msrcpsp, new_tame_to_original_task_id = parse_file(
file_path, max_horizon=2000
)
resource_type_names = list(model_msrcpsp.resources_list)
resource_skills = {r: {} for r in resource_type_names}
resource_availability = {
r: model_msrcpsp.resources_availability[r][0]
for r in model_msrcpsp.resources_availability
}
resource_renewable = {
r: r not in model_msrcpsp.non_renewable_resources
for r in model_msrcpsp.resources_list
}
resource_unit_names = []
for employee in model_msrcpsp.employees:
resource_unit_names += ["employee-" + str(employee)]
resource_skills[resource_unit_names[-1]] = {}
resource_availability[resource_unit_names[-1]] = 1
resource_renewable[resource_unit_names[-1]] = True
for s in model_msrcpsp.employees[employee].dict_skill:
resource_skills[resource_unit_names[-1]][s] = (
model_msrcpsp.employees[employee].dict_skill[s].skill_value
)
return MSRCPSP(
skills_names=list(model_msrcpsp.skills_set),
resource_unit_names=resource_unit_names,
resource_type_names=resource_type_names,
resource_skills=resource_skills,
task_ids=sorted(model_msrcpsp.mode_details.keys()),
tasks_mode=model_msrcpsp.mode_details,
successors=model_msrcpsp.successors,
max_horizon=model_msrcpsp.horizon,
resource_availability=resource_availability,
resource_renewable=resource_renewable,
) | 0.890354 | 0.143008 |
from __future__ import annotations
from enum import Enum
from typing import NamedTuple, Optional
from skdecide import DeterministicPlanningDomain, Space, Value
from skdecide.builders.domain import UnrestrictedActions
from skdecide.hub.space.gym import EnumSpace, ListSpace, MultiDiscreteSpace
class State(NamedTuple):
x: int
y: int
class Action(Enum):
up = 0
down = 1
left = 2
right = 3
class D(DeterministicPlanningDomain, UnrestrictedActions):
T_state = State # Type of states
T_observation = T_state # Type of observations
T_event = Action # Type of events
T_value = float # Type of transition values (rewards or costs)
T_predicate = bool # Type of logical checks
T_info = (
None # Type of additional information given as part of an environment outcome
)
class SimpleGridWorld(D):
def __init__(self, num_cols=10, num_rows=10):
self.num_cols = num_cols
self.num_rows = num_rows
def _get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
if action == Action.left:
next_state = State(max(memory.x - 1, 0), memory.y)
if action == Action.right:
next_state = State(min(memory.x + 1, self.num_cols - 1), memory.y)
if action == Action.up:
next_state = State(memory.x, max(memory.y - 1, 0))
if action == Action.down:
next_state = State(memory.x, min(memory.y + 1, self.num_rows - 1))
return next_state
def _get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
if next_state.x == memory.x and next_state.y == memory.y:
cost = 2 # big penalty when hitting a wall
else:
cost = abs(next_state.x - memory.x) + abs(
next_state.y - memory.y
) # every move costs 1
return Value(cost=cost)
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
return self._is_goal(state)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return EnumSpace(Action)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
return ListSpace([State(x=self.num_cols - 1, y=self.num_rows - 1)])
def _get_initial_state_(self) -> D.T_state:
return State(x=0, y=0)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self.num_cols, self.num_rows]) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/domain/simple_grid_world/simple_grid_world.py | simple_grid_world.py |
from __future__ import annotations
from enum import Enum
from typing import NamedTuple, Optional
from skdecide import DeterministicPlanningDomain, Space, Value
from skdecide.builders.domain import UnrestrictedActions
from skdecide.hub.space.gym import EnumSpace, ListSpace, MultiDiscreteSpace
class State(NamedTuple):
x: int
y: int
class Action(Enum):
up = 0
down = 1
left = 2
right = 3
class D(DeterministicPlanningDomain, UnrestrictedActions):
T_state = State # Type of states
T_observation = T_state # Type of observations
T_event = Action # Type of events
T_value = float # Type of transition values (rewards or costs)
T_predicate = bool # Type of logical checks
T_info = (
None # Type of additional information given as part of an environment outcome
)
class SimpleGridWorld(D):
def __init__(self, num_cols=10, num_rows=10):
self.num_cols = num_cols
self.num_rows = num_rows
def _get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
if action == Action.left:
next_state = State(max(memory.x - 1, 0), memory.y)
if action == Action.right:
next_state = State(min(memory.x + 1, self.num_cols - 1), memory.y)
if action == Action.up:
next_state = State(memory.x, max(memory.y - 1, 0))
if action == Action.down:
next_state = State(memory.x, min(memory.y + 1, self.num_rows - 1))
return next_state
def _get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
if next_state.x == memory.x and next_state.y == memory.y:
cost = 2 # big penalty when hitting a wall
else:
cost = abs(next_state.x - memory.x) + abs(
next_state.y - memory.y
) # every move costs 1
return Value(cost=cost)
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
return self._is_goal(state)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return EnumSpace(Action)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
return ListSpace([State(x=self.num_cols - 1, y=self.num_rows - 1)])
def _get_initial_state_(self) -> D.T_state:
return State(x=0, y=0)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self.num_cols, self.num_rows]) | 0.9306 | 0.472075 |
# Original code by Patrik Haslum
from __future__ import annotations
from typing import NamedTuple, Optional, Tuple
from skdecide import DiscreteDistribution, Distribution, GoalPOMDPDomain, Space, Value
from skdecide.builders.domain import (
DeterministicTransitions,
TransformedObservable,
UnrestrictedActions,
)
from skdecide.hub.space.gym import ListSpace, MultiDiscreteSpace
Row = Tuple[int] # a row of code pegs (solution or guess)
class Score(NamedTuple):
total_bulls: int
total_cows: int
class State(NamedTuple):
solution: Row
score: Score
class D(
GoalPOMDPDomain,
DeterministicTransitions,
UnrestrictedActions,
TransformedObservable,
):
T_state = State # Type of states
T_observation = Score # Type of observations
T_event = Row # Type of events (a row guess in this case)
T_value = int # Type of transition values (costs)
T_predicate = bool # Type of logical checks
T_info = (
None # Type of additional information given as part of an environment outcome
)
class MasterMind(D):
def __init__(self, n_colours=2, n_positions=2):
self._n_colours = n_colours
self._n_positions = n_positions
self._h_solutions = self._list_hidden_solutions()
def _get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
# Input is a state and an action; output is a next state.
if (
action is None
): # TODO: handle this option on algo side rather than domain; here action should never be None
return memory
else:
return State(memory.solution, self._calc_score(memory, action))
def _get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
return Value(cost=1)
# Overridden to help some solvers compute more efficiently (not mandatory, but good practice)
def _is_transition_value_dependent_on_next_state_(self) -> bool:
return False
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
return self._is_goal(state.score)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
# Return the possible actions (guesses) as an enumerable space
return ListSpace(self._h_solutions)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
# Return the space of goal OBSERVATIONS
return ListSpace([Score(total_bulls=self._n_positions, total_cows=0)])
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
# Return a uniform distribution over all initial states
n = len(self._h_solutions)
return DiscreteDistribution(
[(State(solution=s, score=Score(0, 0)), 1 / n) for s in self._h_solutions]
)
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
# `action` is the last applied action (or None if the state is an initial state)
# `state` is the state to observe (that resulted from applying the action)
if action is None:
return Score(0, 0)
return self._calc_score(state, action)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self._n_positions + 1, self._n_positions + 1])
def _list_hidden_solutions(self):
"""Return a list of all possible hidden solutions (n_colours ** n_positions)."""
h_solutions = [tuple()]
for i in range(self._n_positions):
h_solutions = [
s + (c,) for s in h_solutions for c in range(self._n_colours)
]
return h_solutions
def _calc_score(self, state, guess):
"""Compute the score of a guess."""
solution = state.solution
bulls = [False for _ in range(len(guess))]
for i in range(len(guess)):
if guess[i] == solution[i]:
bulls[i] = True
cows = [False for _ in range(len(guess))]
for i in range(len(guess)):
if guess[i] != solution[i]:
for j in range(len(guess)):
if guess[i] == solution[j] and not bulls[j] and not cows[j]:
cows[j] = True
break
return Score(total_bulls=sum(bulls), total_cows=sum(cows))
if __name__ == "__main__":
from skdecide.utils import rollout
domain = MasterMind(3, 3)
rollout(
domain,
max_steps=1000,
outcome_formatter=lambda o: f"{o.observation} - cost: {o.value.cost:.2f}",
) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/domain/mastermind/mastermind.py | mastermind.py |
# Original code by Patrik Haslum
from __future__ import annotations
from typing import NamedTuple, Optional, Tuple
from skdecide import DiscreteDistribution, Distribution, GoalPOMDPDomain, Space, Value
from skdecide.builders.domain import (
DeterministicTransitions,
TransformedObservable,
UnrestrictedActions,
)
from skdecide.hub.space.gym import ListSpace, MultiDiscreteSpace
Row = Tuple[int] # a row of code pegs (solution or guess)
class Score(NamedTuple):
total_bulls: int
total_cows: int
class State(NamedTuple):
solution: Row
score: Score
class D(
GoalPOMDPDomain,
DeterministicTransitions,
UnrestrictedActions,
TransformedObservable,
):
T_state = State # Type of states
T_observation = Score # Type of observations
T_event = Row # Type of events (a row guess in this case)
T_value = int # Type of transition values (costs)
T_predicate = bool # Type of logical checks
T_info = (
None # Type of additional information given as part of an environment outcome
)
class MasterMind(D):
def __init__(self, n_colours=2, n_positions=2):
self._n_colours = n_colours
self._n_positions = n_positions
self._h_solutions = self._list_hidden_solutions()
def _get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
# Input is a state and an action; output is a next state.
if (
action is None
): # TODO: handle this option on algo side rather than domain; here action should never be None
return memory
else:
return State(memory.solution, self._calc_score(memory, action))
def _get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
return Value(cost=1)
# Overridden to help some solvers compute more efficiently (not mandatory, but good practice)
def _is_transition_value_dependent_on_next_state_(self) -> bool:
return False
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
return self._is_goal(state.score)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
# Return the possible actions (guesses) as an enumerable space
return ListSpace(self._h_solutions)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
# Return the space of goal OBSERVATIONS
return ListSpace([Score(total_bulls=self._n_positions, total_cows=0)])
def _get_initial_state_distribution_(self) -> Distribution[D.T_state]:
# Return a uniform distribution over all initial states
n = len(self._h_solutions)
return DiscreteDistribution(
[(State(solution=s, score=Score(0, 0)), 1 / n) for s in self._h_solutions]
)
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
# `action` is the last applied action (or None if the state is an initial state)
# `state` is the state to observe (that resulted from applying the action)
if action is None:
return Score(0, 0)
return self._calc_score(state, action)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self._n_positions + 1, self._n_positions + 1])
def _list_hidden_solutions(self):
"""Return a list of all possible hidden solutions (n_colours ** n_positions)."""
h_solutions = [tuple()]
for i in range(self._n_positions):
h_solutions = [
s + (c,) for s in h_solutions for c in range(self._n_colours)
]
return h_solutions
def _calc_score(self, state, guess):
"""Compute the score of a guess."""
solution = state.solution
bulls = [False for _ in range(len(guess))]
for i in range(len(guess)):
if guess[i] == solution[i]:
bulls[i] = True
cows = [False for _ in range(len(guess))]
for i in range(len(guess)):
if guess[i] != solution[i]:
for j in range(len(guess)):
if guess[i] == solution[j] and not bulls[j] and not cows[j]:
cows[j] = True
break
return Score(total_bulls=sum(bulls), total_cows=sum(cows))
if __name__ == "__main__":
from skdecide.utils import rollout
domain = MasterMind(3, 3)
rollout(
domain,
max_steps=1000,
outcome_formatter=lambda o: f"{o.observation} - cost: {o.value.cost:.2f}",
) | 0.864239 | 0.430088 |
from __future__ import annotations
from enum import Enum
from typing import NamedTuple, Optional
from skdecide import Domain, Space, TransitionOutcome, Value
from skdecide.builders.domain import *
from skdecide.hub.space.gym import EnumSpace
class Move(Enum):
rock = 0
paper = 1
scissors = 2
class State(NamedTuple):
num_move: int
class D(
Domain,
MultiAgent,
Sequential,
Environment,
UnrestrictedActions,
Initializable,
Markovian,
TransformedObservable,
Rewards,
):
T_state = State # Type of states
T_observation = Move # Type of observations
T_event = Move # Type of events
T_value = int # Type of transition values (rewards or costs)
T_predicate = bool # Type of logical checks
T_info = (
None # Type of additional information given as part of an environment outcome
)
class RockPaperScissors(D):
def __init__(self, max_moves: int = 10):
self._max_moves = max_moves
def _state_step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
# Get players' moves
move1, move2 = action["player1"], action["player2"]
# Compute rewards
r1, r2 = {
(Move.rock, Move.rock): (0, 0),
(Move.rock, Move.paper): (-1, 1),
(Move.rock, Move.scissors): (1, -1),
(Move.paper, Move.rock): (1, -1),
(Move.paper, Move.paper): (0, 0),
(Move.paper, Move.scissors): (-1, 1),
(Move.scissors, Move.rock): (-1, 1),
(Move.scissors, Move.paper): (1, -1),
(Move.scissors, Move.scissors): (0, 0),
}[move1, move2]
# Compute num_move increment
last_state = self._memory
num_move = last_state.num_move + 1
return TransitionOutcome(
state=State(num_move=num_move),
value={"player1": Value(reward=r1), "player2": Value(reward=r2)},
termination=(num_move >= self._max_moves),
)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return {"player1": EnumSpace(Move), "player2": EnumSpace(Move)}
def _state_reset(self) -> D.T_state:
return State(num_move=0)
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
# The observation is simply the last opponent move (or Move.rock initially by default)
obs1 = action["player2"] if action is not None else Move.rock
obs2 = action["player1"] if action is not None else Move.rock
return {"player1": obs1, "player2": obs2}
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return {"player1": EnumSpace(Move), "player2": EnumSpace(Move)}
if __name__ == "__main__":
from skdecide.utils import rollout
domain = RockPaperScissors()
rollout(
domain,
action_formatter=lambda a: str({k: v.name for k, v in a.items()}),
outcome_formatter=lambda o: f"{ {k: v.name for k, v in o.observation.items()} }"
f" - rewards: { {k: v.reward for k, v in o.value.items()} }",
) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/domain/rock_paper_scissors/rock_paper_scissors.py | rock_paper_scissors.py |
from __future__ import annotations
from enum import Enum
from typing import NamedTuple, Optional
from skdecide import Domain, Space, TransitionOutcome, Value
from skdecide.builders.domain import *
from skdecide.hub.space.gym import EnumSpace
class Move(Enum):
rock = 0
paper = 1
scissors = 2
class State(NamedTuple):
num_move: int
class D(
Domain,
MultiAgent,
Sequential,
Environment,
UnrestrictedActions,
Initializable,
Markovian,
TransformedObservable,
Rewards,
):
T_state = State # Type of states
T_observation = Move # Type of observations
T_event = Move # Type of events
T_value = int # Type of transition values (rewards or costs)
T_predicate = bool # Type of logical checks
T_info = (
None # Type of additional information given as part of an environment outcome
)
class RockPaperScissors(D):
def __init__(self, max_moves: int = 10):
self._max_moves = max_moves
def _state_step(
self, action: D.T_agent[D.T_concurrency[D.T_event]]
) -> TransitionOutcome[
D.T_state,
D.T_agent[Value[D.T_value]],
D.T_agent[D.T_predicate],
D.T_agent[D.T_info],
]:
# Get players' moves
move1, move2 = action["player1"], action["player2"]
# Compute rewards
r1, r2 = {
(Move.rock, Move.rock): (0, 0),
(Move.rock, Move.paper): (-1, 1),
(Move.rock, Move.scissors): (1, -1),
(Move.paper, Move.rock): (1, -1),
(Move.paper, Move.paper): (0, 0),
(Move.paper, Move.scissors): (-1, 1),
(Move.scissors, Move.rock): (-1, 1),
(Move.scissors, Move.paper): (1, -1),
(Move.scissors, Move.scissors): (0, 0),
}[move1, move2]
# Compute num_move increment
last_state = self._memory
num_move = last_state.num_move + 1
return TransitionOutcome(
state=State(num_move=num_move),
value={"player1": Value(reward=r1), "player2": Value(reward=r2)},
termination=(num_move >= self._max_moves),
)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return {"player1": EnumSpace(Move), "player2": EnumSpace(Move)}
def _state_reset(self) -> D.T_state:
return State(num_move=0)
def _get_observation(
self,
state: D.T_state,
action: Optional[D.T_agent[D.T_concurrency[D.T_event]]] = None,
) -> D.T_agent[D.T_observation]:
# The observation is simply the last opponent move (or Move.rock initially by default)
obs1 = action["player2"] if action is not None else Move.rock
obs2 = action["player1"] if action is not None else Move.rock
return {"player1": obs1, "player2": obs2}
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return {"player1": EnumSpace(Move), "player2": EnumSpace(Move)}
if __name__ == "__main__":
from skdecide.utils import rollout
domain = RockPaperScissors()
rollout(
domain,
action_formatter=lambda a: str({k: v.name for k, v in a.items()}),
outcome_formatter=lambda o: f"{ {k: v.name for k, v in o.observation.items()} }"
f" - rewards: { {k: v.reward for k, v in o.value.items()} }",
) | 0.951986 | 0.344526 |