repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_url |
---|---|---|---|---|---|
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.network_from_edgelist | def network_from_edgelist(self, edgelist):
"""
Defines a network from an edgelist.
Parameters
----------
edgelist : list of lists
A list of lists, each of length 3 or 4. For binary networks each sublist should be [i, j, t] where i and j are node indices and t is the temporal index.
For weighted networks each sublist should be [i, j, t, weight].
"""
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
self.network = pd.DataFrame(edgelist, columns=colnames)
self._update_network() | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L227-L243 |
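A minimal usage sketch of the method above, assuming teneto is installed and TemporalNetwork is exported from the top-level package:

```python
from teneto import TemporalNetwork

# Three binary edges as [i, j, t] triples (hypothetical values).
tnet = TemporalNetwork()
tnet.network_from_edgelist([[0, 1, 0], [0, 2, 1], [1, 2, 2]])
print(tnet.network)  # pandas DataFrame with columns i, j, t
```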
wiheto/teneto | teneto/classes/network.py | TemporalNetwork._drop_duplicate_ij | def _drop_duplicate_ij(self):
"""
Drops duplicate entries from the network dataframe.
"""
self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list(
zip(*[self.network['i'].values, self.network['j'].values]))))
self.network.drop_duplicates(['ij', 't'], inplace=True)
self.network.reset_index(inplace=True, drop=True)
self.network.drop('ij', inplace=True, axis=1) | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L260-L268 |
wiheto/teneto | teneto/classes/network.py | TemporalNetwork._drop_diagonal | def _drop_diagonal(self):
"""
Drops self-contacts from the network dataframe.
"""
self.network = self.network.where(
self.network['i'] != self.network['j']).dropna()
self.network.reset_index(inplace=True, drop=True) | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L270-L276 |
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.add_edge | def add_edge(self, edgelist):
"""
Adds an edge to the network.
Parameters
----------
edgelist : list
A list (or list of lists) containing the i, j and t indices to be added. For weighted networks each list should also contain a weight value.
Returns
--------
Updates TemporalNetwork.network dataframe with the new edge.
"""
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
rows = hdf.get_storer('network').nrows
hdf.append('network', pd.DataFrame(edgelist, columns=colnames, index=np.arange(
rows, rows+len(edgelist))), format='table', data_columns=True)
edgelist = np.array(edgelist)
if np.max(edgelist[:, :2]) > self.netshape[0]:
self.netshape[0] = np.max(edgelist[:, :2])
if np.max(edgelist[:, 2]) > self.netshape[1]:
self.netshape[1] = np.max(edgelist[:, 2])
else:
newedges = pd.DataFrame(edgelist, columns=colnames)
self.network = pd.concat(
[self.network, newedges], ignore_index=True, sort=True)
self._update_network() | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L297-L332 |
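Continuing the sketch above, adding edges might look like this (indices are hypothetical):

```python
tnet.add_edge([0, 2, 3])               # one binary edge [i, j, t]
tnet.add_edge([[1, 2, 3], [0, 1, 4]])  # several edges at once
```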
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.drop_edge | def drop_edge(self, edgelist):
"""
Removes an edge from the network.
Parameters
----------
edgelist : list
A list (or list of lists) containing the i, j and t indices to be removed.
Returns
--------
Updates TemporalNetwork.network dataframe.
"""
if not isinstance(edgelist[0], list):
edgelist = [edgelist]
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if self.hdf5:
with pd.HDFStore(self.network) as hdf:
for e in edgelist:
hdf.remove(
'network', 'i == ' + str(e[0]) + ' & ' + 'j == ' + str(e[1]) + ' & ' + 't == ' + str(e[2]))
print('HDF5 delete warning. This will not reduce the size of the file.')
else:
for e in edgelist:
idx = self.network[(self.network['i'] == e[0]) & (
self.network['j'] == e[1]) & (self.network['t'] == e[2])].index
self.network.drop(idx, inplace=True)
self.network.reset_index(inplace=True, drop=True)
self._update_network() | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L334-L363 |
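And the inverse operation, again with hypothetical indices:

```python
tnet.drop_edge([0, 2, 3])               # remove one edge
tnet.drop_edge([[1, 2, 3], [0, 1, 4]])  # remove several at once
```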
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.calc_networkmeasure | def calc_networkmeasure(self, networkmeasure, **measureparams):
"""
Calculate network measure.
Parameters
-----------
networkmeasure : str
Function to call. Functions available are in teneto.networkmeasures
measureparams : kwargs
kwargs for teneto.networkmeasures.[networkmeasure]
"""
availablemeasures = [f for f in dir(
teneto.networkmeasures) if not f.startswith('__')]
if networkmeasure not in availablemeasures:
raise ValueError(
'Unknown network measure. Available network measures are: ' + ', '.join(availablemeasures))
funs = inspect.getmembers(teneto.networkmeasures)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
measure = funs[networkmeasure](self, **measureparams)
return measure | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L365-L385 |
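A dispatch sketch, assuming 'temporal_degree_centrality' is one of the functions in teneto.networkmeasures (the measure receives the network as its first argument and any kwargs after the name):

```python
# Sketch only: the measure name and kwargs are forwarded to the measure function.
degree = tnet.calc_networkmeasure('temporal_degree_centrality')
```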
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.generatenetwork | def generatenetwork(self, networktype, **networkparams):
"""
Generate a network.
Parameters
-----------
networktype : str
Function to call. Functions available are in teneto.generatenetwork
networkparams : kwargs
kwargs for teneto.generatenetwork.[networktype]
Returns
--------
TemporalNetwork.network is set to the generated network.
"""
availabletypes = [f for f in dir(
teneto.generatenetwork) if not f.startswith('__')]
if networktype not in availabletypes:
raise ValueError(
'Unknown network type. Available networks to generate are: ' + ', '.join(availabletypes))
funs = inspect.getmembers(teneto.generatenetwork)
funs = {m[0]: m[1] for m in funs if not m[0].startswith('__')}
network = funs[networktype](**networkparams)
self.network_from_array(network)
if self.nettype[1] == 'u':
self._drop_duplicate_ij() | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L387-L413 |
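A usage sketch, assuming a 'rand_binomial' generator exists in teneto.generatenetwork and accepts a (nodes, time) size and an edge probability (both the name and the kwargs are assumptions here):

```python
# Sketch only: generate a random binary temporal network.
tnet = TemporalNetwork()
tnet.generatenetwork('rand_binomial', size=(5, 10), prob=0.25)  # hypothetical kwargs
```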
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.save_aspickle | def save_aspickle(self, fname):
"""
Saves object as pickle.
Parameters
----------
fname : str
file path.
"""
if fname[-4:] != '.pkl':
fname += '.pkl'
with open(fname, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L441-L451 |
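The loading counterpart uses plain pickle (the filename is hypothetical):

```python
import pickle

with open('mynetwork.pkl', 'rb') as f:
    tnet = pickle.load(f)
```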
wiheto/teneto | teneto/timeseries/postprocess.py | postpro_fisher | def postpro_fisher(data, report=None):
"""
Performs the Fisher transform on everything in data.
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# Due to rounding errors
data[data < -0.99999999999999] = -1
data[data > 0.99999999999999] = 1
fisher_data = 0.5 * np.log((1 + data) / (1 - data))
report['fisher'] = {}
report['fisher']['performed'] = 'yes'
#report['fisher']['diagonal'] = 'zeroed'
return fisher_data, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/postprocess.py#L10-L25 |
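The transform above is the inverse hyperbolic tangent, which numpy exposes directly; a quick equivalence check:

```python
import numpy as np

r = np.array([-0.5, 0.0, 0.3, 0.9])
assert np.allclose(0.5 * np.log((1 + r) / (1 - r)), np.arctanh(r))
```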
wiheto/teneto | teneto/timeseries/postprocess.py | postpro_boxcox | def postpro_boxcox(data, report=None):
"""
Performs the Box-Cox transform on everything in data.
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# Note the min value of all time series will now be at least 1.
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
report['boxcox']['shifted_to'] = 1
if np.sum(np.isnan(bccheck)) > 0:
report['boxcox'] = {}
report['boxcox']['performed'] = 'FAILED'
report['boxcox']['failure_reason'] = (
'Box-Cox transform is returning edges with uniform values through time. '
'This is probably due to one or more outliers or a very skewed distribution. '
'Have you corrected for sources of noise (e.g. movement)? '
'If yes, some time series might need additional transforms to approximate a Gaussian.'
)
report['boxcox']['failure_consequence'] = (
'Box cox transform was skipped from the postprocess pipeline.'
)
boxcox_data = data - mindata
error_msg = ('TENETO WARNING: Box Cox transform problem. \n'
'Box Cox transform not performed. \n'
'See report for more details.')
print(error_msg)
return boxcox_data, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/postprocess.py#L28-L78 |
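For reference, the underlying scipy call returns the transformed series together with the fitted lambda, and requires strictly positive input, which is why the function above shifts the data by 1 - min:

```python
import numpy as np
import scipy.stats

x = np.random.rand(100) + 1.0      # hypothetical strictly positive series
x_bc, lam = scipy.stats.boxcox(x)  # transformed values and fitted lambda
```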
wiheto/teneto | teneto/timeseries/postprocess.py | postpro_standardize | def postpro_standardize(data, report=None):
"""
Standardizes everything in data (along axis -1).
If report variable is passed, this is added to the report.
"""
if not report:
report = {}
# First make dim 1 = time.
data = np.transpose(data, [2, 0, 1])
standardized_data = (data - data.mean(axis=0)) / data.std(axis=0)
standardized_data = np.transpose(standardized_data, [1, 2, 0])
report['standardize'] = {}
report['standardize']['performed'] = 'yes'
report['standardize']['method'] = 'Z-score'
# The above makes self connections nan; set the diagonal back to 1.
standardized_data = set_diagonal(standardized_data, 1)
return standardized_data, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/postprocess.py#L81-L98 |
wiheto/teneto | teneto/timeseries/derive.py | derive_temporalnetwork | def derive_temporalnetwork(data, params):
"""
Derives connectivity from the data. A lot of data is inherently built with edges
(e.g. communication between two individuals).
However other networks are derived from the covariance of time series
(e.g. brain networks between two regions).
Covariance based metrics deriving time-resolved networks can be done in multiple ways.
There are other methods apart from covariance based.
Derive a weight vector for each time point and then the corrrelation coefficient
for each time point.
Paramters
----------
data : array
Time series data to perform connectivity derivation on. (Default dimensions are: (time as rows, nodes as columns). Change params{'dimord'} if you want it the other way (see below).
params : dict
Parameters for each method (see below).
Necessary parameters
===================
method : str
method: "distance","slidingwindow", "taperedslidingwindow",
"jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time.
**Different methods have method specific parameters (see below)**
Params for all methods (optional)
=================================
postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize"
and any combination seperated by a + (e,g, "fisher+boxcox").
See postpro_pipeline for more information.
dimord : str
Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputting the data in the correct way.
analysis_id : str or int
Added to identify a specific analysis. The generated report will be placed in './report/' + analysis_id + '/derivation_report.html'.
report : bool
False by default. If True, a report is saved to ./report/[analysis_id]/derivation_report.html.
report_path : str
String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html
Methods specific parameters
===========================
method == "distance"
~~~~~~~~~~~~~~~~~~~
Calculates weights as 1 divided by the distance metric, scaled between 0 and 1.
W[t,t] is excluded from the scaling and then set to 1.
params['distance']: str
Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info
When method == "slidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "taperedslidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
params['distribution'] : str
Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html
params['distribution_params'] : list
Each parameter, excluding the data "x" (in their scipy function order) to generate pdf.
NOTE
!!!!!!!!!!
The data x should be considered to be centered at 0 and have a length of window size.
(i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2]; a window size of 6 entails [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5])
Given x, params['distribution_params'] contains the remaining parameters.
e.g. the normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std.
This means that the mean and std have to be provided in distribution_params.
Say we have a Gaussian distribution, a window size of 21 and params['distribution_params'] = [0, 5].
This will lead to a Gaussian with its peak in the middle of each window and a standard deviation of 5.
If we instead set params['distribution_params'] = [10, 5], this will lead to a half Gaussian with its peak at the final time point and a standard deviation of 5.
When method == "temporalderivative"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "jackknife"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
No parameters are necessary.
Optional parameters:
params['weight-var'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC*W). If weightby is selected, do not standardize in postpro.
params['weight-mean'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC+W). If weightby is selected, do not standardize in postpro.
Returns
-------
G : array
Connectivity estimates (nodes x nodes x time)
READ MORE
---------
About the general weighted pearson approach used for most methods, see:
Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity.
Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057)
SEE ALSO
--------
*postpro_pipeline*, *gen_report*
"""
report = {}
if 'dimord' not in params.keys():
params['dimord'] = 'node,time'
if 'report' not in params.keys():
params['report'] = False
if 'analysis_id' not in params.keys():
params['analysis_id'] = ''
if 'postpro' not in params.keys():
params['postpro'] = 'no'
if params['report'] == 'yes' or params['report'] == True:
if 'analysis_id' not in params.keys():
params['analysis_id'] = ''
if 'report_path' not in params.keys():
params['report_path'] = './report/' + params['analysis_id']
if 'report_filename' not in params.keys():
params['report_filename'] = 'derivation_report.html'
if params['dimord'] == 'node,time':
data = data.transpose()
if isinstance(params['method'], str):
if params['method'] == 'jackknife':
weights, report = _weightfun_jackknife(data.shape[0], report)
relation = 'weight'
elif params['method'] in ('sliding window', 'slidingwindow'):
weights, report = _weightfun_sliding_window(
data.shape[0], params, report)
relation = 'weight'
elif params['method'] in ('tapered sliding window', 'taperedslidingwindow'):
weights, report = _weightfun_tapered_sliding_window(
data.shape[0], params, report)
relation = 'weight'
elif params['method'] in ('distance', 'spatial distance', 'node distance', 'nodedistance', 'spatialdistance'):
weights, report = _weightfun_spatial_distance(data, params, report)
relation = 'weight'
elif params['method'] in ('mtd', 'multiply temporal derivative', 'multiplytemporalderivative', 'temporal derivative', 'temporalderivative'):
R, report = _temporal_derivative(data, params, report)
relation = 'coupling'
else:
raise ValueError(
'Unrecognized method. See derive_temporalnetwork documentation for predefined methods or enter own weight matrix')
else:
try:
weights = np.array(params['method'])
relation = 'weight'
except Exception:
raise ValueError(
'Unrecognized method. See documentation for predefined methods')
if weights.shape[0] != weights.shape[1]:
raise ValueError("weight matrix should be square")
if weights.shape[0] != data.shape[0]:
raise ValueError("weight matrix must equal number of time points")
if relation == 'weight':
# Loop over each weight vector and calculate pearson correlation.
# Note, should see if this can be made quicker in future.
R = np.array(
[DescrStatsW(data, weights[i, :]).corrcoef for i in range(0, weights.shape[0])])
# Make node,node,time
R = R.transpose([1, 2, 0])
# Correct jackknife direction
if params['method'] == 'jackknife':
# Correct inversion
R = R * -1
jc_z = 0
if 'weight-var' in params.keys():
R = np.transpose(R, [2, 0, 1])
R = (R - R.mean(axis=0)) / R.std(axis=0)
jc_z = 1
R = R * params['weight-var']
R = R.transpose([1, 2, 0])
if 'weight-mean' in params.keys():
R = np.transpose(R, [2, 0, 1])
if jc_z == 0:
R = (R - R.mean(axis=0)) / R.std(axis=0)
R = R + params['weight-mean']
R = np.transpose(R, [1, 2, 0])
R = set_diagonal(R, 1)
if params['postpro'] != 'no':
R, report = postpro_pipeline(
R, params['postpro'], report)
R = set_diagonal(R, 1)
if params['report'] == 'yes' or params['report'] == True:
gen_report(report, params['report_path'], params['report_filename'])
return R | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/derive.py#L16-L237 |
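A usage sketch with random data, assuming derive_temporalnetwork is exported from teneto.timeseries (shapes and parameters are hypothetical):

```python
import numpy as np
import teneto

data = np.random.randn(8, 120)  # 8 nodes, 120 time points (default dimord 'node,time')
params = {'method': 'slidingwindow', 'windowsize': 20, 'postpro': 'fisher'}
G = teneto.timeseries.derive_temporalnetwork(data, params)
# G.shape -> (8, 8, 101): node x node x windowed time
```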
wiheto/teneto | teneto/timeseries/derive.py | _weightfun_jackknife | def _weightfun_jackknife(T, report):
"""
Creates the weights for the jackknife method. See func: teneto.timeseries.derive_temporalnetwork.
"""
weights = np.ones([T, T])
np.fill_diagonal(weights, 0)
report['method'] = 'jackknife'
report['jackknife'] = ''
return weights, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/derive.py#L240-L249 |
wiheto/teneto | teneto/timeseries/derive.py | _weightfun_sliding_window | def _weightfun_sliding_window(T, params, report):
"""
Creates the weights for the sliding window method. See func: teneto.timeseries.derive_temporalnetwork.
"""
weightat0 = np.zeros(T)
weightat0[0:params['windowsize']] = np.ones(params['windowsize'])
weights = np.array([np.roll(weightat0, i)
for i in range(0, T + 1 - params['windowsize'])])
report['method'] = 'slidingwindow'
report['slidingwindow'] = params
report['slidingwindow']['taper'] = 'untapered/uniform'
return weights, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/derive.py#L252-L263 |
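For intuition, the weight matrix for T = 5 and windowsize = 3 is three shifted boxcars:

```python
import numpy as np

T, windowsize = 5, 3
w0 = np.zeros(T)
w0[:windowsize] = 1
weights = np.array([np.roll(w0, i) for i in range(T + 1 - windowsize)])
# [[1. 1. 1. 0. 0.]
#  [0. 1. 1. 1. 0.]
#  [0. 0. 1. 1. 1.]]
```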
wiheto/teneto | teneto/timeseries/derive.py | _weightfun_tapered_sliding_window | def _weightfun_tapered_sliding_window(T, params, report):
"""
Creates the weights for the tapered method. See func: teneto.timeseries.derive_temporalnetwork.
"""
x = np.arange(-(params['windowsize'] - 1) / 2, (params['windowsize']) / 2)
distribution_parameters = ','.join(map(str, params['distribution_params']))
taper = eval('sps.' + params['distribution'] +
'.pdf(x,' + distribution_parameters + ')')
weightat0 = np.zeros(T)
weightat0[0:params['windowsize']] = taper
weights = np.array([np.roll(weightat0, i)
for i in range(0, T + 1 - params['windowsize'])])
report['method'] = 'slidingwindow'
report['slidingwindow'] = params
report['slidingwindow']['taper'] = taper
report['slidingwindow']['taper_window'] = x
return weights, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/derive.py#L266-L283 |
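The eval call above can be expressed with getattr instead; a sketch of the Gaussian taper from the derive_temporalnetwork docstring example (windowsize 21, loc 0, scale 5):

```python
import numpy as np
import scipy.stats as sps

windowsize = 21
x = np.arange(-(windowsize - 1) / 2, windowsize / 2)
taper = getattr(sps, 'norm').pdf(x, 0, 5)  # same result as eval('sps.norm.pdf(x,0,5)')
```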
wiheto/teneto | teneto/timeseries/derive.py | _weightfun_spatial_distance | def _weightfun_spatial_distance(data, params, report):
"""
Creates the weights for the spatial distance method. See func: teneto.timeseries.derive_temporalnetwork.
"""
distance = getDistanceFunction(params['distance'])
weights = np.array([distance(data[n, :], data[t, :]) for n in np.arange(
0, data.shape[0]) for t in np.arange(0, data.shape[0])])
weights = np.reshape(weights, [data.shape[0], data.shape[0]])
np.fill_diagonal(weights, np.nan)
weights = 1 / weights
weights = (weights - np.nanmin(weights)) / \
(np.nanmax(weights) - np.nanmin(weights))
np.fill_diagonal(weights, 1)
return weights, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/derive.py#L286-L299 |
wiheto/teneto | teneto/timeseries/derive.py | _temporal_derivative | def _temporal_derivative(data, params, report):
"""
Performs the MTD (multiply temporal derivative) method. See func: teneto.timeseries.derive_temporalnetwork.
"""
# Data should be timexnode
report = {}
# Derivative
tdat = data[1:, :] - data[:-1, :]
# Normalize
tdat = tdat / np.std(tdat, axis=0)
# Coupling
coupling = np.array([tdat[:, i] * tdat[:, j] for i in np.arange(0,
tdat.shape[1]) for j in np.arange(0, tdat.shape[1])])
coupling = np.reshape(
coupling, [tdat.shape[1], tdat.shape[1], tdat.shape[0]])
# Average over window using strides
shape = coupling.shape[:-1] + (coupling.shape[-1] -
params['windowsize'] + 1, params['windowsize'])
strides = coupling.strides + (coupling.strides[-1],)
coupling_windowed = np.mean(np.lib.stride_tricks.as_strided(
coupling, shape=shape, strides=strides), -1)
report['method'] = 'temporalderivative'
report['temporalderivative'] = {}
report['temporalderivative']['windowsize'] = params['windowsize']
return coupling_windowed, report | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/derive.py#L302-L330 |
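The manual striding above matches numpy's sliding_window_view (numpy >= 1.20); an equivalence sketch with hypothetical shapes:

```python
import numpy as np

coupling = np.random.randn(4, 4, 50)  # node x node x time
windowsize = 7
windowed = np.lib.stride_tricks.sliding_window_view(
    coupling, windowsize, axis=-1).mean(-1)
# windowed.shape -> (4, 4, 44)
```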
wiheto/teneto | teneto/utils/utils.py | binarize_percent | def binarize_percent(netin, level, sign='pos', axis='time'):
"""
Binarizes a network proportionally. When axis='time', the top values for each edge time series are considered.
Parameters
----------
netin : array or dict
network (graphlet or contact representation),
level : float
Percent to keep (expressed as decimal, e.g. 0.1 = top 10%)
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str, default='time'
Specify which dimension thresholding is applied against. Can be 'time' (takes top % for each edge time-series) or 'graphlet' (takes top % for each graphlet)
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
# Set diagonal to 0
netin = set_diagonal(netin, 0)
if axis == 'graphlet' and netinfo['nettype'][-1] == 'u':
triu = np.triu_indices(netinfo['netshape'][0], k=1)
netin = netin[triu[0], triu[1], :]
netin = netin.transpose()
if sign == 'both':
net_sorted = np.argsort(np.abs(netin), axis=-1)
elif sign == 'pos':
net_sorted = np.argsort(netin, axis=-1)
elif sign == 'neg':
net_sorted = np.argsort(-1*netin, axis=-1)
else:
raise ValueError('Unknown value for parameter: sign')
# Predefine
netout = np.zeros(netinfo['netshape'])
if axis == 'time':
# These for loops can probably be removed for speed
for i in range(netinfo['netshape'][0]):
for j in range(netinfo['netshape'][1]):
netout[i, j, net_sorted[i, j, -int(round(net_sorted.shape[-1] * level)):]] = 1
elif axis == 'graphlet':
netout_tmp = np.zeros(netin.shape)
for i in range(netout_tmp.shape[0]):
netout_tmp[i, net_sorted[i, -int(round(net_sorted.shape[-1] * level)):]] = 1
netout_tmp = netout_tmp.transpose()
netout[triu[0], triu[1], :] = netout_tmp
netout[triu[1], triu[0], :] = netout_tmp
netout = set_diagonal(netout, 0)
# If input is contact, output contact
if netinfo['inputtype'] == 'C':
netinfo['nettype'] = 'b' + netinfo['nettype'][1]
netout = graphlet2contact(netout, netinfo)
netout.pop('inputtype')
netout.pop('values')
netout['diagonal'] = 0
return netout | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L214-L279 |
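A thresholding sketch, assuming binarize_percent is exported from teneto.utils: keep the top 10% of each edge time series of a random undirected graphlet array:

```python
import numpy as np
import teneto

G = np.random.rand(5, 5, 100)
G = (G + G.transpose(1, 0, 2)) / 2  # symmetrize to an undirected network
Gbin = teneto.utils.binarize_percent(G, 0.1, sign='pos', axis='time')
```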
wiheto/teneto | teneto/utils/utils.py | binarize_rdp | def binarize_rdp(netin, level, sign='pos', axis='time'):
"""
Binarizes a network based on RDP compression.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
level : float
Delta parameter which is the tolerated error in RDP compression.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
trajectory = rdp(netin, level)
contacts = []
# Use the trajectory points as threshold
for n in range(trajectory['index'].shape[0]):
if sign == 'pos':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] > 0]
elif sign == 'neg':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] < 0]
else:
sel = trajectory['trajectory_points']
i_ind = np.repeat(trajectory['index'][n, 0], len(sel))
j_ind = np.repeat(trajectory['index'][n, 1], len(sel))
contacts.append(np.array([i_ind, j_ind, sel]).transpose())
contacts = np.concatenate(contacts)
# Create output dictionary
netout = dict(netinfo)
netout['contacts'] = contacts
netout['nettype'] = 'b' + netout['nettype'][1]
netout['dimord'] = 'node,node,time'
netout['timetype'] = 'discrete'
netout['diagonal'] = 0
# If input is graphlet, output graphlet
if netinfo['inputtype'] == 'G':
netout = contact2graphlet(netout)
else:
netout.pop('inputtype')
return netout | python | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L283-L335 |
wiheto/teneto | teneto/utils/utils.py | binarize | def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
"""
Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
        Network (graphlet or contact representation).
threshold_type : str
What type of thresholds to make binarization. Options: 'rdp', 'percent', 'magnitude'.
    threshold_level : float
        Parameter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str
        Threshold over specified axis. Valid for percent and rdp. Can be time or graphlet.
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
if threshold_type == 'percent':
netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
raise ValueError('Unknown value to parameter: threshold_type.')
return netout | python | def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
"""
Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
        Network (graphlet or contact representation).
threshold_type : str
What type of thresholds to make binarization. Options: 'rdp', 'percent', 'magnitude'.
    threshold_level : float
        Parameter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str
        Threshold over specified axis. Valid for percent and rdp. Can be time or graphlet.
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
if threshold_type == 'percent':
netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
raise ValueError('Unknown value to parameter: threshold_type.')
return netout | Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
    Network (graphlet or contact representation).
threshold_type : str
What type of thresholds to make binarization. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : float
    Parameter dependent on threshold type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str
    Threshold over specified axis. Valid for percent and rdp. Can be time or graphlet.
Returns
-------
netout : array or dict (depending on input)
Binarized network | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L382-L422 |
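Usage sketch (not part of the repository): calling the binarize wrapper on a random weighted graphlet array with a magnitude threshold. The teneto.utils path is assumed to expose the function.

import numpy as np
import teneto

# Hypothetical weighted, undirected network: 3 nodes, 4 time-points
np.random.seed(2019)
G = np.random.rand(3, 3, 4)
G = (G + G.transpose(1, 0, 2)) / 2  # symmetrize so nettype is detected as undirected
for t in range(G.shape[-1]):
    np.fill_diagonal(G[:, :, t], 0)  # no self-edges
# Keep only edges whose weight exceeds 0.5; everything else becomes 0
B = teneto.utils.binarize(G, threshold_type='magnitude', threshold_level=0.5)
print(np.unique(B))  # only 0s and 1s remain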
wiheto/teneto | teneto/utils/utils.py | process_input | def process_input(netIn, allowedformats, outputformat='G'):
"""
Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
        Which network formats are allowed. Options: 'C', 'TN', 'G'.
    outputformat : str, default='G'
        Target output format. Options: 'C', 'G' or 'TN'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class
"""
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
netInfo = {}
netInfo['netshape'] = netIn.shape
netInfo['nettype'] = gen_nettype(netIn)
G = netIn
elif inputtype == 'C' and outputformat == 'C':
pass
else:
raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
TN.network['i'] = TN.network['i'].astype(int)
TN.network['j'] = TN.network['j'].astype(int)
TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
C = graphlet2contact(G, netInfo)
if outputformat == 'G':
return G, netInfo
elif outputformat == 'C':
return C
elif outputformat == 'TN':
return TN | python | def process_input(netIn, allowedformats, outputformat='G'):
"""
Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
        Which network formats are allowed. Options: 'C', 'TN', 'G'.
    outputformat : str, default='G'
        Target output format. Options: 'C', 'G' or 'TN'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class
"""
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
netInfo = {}
netInfo['netshape'] = netIn.shape
netInfo['nettype'] = gen_nettype(netIn)
G = netIn
elif inputtype == 'C' and outputformat == 'C':
pass
else:
raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
TN.network['i'] = TN.network['i'].astype(int)
TN.network['j'] = TN.network['j'].astype(int)
TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
C = graphlet2contact(G, netInfo)
if outputformat == 'G':
return G, netInfo
elif outputformat == 'C':
return C
elif outputformat == 'TN':
return TN | Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
    Which network formats are allowed. Options: 'C', 'TN', 'G'.
outputformat : str, default='G'
    Target output format. Options: 'C', 'G' or 'TN'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L575-L646 |
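A minimal sketch (not part of the repository) of how process_input is typically called, assuming a plain (node,node,time) numpy array as input:

import numpy as np
import teneto

# Hand-built binary undirected graphlet: 3 nodes, 2 time-points
G = np.zeros((3, 3, 2))
G[0, 1, 0] = G[1, 0, 0] = 1
G[1, 2, 1] = G[2, 1, 1] = 1
# Request the graphlet representation back together with its metadata
G_out, netinfo = teneto.utils.process_input(G, ['C', 'G'], outputformat='G')
print(netinfo)  # {'netshape': (3, 3, 2), 'nettype': 'bu', 'inputtype': 'G'}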
wiheto/teneto | teneto/utils/utils.py | clean_community_indexes | def clean_community_indexes(communityID):
"""
Takes input of community assignments. Returns reindexed community assignment by using smallest numbers possible.
Parameters
----------
communityID : array-like
        list or array of integers. Output from community detection algorithms.
Returns
-------
new_communityID : array
cleaned list going from 0 to len(np.unique(communityID))-1
Note
-----
    Behaviour of function entails that the lowest community integer in communityID will receive the lowest integer in new_communityID.
"""
communityID = np.array(communityID)
cid_shape = communityID.shape
if len(cid_shape) > 1:
communityID = communityID.flatten()
new_communityID = np.zeros(len(communityID))
for i, n in enumerate(np.unique(communityID)):
new_communityID[communityID == n] = i
if len(cid_shape) > 1:
new_communityID = new_communityID.reshape(cid_shape)
return new_communityID | python | def clean_community_indexes(communityID):
"""
Takes input of community assignments. Returns reindexed community assignment by using smallest numbers possible.
Parameters
----------
communityID : array-like
        list or array of integers. Output from community detection algorithms.
Returns
-------
new_communityID : array
cleaned list going from 0 to len(np.unique(communityID))-1
Note
-----
    Behaviour of function entails that the lowest community integer in communityID will receive the lowest integer in new_communityID.
"""
communityID = np.array(communityID)
cid_shape = communityID.shape
if len(cid_shape) > 1:
communityID = communityID.flatten()
new_communityID = np.zeros(len(communityID))
for i, n in enumerate(np.unique(communityID)):
new_communityID[communityID == n] = i
if len(cid_shape) > 1:
new_communityID = new_communityID.reshape(cid_shape)
return new_communityID | Takes input of community assignments. Returns reindexed community assignment by using smallest numbers possible.
Parameters
----------
communityID : array-like
    list or array of integers. Output from community detection algorithms.
Returns
-------
new_communityID : array
cleaned list going from 0 to len(np.unique(communityID))-1
Note
-----
Behaviour of function entails that the lowest community integer in communityID will receive the lowest integer in new_communityID. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L649-L680
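Illustrative sketch (not part of the repository): relabelling an arbitrary community assignment.

import teneto

labels = [4, 4, 7, 9, 7]  # arbitrary labels from a community detection run
print(teneto.utils.clean_community_indexes(labels))
# [0. 0. 1. 2. 1.] -- same partition, relabelled from 0 upwards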
wiheto/teneto | teneto/utils/utils.py | multiple_contacts_get_values | def multiple_contacts_get_values(C):
"""
    Given a contact representation with repeated contacts, this function removes the duplicates and stores the number of repetitions in a 'values' field.
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
    C_out : dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field.
"""
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] += 1
else:
d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
new_values.append(value)
new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
return C_out | python | def multiple_contacts_get_values(C):
"""
    Given a contact representation with repeated contacts, this function removes the duplicates and stores the number of repetitions in a 'values' field.
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
    C_out : dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field.
"""
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] += 1
else:
d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
new_values.append(value)
new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
    return C_out | Given a contact representation with repeated contacts, this function removes the duplicates and stores the number of repetitions in a 'values' field.
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
C_out : dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L683-L718 |
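Illustrative sketch (not part of the repository) with a minimal contact dict; a full contact representation normally carries more metadata.

import teneto

# The (0, 1, t=0) contact appears twice
C = {'contacts': [[0, 1, 0], [0, 1, 0], [1, 2, 1]]}
C = teneto.utils.multiple_contacts_get_values(C)
print(C['contacts'])  # [(0, 1, 0), (1, 2, 1)]
print(C['values'])    # [2, 1]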
wiheto/teneto | teneto/utils/utils.py | df_to_array | def df_to_array(df, netshape, nettype):
"""
    Returns a numpy array (snapshot representation) from the dataframe contact list
    Parameters
    ----------
df : pandas df
pandas df with columns, i,j,t.
netshape : tuple
network shape, format: (node, time)
nettype : str
'wu', 'wd', 'bu', 'bd'
    Returns
    -------
G : array
(node,node,time) array for the network
"""
if len(df) > 0:
idx = np.array(list(map(list, df.values)))
G = np.zeros([netshape[0], netshape[0], netshape[1]])
if idx.shape[1] == 3:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2]]])
idx = idx.astype(int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
elif idx.shape[1] == 4:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2, 3]]])
weights = idx[:, 3]
idx = np.array(idx[:, :3], dtype=int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = weights
else:
G = np.zeros([netshape[0], netshape[0], netshape[1]])
return G | python | def df_to_array(df, netshape, nettype):
"""
    Returns a numpy array (snapshot representation) from the dataframe contact list
    Parameters
    ----------
df : pandas df
pandas df with columns, i,j,t.
netshape : tuple
network shape, format: (node, time)
nettype : str
'wu', 'wd', 'bu', 'bd'
    Returns
    -------
G : array
(node,node,time) array for the network
"""
if len(df) > 0:
idx = np.array(list(map(list, df.values)))
G = np.zeros([netshape[0], netshape[0], netshape[1]])
if idx.shape[1] == 3:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2]]])
idx = idx.astype(int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
elif idx.shape[1] == 4:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2, 3]]])
weights = idx[:, 3]
idx = np.array(idx[:, :3], dtype=int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = weights
else:
G = np.zeros([netshape[0], netshape[0], netshape[1]])
    return G | Returns a numpy array (snapshot representation) from the dataframe contact list
Parameters
----------
df : pandas df
pandas df with columns, i,j,t.
netshape : tuple
network shape, format: (node, time)
nettype : str
'wu', 'wd', 'bu', 'bd'
Returns
-------
G : array
(node,node,time) array for the network | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L721-L754 |
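Illustrative sketch (not part of the repository): building a small undirected binary array from an i,j,t edgelist. The column order i, j, t is assumed, since the function reads df.values positionally.

import pandas as pd
import teneto

# Two binary edges in a 3-node network over 2 time-points
df = pd.DataFrame({'i': [0, 1], 'j': [1, 2], 't': [0, 1]})
G = teneto.utils.df_to_array(df, netshape=(3, 2), nettype='bu')
print(G[0, 1, 0], G[1, 0, 0])  # 1.0 1.0 -- undirected edges are mirrored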
wiheto/teneto | teneto/utils/utils.py | check_distance_funciton_input | def check_distance_funciton_input(distance_func_name, netinfo):
"""
    Function checks distance_func_name; if it is specified as 'default', a default distance function is selected given the type of the network.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name.
"""
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
        print('Default distance function specified. As network is binary, using Hamming')
distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
distance_func_name = 'euclidean'
print(
            'Default distance function specified. '
'As network is weighted, using Euclidean')
return distance_func_name | python | def check_distance_funciton_input(distance_func_name, netinfo):
"""
    Function checks distance_func_name; if it is specified as 'default', a default distance function is selected given the type of the network.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name.
"""
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
        print('Default distance function specified. As network is binary, using Hamming')
distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
distance_func_name = 'euclidean'
print(
            'Default distance function specified. '
'As network is weighted, using Euclidean')
    return distance_func_name | Function checks distance_func_name; if it is specified as 'default', a default distance function is selected given the type of the network.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L757-L786 |
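Illustrative sketch (not part of the repository) of the default resolution:

import teneto

netinfo = {'nettype': 'bu'}  # binary, undirected
name = teneto.utils.check_distance_funciton_input('default', netinfo)
print(name)  # 'hamming' (a note about the default is also printed)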
wiheto/teneto | teneto/utils/utils.py | load_parcellation_coords | def load_parcellation_coords(parcellation_name):
"""
Loads coordinates of included parcellations.
Parameters
----------
parcellation_name : str
options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.
Returns
-------
parc : array
        parcellation coordinates
"""
path = tenetopath[0] + '/data/parcellation/' + parcellation_name + '.csv'
parc = np.loadtxt(path, skiprows=1, delimiter=',', usecols=[1, 2, 3])
return parc | python | def load_parcellation_coords(parcellation_name):
"""
Loads coordinates of included parcellations.
Parameters
----------
parcellation_name : str
options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.
Returns
-------
parc : array
        parcellation coordinates
"""
path = tenetopath[0] + '/data/parcellation/' + parcellation_name + '.csv'
parc = np.loadtxt(path, skiprows=1, delimiter=',', usecols=[1, 2, 3])
return parc | Loads coordinates of included parcellations.
Parameters
----------
parcellation_name : str
options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.
Returns
-------
parc : array
    parcellation coordinates | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L789-L809
wiheto/teneto | teneto/utils/utils.py | make_parcellation | def make_parcellation(data_path, parcellation, parc_type=None, parc_params=None):
"""
Performs a parcellation which reduces voxel space to regions of interest (brain data).
Parameters
----------
data_path : str
Path to .nii image.
parcellation : str
Specify which parcellation that you would like to use. For MNI: 'gordon2014_333', 'power2012_264', For TAL: 'shen2013_278'.
        It is possible to add the OH subcortical atlas on top of a cortical atlas (e.g. gordon) by adding:
        '+OH' (for Oxford-Harvard subcortical atlas) and '+SUIT' for the SUIT cerebellar atlas.
        e.g. 'gordon2014_333+OH+SUIT'
parc_type : str
Can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
Returns
-------
data : array
Data after the parcellation.
NOTE
----
    These functions make use of nilearn. Please cite nilearn if used in a publication.
"""
if isinstance(parcellation, str):
parcin = ''
if '+' in parcellation:
parcin = parcellation
parcellation = parcellation.split('+')[0]
if '+OH' in parcin:
subcortical = True
else:
subcortical = None
if '+SUIT' in parcin:
cerebellar = True
else:
cerebellar = None
if not parc_type or not parc_params:
path = tenetopath[0] + '/data/parcellation_defaults/defaults.json'
with open(path) as data_file:
defaults = json.load(data_file)
if not parc_type:
parc_type = defaults[parcellation]['type']
print('Using default parcellation type')
if not parc_params:
parc_params = defaults[parcellation]['params']
print('Using default parameters')
if parc_type == 'sphere':
parcellation = load_parcellation_coords(parcellation)
seed = NiftiSpheresMasker(np.array(parcellation), **parc_params)
data = seed.fit_transform(data_path)
elif parc_type == 'region':
path = tenetopath[0] + '/data/parcellation/' + parcellation + '.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data = region.fit_transform(data_path)
else:
raise ValueError('Unknown parc_type specified')
if subcortical:
subatlas = fetch_atlas_harvard_oxford('sub-maxprob-thr0-2mm')['maps']
region = NiftiLabelsMasker(subatlas, **parc_params)
data_sub = region.fit_transform(data_path)
data = np.hstack([data, data_sub])
if cerebellar:
path = tenetopath[0] + '/data/parcellation/Cerebellum-SUIT_space-MNI152NLin2009cAsym.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data_cerebellar = region.fit_transform(data_path)
data = np.hstack([data, data_cerebellar])
return data | python | def make_parcellation(data_path, parcellation, parc_type=None, parc_params=None):
"""
Performs a parcellation which reduces voxel space to regions of interest (brain data).
Parameters
----------
data_path : str
Path to .nii image.
parcellation : str
Specify which parcellation that you would like to use. For MNI: 'gordon2014_333', 'power2012_264', For TAL: 'shen2013_278'.
        It is possible to add the OH subcortical atlas on top of a cortical atlas (e.g. gordon) by adding:
        '+OH' (for Oxford-Harvard subcortical atlas) and '+SUIT' for the SUIT cerebellar atlas.
        e.g. 'gordon2014_333+OH+SUIT'
parc_type : str
Can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
Returns
-------
data : array
Data after the parcellation.
NOTE
----
    These functions make use of nilearn. Please cite nilearn if used in a publication.
"""
if isinstance(parcellation, str):
parcin = ''
if '+' in parcellation:
parcin = parcellation
parcellation = parcellation.split('+')[0]
if '+OH' in parcin:
subcortical = True
else:
subcortical = None
if '+SUIT' in parcin:
cerebellar = True
else:
cerebellar = None
if not parc_type or not parc_params:
path = tenetopath[0] + '/data/parcellation_defaults/defaults.json'
with open(path) as data_file:
defaults = json.load(data_file)
if not parc_type:
parc_type = defaults[parcellation]['type']
print('Using default parcellation type')
if not parc_params:
parc_params = defaults[parcellation]['params']
print('Using default parameters')
if parc_type == 'sphere':
parcellation = load_parcellation_coords(parcellation)
seed = NiftiSpheresMasker(np.array(parcellation), **parc_params)
data = seed.fit_transform(data_path)
elif parc_type == 'region':
path = tenetopath[0] + '/data/parcellation/' + parcellation + '.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data = region.fit_transform(data_path)
else:
raise ValueError('Unknown parc_type specified')
if subcortical:
subatlas = fetch_atlas_harvard_oxford('sub-maxprob-thr0-2mm')['maps']
region = NiftiLabelsMasker(subatlas, **parc_params)
data_sub = region.fit_transform(data_path)
data = np.hstack([data, data_sub])
if cerebellar:
path = tenetopath[0] + '/data/parcellation/Cerebellum-SUIT_space-MNI152NLin2009cAsym.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data_cerebellar = region.fit_transform(data_path)
data = np.hstack([data, data_cerebellar])
return data | Performs a parcellation which reduces voxel space to regions of interest (brain data).
Parameters
----------
data_path : str
Path to .nii image.
parcellation : str
Specify which parcellation that you would like to use. For MNI: 'gordon2014_333', 'power2012_264', For TAL: 'shen2013_278'.
    It is possible to add the OH subcortical atlas on top of a cortical atlas (e.g. gordon) by adding:
    '+OH' (for Oxford-Harvard subcortical atlas) and '+SUIT' for the SUIT cerebellar atlas.
    e.g. 'gordon2014_333+OH+SUIT'
parc_type : str
Can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
Returns
-------
data : array
Data after the parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L812-L890
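Usage sketch (not part of the repository); the input filename is hypothetical and nilearn must be installed:

import teneto

# 'sub-01_task-rest_bold.nii.gz' is a hypothetical MNI-space image
data = teneto.utils.make_parcellation('sub-01_task-rest_bold.nii.gz',
                                      'gordon2014_333')
# parc_type and parc_params fall back to teneto's bundled defaults
print(data.shape)  # (time, region)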
wiheto/teneto | teneto/utils/utils.py | create_traj_ranges | def create_traj_ranges(start, stop, N):
"""
Fills in the trajectory range.
# Adapted from https://stackoverflow.com/a/40624614
"""
steps = (1.0/(N-1)) * (stop - start)
if np.isscalar(steps):
return steps*np.arange(N) + start
else:
return steps[:, None]*np.arange(N) + start[:, None] | python | def create_traj_ranges(start, stop, N):
"""
Fills in the trajectory range.
# Adapted from https://stackoverflow.com/a/40624614
"""
steps = (1.0/(N-1)) * (stop - start)
if np.isscalar(steps):
return steps*np.arange(N) + start
else:
return steps[:, None]*np.arange(N) + start[:, None] | Fills in the trajectory range.
# Adapted from https://stackoverflow.com/a/40624614 | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L893-L903 |
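Illustrative sketch (not part of the repository) of the scalar and vector cases:

import numpy as np
import teneto

# Scalar endpoints: 5 evenly spaced points from 0 to 4
print(teneto.utils.create_traj_ranges(0, 4, 5))
# [0. 1. 2. 3. 4.]
# Vector endpoints: one interpolated row per start/stop pair
print(teneto.utils.create_traj_ranges(np.array([0, 10]), np.array([4, 14]), 5))
# [[ 0.  1.  2.  3.  4.]
#  [10. 11. 12. 13. 14.]]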
wiheto/teneto | teneto/utils/utils.py | get_dimord | def get_dimord(measure, calc=None, community=None):
"""
Get the dimension order of a network measure.
Parameters
----------
measure : str
        Name of function in teneto.networkmeasures.
calc : str, default=None
Calc parameter for the function
community : bool, default=None
        If not None, the community variant of the measure's dimension order is returned.
Returns
-------
dimord : str
Dimension order. So "node,node,time" would define the dimensions of the network measure.
"""
if not calc:
calc = ''
else:
calc = '_' + calc
if not community:
community = ''
else:
community = 'community'
if 'community' in calc and 'community' in community:
community = ''
if calc == 'community_avg' or calc == 'community_pairs':
community = ''
dimord_dict = {
'temporal_closeness_centrality': 'node',
'temporal_degree_centrality': 'node',
        'temporal_degree_centrality_avg': 'node',
'temporal_degree_centrality_time': 'node,time',
'temporal_efficiency': 'global',
'temporal_efficiency_global': 'global',
'temporal_efficiency_node': 'node',
'temporal_efficiency_to': 'node',
'sid_global': 'global,time',
'community_pairs': 'community,community,time',
'community_avg': 'community,time',
'sid': 'community,community,time',
'reachability_latency_global': 'global',
'reachability_latency': 'global',
'reachability_latency_node': 'node',
'fluctuability': 'node',
'fluctuability_global': 'global',
'bursty_coeff': 'edge,edge',
'bursty_coeff_edge': 'edge,edge',
'bursty_coeff_node': 'node',
'bursty_coeff_meanEdgePerNode': 'node',
'volatility_global': 'time',
}
if measure + calc + community in dimord_dict:
return dimord_dict[measure + calc + community]
else:
        print('WARNING: get_dimord() returned unknown dimension labels')
return 'unknown' | python | def get_dimord(measure, calc=None, community=None):
"""
Get the dimension order of a network measure.
Parameters
----------
measure : str
        Name of function in teneto.networkmeasures.
calc : str, default=None
Calc parameter for the function
community : bool, default=None
        If not None, the community variant of the measure's dimension order is returned.
Returns
-------
dimord : str
Dimension order. So "node,node,time" would define the dimensions of the network measure.
"""
if not calc:
calc = ''
else:
calc = '_' + calc
if not community:
community = ''
else:
community = 'community'
if 'community' in calc and 'community' in community:
community = ''
if calc == 'community_avg' or calc == 'community_pairs':
community = ''
dimord_dict = {
'temporal_closeness_centrality': 'node',
'temporal_degree_centrality': 'node',
        'temporal_degree_centrality_avg': 'node',
'temporal_degree_centrality_time': 'node,time',
'temporal_efficiency': 'global',
'temporal_efficiency_global': 'global',
'temporal_efficiency_node': 'node',
'temporal_efficiency_to': 'node',
'sid_global': 'global,time',
'community_pairs': 'community,community,time',
'community_avg': 'community,time',
'sid': 'community,community,time',
'reachability_latency_global': 'global',
'reachability_latency': 'global',
'reachability_latency_node': 'node',
'fluctuability': 'node',
'fluctuability_global': 'global',
'bursty_coeff': 'edge,edge',
'bursty_coeff_edge': 'edge,edge',
'bursty_coeff_node': 'node',
'bursty_coeff_meanEdgePerNode': 'node',
'volatility_global': 'time',
}
if measure + calc + community in dimord_dict:
return dimord_dict[measure + calc + community]
else:
        print('WARNING: get_dimord() returned unknown dimension labels')
return 'unknown' | Get the dimension order of a network measure.
Parameters
----------
measure : str
    Name of function in teneto.networkmeasures.
calc : str, default=None
Calc parameter for the function
community : bool, default=None
    If not None, the community variant of the measure's dimension order is returned.
Returns
-------
dimord : str
Dimension order. So "node,node,time" would define the dimensions of the network measure. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L906-L969 |
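Illustrative sketch (not part of the repository) of the lookup:

import teneto

print(teneto.utils.get_dimord('temporal_degree_centrality'))          # node
print(teneto.utils.get_dimord('temporal_degree_centrality', 'time'))  # node,time
print(teneto.utils.get_dimord('volatility', 'global'))                # time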
wiheto/teneto | teneto/utils/utils.py | get_network_when | def get_network_when(tnet, i=None, j=None, t=None, ij=None, logic='and', copy=False, asarray=False):
"""
Returns subset of dataframe that matches index
Parameters
----------
tnet : df or TemporalNetwork
TemporalNetwork object or pandas dataframe edgelist
i : list or int
get nodes in column i (source nodes in directed networks)
j : list or int
get nodes in column j (target nodes in directed networks)
t : list or int
        get edges at these time-points.
ij : list or int
get nodes for column i or j (logic and can still persist for t). Cannot be specified along with i or j
logic : str
        options: \'and\' or \'or\'. If \'and\', the function returns rows that match all of the i, j, t arguments. If \'or\', only one of them has to match.
copy : bool
        default False. If True, returns a copy of the dataframe. Note: relevant if hdf5 data.
asarray : bool
default False. If True, returns the list of edges as an array.
Returns
-------
df : pandas dataframe
Unless asarray are set to true.
"""
if isinstance(tnet, pd.DataFrame):
network = tnet
hdf5 = False
# Can add hdfstore
elif isinstance(tnet, object):
network = tnet.network
hdf5 = tnet.hdf5
if ij is not None and (i is not None or j is not None):
        raise ValueError('ij cannot be specified along with i or j')
# Make non list inputs a list
if i is not None and not isinstance(i, list):
i = [i]
if j is not None and not isinstance(j, list):
j = [j]
if t is not None and not isinstance(t, list):
t = [t]
if ij is not None and not isinstance(ij, list):
ij = [ij]
if hdf5:
if i is not None and j is not None and t is not None and logic == 'and':
isinstr = 'i in ' + str(i) + ' & ' + 'j in ' + \
str(j) + ' & ' + 't in ' + str(t)
elif ij is not None and t is not None and logic == 'and':
isinstr = '(i in ' + str(ij) + ' | ' + 'j in ' + \
str(ij) + ') & ' + 't in ' + str(t)
elif ij is not None and t is not None and logic == 'or':
isinstr = 'i in ' + str(ij) + ' | ' + 'j in ' + \
str(ij) + ' | ' + 't in ' + str(t)
elif i is not None and j is not None and logic == 'and':
isinstr = 'i in ' + str(i) + ' & ' + 'j in ' + str(j)
elif i is not None and t is not None and logic == 'and':
isinstr = 'i in ' + str(i) + ' & ' + 't in ' + str(t)
elif j is not None and t is not None and logic == 'and':
isinstr = 'j in ' + str(j) + ' & ' + 't in ' + str(t)
elif i is not None and j is not None and t is not None and logic == 'or':
isinstr = 'i in ' + str(i) + ' | ' + 'j in ' + \
str(j) + ' | ' + 't in ' + str(t)
elif i is not None and j is not None and logic == 'or':
isinstr = 'i in ' + str(i) + ' | ' + 'j in ' + str(j)
elif i is not None and t is not None and logic == 'or':
isinstr = 'i in ' + str(i) + ' | ' + 't in ' + str(t)
elif j is not None and t is not None and logic == 'or':
isinstr = 'j in ' + str(j) + ' | ' + 't in ' + str(t)
elif i is not None:
isinstr = 'i in ' + str(i)
elif j is not None:
isinstr = 'j in ' + str(j)
elif t is not None:
isinstr = 't in ' + str(t)
elif ij is not None:
isinstr = 'i in ' + str(ij) + ' | ' + 'j in ' + str(ij)
df = pd.read_hdf(network, where=isinstr)
else:
if i is not None and j is not None and t is not None and logic == 'and':
df = network[(network['i'].isin(i)) & (
network['j'].isin(j)) & (network['t'].isin(t))]
elif ij is not None and t is not None and logic == 'and':
df = network[((network['i'].isin(ij)) | (
network['j'].isin(ij))) & (network['t'].isin(t))]
elif ij is not None and t is not None and logic == 'or':
df = network[((network['i'].isin(ij)) | (
network['j'].isin(ij))) | (network['t'].isin(t))]
elif i is not None and j is not None and logic == 'and':
df = network[(network['i'].isin(i)) & (network['j'].isin(j))]
elif i is not None and t is not None and logic == 'and':
df = network[(network['i'].isin(i)) & (network['t'].isin(t))]
elif j is not None and t is not None and logic == 'and':
df = network[(network['j'].isin(j)) & (network['t'].isin(t))]
elif i is not None and j is not None and t is not None and logic == 'or':
df = network[(network['i'].isin(i)) | (
network['j'].isin(j)) | (network['t'].isin(t))]
elif i is not None and j is not None and logic == 'or':
df = network[(network['i'].isin(i)) | (network['j'].isin(j))]
elif i is not None and t is not None and logic == 'or':
df = network[(network['i'].isin(i)) | (network['t'].isin(t))]
elif j is not None and t is not None and logic == 'or':
df = network[(network['j'].isin(j)) | (network['t'].isin(t))]
elif i is not None:
df = network[network['i'].isin(i)]
elif j is not None:
df = network[network['j'].isin(j)]
elif t is not None:
df = network[network['t'].isin(t)]
elif ij is not None:
df = network[(network['i'].isin(ij)) | (network['j'].isin(ij))]
if copy:
df = df.copy()
if asarray:
df = df.values
return df | python | def get_network_when(tnet, i=None, j=None, t=None, ij=None, logic='and', copy=False, asarray=False):
"""
Returns subset of dataframe that matches index
Parameters
----------
tnet : df or TemporalNetwork
TemporalNetwork object or pandas dataframe edgelist
i : list or int
get nodes in column i (source nodes in directed networks)
j : list or int
get nodes in column j (target nodes in directed networks)
t : list or int
        get edges at these time-points.
ij : list or int
get nodes for column i or j (logic and can still persist for t). Cannot be specified along with i or j
logic : str
        options: \'and\' or \'or\'. If \'and\', the function returns rows that match all of the i, j, t arguments. If \'or\', only one of them has to match.
copy : bool
        default False. If True, returns a copy of the dataframe. Note: relevant if hdf5 data.
asarray : bool
default False. If True, returns the list of edges as an array.
Returns
-------
df : pandas dataframe
Unless asarray are set to true.
"""
if isinstance(tnet, pd.DataFrame):
network = tnet
hdf5 = False
# Can add hdfstore
elif isinstance(tnet, object):
network = tnet.network
hdf5 = tnet.hdf5
if ij is not None and (i is not None or j is not None):
        raise ValueError('ij cannot be specified along with i or j')
# Make non list inputs a list
if i is not None and not isinstance(i, list):
i = [i]
if j is not None and not isinstance(j, list):
j = [j]
if t is not None and not isinstance(t, list):
t = [t]
if ij is not None and not isinstance(ij, list):
ij = [ij]
if hdf5:
if i is not None and j is not None and t is not None and logic == 'and':
isinstr = 'i in ' + str(i) + ' & ' + 'j in ' + \
str(j) + ' & ' + 't in ' + str(t)
elif ij is not None and t is not None and logic == 'and':
isinstr = '(i in ' + str(ij) + ' | ' + 'j in ' + \
str(ij) + ') & ' + 't in ' + str(t)
elif ij is not None and t is not None and logic == 'or':
isinstr = 'i in ' + str(ij) + ' | ' + 'j in ' + \
str(ij) + ' | ' + 't in ' + str(t)
elif i is not None and j is not None and logic == 'and':
isinstr = 'i in ' + str(i) + ' & ' + 'j in ' + str(j)
elif i is not None and t is not None and logic == 'and':
isinstr = 'i in ' + str(i) + ' & ' + 't in ' + str(t)
elif j is not None and t is not None and logic == 'and':
isinstr = 'j in ' + str(j) + ' & ' + 't in ' + str(t)
elif i is not None and j is not None and t is not None and logic == 'or':
isinstr = 'i in ' + str(i) + ' | ' + 'j in ' + \
str(j) + ' | ' + 't in ' + str(t)
elif i is not None and j is not None and logic == 'or':
isinstr = 'i in ' + str(i) + ' | ' + 'j in ' + str(j)
elif i is not None and t is not None and logic == 'or':
isinstr = 'i in ' + str(i) + ' | ' + 't in ' + str(t)
elif j is not None and t is not None and logic == 'or':
isinstr = 'j in ' + str(j) + ' | ' + 't in ' + str(t)
elif i is not None:
isinstr = 'i in ' + str(i)
elif j is not None:
isinstr = 'j in ' + str(j)
elif t is not None:
isinstr = 't in ' + str(t)
elif ij is not None:
isinstr = 'i in ' + str(ij) + ' | ' + 'j in ' + str(ij)
df = pd.read_hdf(network, where=isinstr)
else:
if i is not None and j is not None and t is not None and logic == 'and':
df = network[(network['i'].isin(i)) & (
network['j'].isin(j)) & (network['t'].isin(t))]
elif ij is not None and t is not None and logic == 'and':
df = network[((network['i'].isin(ij)) | (
network['j'].isin(ij))) & (network['t'].isin(t))]
elif ij is not None and t is not None and logic == 'or':
df = network[((network['i'].isin(ij)) | (
network['j'].isin(ij))) | (network['t'].isin(t))]
elif i is not None and j is not None and logic == 'and':
df = network[(network['i'].isin(i)) & (network['j'].isin(j))]
elif i is not None and t is not None and logic == 'and':
df = network[(network['i'].isin(i)) & (network['t'].isin(t))]
elif j is not None and t is not None and logic == 'and':
df = network[(network['j'].isin(j)) & (network['t'].isin(t))]
elif i is not None and j is not None and t is not None and logic == 'or':
df = network[(network['i'].isin(i)) | (
network['j'].isin(j)) | (network['t'].isin(t))]
elif i is not None and j is not None and logic == 'or':
df = network[(network['i'].isin(i)) | (network['j'].isin(j))]
elif i is not None and t is not None and logic == 'or':
df = network[(network['i'].isin(i)) | (network['t'].isin(t))]
elif j is not None and t is not None and logic == 'or':
df = network[(network['j'].isin(j)) | (network['t'].isin(t))]
elif i is not None:
df = network[network['i'].isin(i)]
elif j is not None:
df = network[network['j'].isin(j)]
elif t is not None:
df = network[network['t'].isin(t)]
elif ij is not None:
df = network[(network['i'].isin(ij)) | (network['j'].isin(ij))]
if copy:
df = df.copy()
if asarray:
df = df.values
return df | Returns subset of dataframe that matches index
Parameters
----------
tnet : df or TemporalNetwork
TemporalNetwork object or pandas dataframe edgelist
i : list or int
get nodes in column i (source nodes in directed networks)
j : list or int
get nodes in column j (target nodes in directed networks)
t : list or int
    get edges at these time-points.
ij : list or int
get nodes for column i or j (logic and can still persist for t). Cannot be specified along with i or j
logic : str
    options: \'and\' or \'or\'. If \'and\', the function returns rows that match all of the i, j, t arguments. If \'or\', only one of them has to match.
copy : bool
    default False. If True, returns a copy of the dataframe. Note: relevant if hdf5 data.
asarray : bool
default False. If True, returns the list of edges as an array.
Returns
-------
df : pandas dataframe
    Unless asarray is set to True, in which case an array of edges is returned. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L972-L1089
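Illustrative sketch (not part of the repository) on an in-memory edgelist:

import pandas as pd
import teneto

df = pd.DataFrame({'i': [0, 0, 1], 'j': [1, 2, 2], 't': [0, 1, 1]})
# All edges involving node 2 at time-point 1
print(teneto.utils.get_network_when(df, ij=2, t=1))
#    i  j  t
# 1  0  2  1
# 2  1  2  1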
wiheto/teneto | teneto/utils/utils.py | create_supraadjacency_matrix | def create_supraadjacency_matrix(tnet, intersliceweight=1):
"""
Returns a supraadjacency matrix from a temporal network structure
Parameters
--------
tnet : TemporalNetwork
Temporal network (any network type)
intersliceweight : int
Weight that links the same node from adjacent time-points
Returns
--------
supranet : dataframe
Supraadjacency matrix
"""
newnetwork = tnet.network.copy()
newnetwork['i'] = (tnet.network['i']) + \
((tnet.netshape[0]) * (tnet.network['t']))
newnetwork['j'] = (tnet.network['j']) + \
((tnet.netshape[0]) * (tnet.network['t']))
if 'weight' not in newnetwork.columns:
newnetwork['weight'] = 1
newnetwork.drop('t', axis=1, inplace=True)
timepointconns = pd.DataFrame()
timepointconns['i'] = np.arange(0, (tnet.N*tnet.T)-tnet.N)
timepointconns['j'] = np.arange(tnet.N, (tnet.N*tnet.T))
timepointconns['weight'] = intersliceweight
supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True)
return supranet | python | def create_supraadjacency_matrix(tnet, intersliceweight=1):
"""
Returns a supraadjacency matrix from a temporal network structure
Parameters
--------
tnet : TemporalNetwork
Temporal network (any network type)
intersliceweight : int
Weight that links the same node from adjacent time-points
Returns
--------
supranet : dataframe
Supraadjacency matrix
"""
newnetwork = tnet.network.copy()
newnetwork['i'] = (tnet.network['i']) + \
((tnet.netshape[0]) * (tnet.network['t']))
newnetwork['j'] = (tnet.network['j']) + \
((tnet.netshape[0]) * (tnet.network['t']))
if 'weight' not in newnetwork.columns:
newnetwork['weight'] = 1
newnetwork.drop('t', axis=1, inplace=True)
timepointconns = pd.DataFrame()
timepointconns['i'] = np.arange(0, (tnet.N*tnet.T)-tnet.N)
timepointconns['j'] = np.arange(tnet.N, (tnet.N*tnet.T))
timepointconns['weight'] = intersliceweight
supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True)
return supranet | Returns a supraadjacency matrix from a temporal network structure
Parameters
--------
tnet : TemporalNetwork
Temporal network (any network type)
intersliceweight : int
Weight that links the same node from adjacent time-points
Returns
--------
supranet : dataframe
Supraadjacency matrix | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/utils.py#L1092-L1121 |
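The mapping is: node i at time t becomes supra-node i + N*t. A minimal sketch (not part of the repository), assuming the TemporalNetwork constructor accepts from_df, N and T:

import pandas as pd
import teneto

# Hypothetical 2-node, 2-timepoint network with one edge at t=0
df = pd.DataFrame({'i': [0], 'j': [1], 't': [0]})
tnet = teneto.TemporalNetwork(from_df=df, N=2, T=2)
supra = teneto.utils.create_supraadjacency_matrix(tnet, intersliceweight=1)
# The (0,1,t=0) edge stays (0,1), while interslice edges (0,2) and (1,3)
# link each node to itself at t=1
print(supra)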
wiheto/teneto | teneto/utils/io.py | tnet_to_nx | def tnet_to_nx(df, t=None):
"""
Creates undirected networkx object
"""
if t is not None:
df = get_network_when(df, t=t)
if 'weight' in df.columns:
nxobj = nx.from_pandas_edgelist(
df, source='i', target='j', edge_attr='weight')
else:
nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
return nxobj | python | def tnet_to_nx(df, t=None):
"""
Creates undirected networkx object
"""
if t is not None:
df = get_network_when(df, t=t)
if 'weight' in df.columns:
nxobj = nx.from_pandas_edgelist(
df, source='i', target='j', edge_attr='weight')
else:
nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
return nxobj | Creates undirected networkx object | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/io.py#L5-L16 |
wiheto/teneto | teneto/communitydetection/louvain.py | temporal_louvain | def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore', randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1):
r"""
Louvain clustering for a temporal network.
Parameters
-----------
tnet : array, dict, TemporalNetwork
Input network
resolution : int
resolution of Louvain clustering ($\gamma$)
intersliceweight : int
interslice weight of multilayer clustering ($\omega$). Must be positive.
n_iter : int
Number of iterations to run louvain for
randomseed : int
        Set for reproducibility
negativeedge : str
If there are negative edges, what should be done with them.
Options: 'ignore' (i.e. set to 0). More options to be added.
    consensus_threshold : float (default 0.5)
        When creating the consensus matrix to average over the number of iterations, keep values where the consensus exceeds this threshold.
Returns
-------
communities : array (node,time)
node,time array of community assignment
Notes
-------
References
----------
"""
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
# Divide resolution by the number of timepoints
resolution = resolution / tnet.T
supranet = create_supraadjacency_matrix(
tnet, intersliceweight=intersliceweight)
if negativeedge == 'ignore':
supranet = supranet[supranet['weight'] > 0]
nxsupra = tnet_to_nx(supranet)
np.random.seed(randomseed)
while True:
comtmp = []
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T) for n in range(n_iter)}
for j in as_completed(job):
comtmp.append(j.result())
comtmp = np.stack(comtmp)
comtmp = comtmp.transpose()
comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F')
if n_iter == 1:
break
nxsupra_old = nxsupra
nxsupra = make_consensus_matrix(comtmp, consensus_threshold)
# If there was no consensus, there are no communities possible, return
if nxsupra is None:
break
if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N*tnet.T)) == nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N*tnet.T))).all():
break
communities = comtmp[:, :, 0]
    if temporal_consensus:
communities = make_temporal_consensus(communities)
return communities | python | def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore', randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1):
r"""
Louvain clustering for a temporal network.
Parameters
-----------
tnet : array, dict, TemporalNetwork
Input network
resolution : int
resolution of Louvain clustering ($\gamma$)
intersliceweight : int
interslice weight of multilayer clustering ($\omega$). Must be positive.
n_iter : int
Number of iterations to run louvain for
randomseed : int
        Set for reproducibility
negativeedge : str
If there are negative edges, what should be done with them.
Options: 'ignore' (i.e. set to 0). More options to be added.
    consensus_threshold : float (default 0.5)
        When creating the consensus matrix to average over the number of iterations, keep values where the consensus exceeds this threshold.
Returns
-------
communities : array (node,time)
node,time array of community assignment
Notes
-------
References
----------
"""
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
# Divide resolution by the number of timepoints
resolution = resolution / tnet.T
supranet = create_supraadjacency_matrix(
tnet, intersliceweight=intersliceweight)
if negativeedge == 'ignore':
supranet = supranet[supranet['weight'] > 0]
nxsupra = tnet_to_nx(supranet)
np.random.seed(randomseed)
while True:
comtmp = []
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T) for n in range(n_iter)}
for j in as_completed(job):
comtmp.append(j.result())
comtmp = np.stack(comtmp)
comtmp = comtmp.transpose()
comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F')
if n_iter == 1:
break
nxsupra_old = nxsupra
nxsupra = make_consensus_matrix(comtmp, consensus_threshold)
# If there was no consensus, there are no communities possible, return
if nxsupra is None:
break
if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N*tnet.T)) == nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N*tnet.T))).all():
break
communities = comtmp[:, :, 0]
    if temporal_consensus:
communities = make_temporal_consensus(communities)
return communities | r"""
Louvain clustering for a temporal network.
Parameters
-----------
tnet : array, dict, TemporalNetwork
Input network
resolution : int
resolution of Louvain clustering ($\gamma$)
intersliceweight : int
interslice weight of multilayer clustering ($\omega$). Must be positive.
n_iter : int
Number of iterations to run louvain for
randomseed : int
    Set for reproducibility
negativeedge : str
If there are negative edges, what should be done with them.
Options: 'ignore' (i.e. set to 0). More options to be added.
consensus_threshold : float (default 0.5)
    When creating the consensus matrix to average over the number of iterations, keep values where the consensus exceeds this threshold.
Returns
-------
communities : array (node,time)
node,time array of community assignment
Notes
-------
References
---------- | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/louvain.py#L11-L75 |
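Usage sketch (not part of the repository) on a small random binary network; the python-louvain dependency must be installed and the teneto.communitydetection path is assumed:

import numpy as np
import teneto

np.random.seed(2019)
G = (np.random.rand(6, 6, 5) > 0.6).astype(float)
G = np.maximum(G, G.transpose(1, 0, 2))  # symmetrize
for t in range(G.shape[-1]):
    np.fill_diagonal(G[:, :, t], 0)      # no self-edges
communities = teneto.communitydetection.temporal_louvain(G, n_iter=5,
                                                         randomseed=2019)
print(communities.shape)  # (node, time) array of community labels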
wiheto/teneto | teneto/communitydetection/louvain.py | make_consensus_matrix | def make_consensus_matrix(com_membership, th=0.5):
r"""
    Makes the consensus matrix.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
        threshold to cancel noisy edges
Returns
-------
    Dnx : networkx.Graph or None
        consensus matrix as a networkx graph (None if no edge exceeds the threshold)
"""
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
for j in range(i+1, com_membership.shape[0]):
con = np.sum((com_membership[i, :] - com_membership[j, :])
== 0, axis=-1) / com_membership.shape[-1]
twhere = np.where(con > th)[0]
D += list(zip(*[np.repeat(i, len(twhere)).tolist(), np.repeat(j,
len(twhere)).tolist(), twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
D = TemporalNetwork(from_df=D)
D = create_supraadjacency_matrix(D, intersliceweight=0)
Dnx = tnet_to_nx(D)
else:
Dnx = None
return Dnx | python | def make_consensus_matrix(com_membership, th=0.5):
r"""
    Makes the consensus matrix.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
        threshold to cancel noisy edges
Returns
-------
    Dnx : networkx.Graph or None
        consensus matrix as a networkx graph (None if no edge exceeds the threshold)
"""
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
for j in range(i+1, com_membership.shape[0]):
con = np.sum((com_membership[i, :] - com_membership[j, :])
== 0, axis=-1) / com_membership.shape[-1]
twhere = np.where(con > th)[0]
D += list(zip(*[np.repeat(i, len(twhere)).tolist(), np.repeat(j,
len(twhere)).tolist(), twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
D = TemporalNetwork(from_df=D)
D = create_supraadjacency_matrix(D, intersliceweight=0)
Dnx = tnet_to_nx(D)
else:
Dnx = None
return Dnx | r"""
Makes the consensus matrix.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
    threshold to cancel noisy edges
Returns
-------
Dnx : networkx.Graph or None
    consensus matrix as a networkx graph (None if no edge exceeds the threshold) | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/louvain.py#L84-L120
wiheto/teneto | teneto/communitydetection/louvain.py | make_temporal_consensus | def make_temporal_consensus(com_membership):
r"""
    Matches community labels across time-points.
    Jaccard matching is done in a greedy fashion, matching the largest community at t with the community at t-1.
Parameters
----------
com_membership : array
Shape should be node, time.
Returns
-------
    com_membership : array
        community labels matched across time-points using the Jaccard distance
"""
com_membership = np.array(com_membership)
    # reindex the first time-point's community labels to run from 0 to the number of communities - 1
com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
    # loop over all time-points, getting the Jaccard distance in a greedy manner from the largest community at t to the communities at t-1
for t in range(1, com_membership.shape[1]):
ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
ct = ct[np.argsort(counts_t)[::-1]]
c1back = np.unique(com_membership[:, t-1])
new_index = np.zeros(com_membership.shape[0])
for n in ct:
if len(c1back) > 0:
d = np.ones(int(c1back.max())+1)
for m in c1back:
v1 = np.zeros(com_membership.shape[0])
v2 = np.zeros(com_membership.shape[0])
v1[com_membership[:, t] == n] = 1
v2[com_membership[:, t-1] == m] = 1
d[int(m)] = jaccard(v1, v2)
bestval = np.argmin(d)
else:
bestval = new_index.max() + 1
new_index[com_membership[:, t] == n] = bestval
c1back = np.array(np.delete(c1back, np.where(c1back == bestval)))
com_membership[:, t] = new_index
return com_membership | python | def make_temporal_consensus(com_membership):
r"""
    Matches community labels across time-points.
    Jaccard matching is done in a greedy fashion, matching the largest community at t with the community at t-1.
Parameters
----------
com_membership : array
Shape should be node, time.
Returns
-------
    com_membership : array
        community labels matched across time-points using the Jaccard distance
"""
com_membership = np.array(com_membership)
    # reindex the first time-point's community labels to run from 0 to the number of communities - 1
com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
    # loop over all time-points, getting the Jaccard distance in a greedy manner from the largest community at t to the communities at t-1
for t in range(1, com_membership.shape[1]):
ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
ct = ct[np.argsort(counts_t)[::-1]]
c1back = np.unique(com_membership[:, t-1])
new_index = np.zeros(com_membership.shape[0])
for n in ct:
if len(c1back) > 0:
d = np.ones(int(c1back.max())+1)
for m in c1back:
v1 = np.zeros(com_membership.shape[0])
v2 = np.zeros(com_membership.shape[0])
v1[com_membership[:, t] == n] = 1
v2[com_membership[:, t-1] == m] = 1
d[int(m)] = jaccard(v1, v2)
bestval = np.argmin(d)
else:
bestval = new_index.max() + 1
new_index[com_membership[:, t] == n] = bestval
c1back = np.array(np.delete(c1back, np.where(c1back == bestval)))
com_membership[:, t] = new_index
return com_membership | r"""
Matches community labels across time-points.
Jaccard matching is done in a greedy fashion, matching the largest community at t with the community at t-1.
Parameters
----------
com_membership : array
Shape should be node, time.
Returns
-------
com_membership : array
    community labels matched across time-points using the Jaccard distance | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/louvain.py#L123-L167
wiheto/teneto | teneto/temporalcommunity/flexibility.py | flexibility | def flexibility(communities):
"""
Amount a node changes community
Parameters
----------
communities : array
Community array of shape (node,time)
Returns
--------
flex : array
        One value per node giving the flexibility of that node.
Notes
-----
    Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time-points are not arbitrary.
References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6.
"""
# Preallocate
flex = np.zeros(communities.shape[0])
# Go from the second time point to last, compare with time-point before
for t in range(1, communities.shape[1]):
flex[communities[:, t] != communities[:, t-1]] += 1
# Normalize
flex = flex / (communities.shape[1] - 1)
return flex | python | def flexibility(communities):
"""
Amount a node changes community
Parameters
----------
communities : array
Community array of shape (node,time)
Returns
--------
flex : array
        One value per node giving the flexibility of that node.
Notes
-----
    Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time-points are not arbitrary.
References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6.
"""
# Preallocate
flex = np.zeros(communities.shape[0])
# Go from the second time point to last, compare with time-point before
for t in range(1, communities.shape[1]):
flex[communities[:, t] != communities[:, t-1]] += 1
# Normalize
flex = flex / (communities.shape[1] - 1)
return flex | Amount a node changes community
Parameters
----------
communities : array
Community array of shape (node,time)
Returns
--------
flex : array
    One value per node giving the flexibility of that node.
Notes
-----
Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time-points are not arbitrary.
References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/flexibility.py#L4-L34 |
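Illustrative sketch (not part of the repository), assuming the function is exposed under teneto.temporalcommunity:

import numpy as np
import teneto

# Two nodes over three time-points; node 0 switches community once
communities = np.array([[0, 0, 1],
                        [1, 1, 1]])
print(teneto.temporalcommunity.flexibility(communities))
# [0.5 0. ] -- one switch out of two possible for node 0, none for node 1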
wiheto/teneto | teneto/plot/slice_plot.py | slice_plot | def slice_plot(netin, ax, nodelabels=None, timelabels=None, communities=None, plotedgeweights=False, edgeweightscalar=1, timeunit='', linestyle='k-', cmap=None, nodesize=100, nodekwargs=None, edgekwargs=None):
r'''
    Function draws a "slice graph" and returns the axis handle
Parameters
----------
netin : array, dict
temporal network input (graphlet or contact)
    ax : matplotlib axis handle.
nodelabels : list
        node labels. List of strings.
timelabels : list
        labels for the time dimension the graph is expressed across. List of strings.
communities : array
array of size: (time) or (node,time). Nodes will be coloured accordingly.
plotedgeweights : bool
if True, edges will vary in size (default False)
edgeweightscalar : int
scalar to multiply all edges if tweaking is needed.
timeunit : string
unit time axis is in.
linestyle : string
line style of Bezier curves.
nodesize : int
size of nodes
nodekwargs : dict
any additional kwargs for matplotlib.plt.scatter for the nodes
edgekwargs : dict
any additional kwargs for matplotlib.plt.plots for the edges
Returns
---------
ax : axis handle of slice graph
Examples
---------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot'] # Node names
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.slice_plot(C, ax, cmap='Pastel2')
>>> plt.tight_layout()
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot']
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Pastel2'
ax = teneto.plot.slice_plot(C,ax,cmap=cmap)
plt.tight_layout()
fig.show()
'''
# Get input type (C or G)
inputType = checkInput(netin)
# Convert C representation to G
if inputType == 'G':
netin = graphlet2contact(netin)
inputType = 'C'
edgelist = [tuple(np.array(e[0:2]) + e[2] * netin['netshape'][0])
for e in netin['contacts']]
if nodelabels is not None and len(nodelabels) == netin['netshape'][0]:
pass
elif nodelabels is not None and len(nodelabels) != netin['netshape'][0]:
raise ValueError('specified node label length does not match netshape')
elif nodelabels is None and netin['nodelabels'] == '':
nodelabels = np.arange(1, netin['netshape'][0] + 1)
else:
nodelabels = netin['nodelabels']
if timelabels is not None and len(timelabels) == netin['netshape'][-1]:
pass
elif timelabels is not None and len(timelabels) != netin['netshape'][-1]:
raise ValueError('specified time label length does not match netshape')
elif timelabels is None and str(netin['t0']) == '':
timelabels = np.arange(1, netin['netshape'][-1] + 1)
else:
timelabels = np.arange(netin['t0'], netin['Fs'] *
netin['netshape'][-1] + netin['t0'], netin['Fs'])
if timeunit is None:
timeunit = netin['timeunit']
timeNum = len(timelabels)
nodeNum = len(nodelabels)
posy = np.tile(list(range(0, nodeNum)), timeNum)
posx = np.repeat(list(range(0, timeNum)), nodeNum)
if nodekwargs is None:
nodekwargs = {}
if edgekwargs is None:
edgekwargs = {}
if cmap:
nodekwargs['cmap'] = cmap
if 'c' not in nodekwargs:
nodekwargs['c'] = posy
if communities is not None:
# check if temporal or static
if len(communities.shape) == 1:
nodekwargs['c'] = np.tile(communities, timeNum)
else:
nodekwargs['c'] = communities.flatten(order='F')
# plt.plot(points)
# Draw Bezier vectors around edge positions
for ei, edge in enumerate(edgelist):
if plotedgeweights and netin['nettype'][0] == 'w':
edgekwargs['linewidth'] = netin['values'][ei] * edgeweightscalar
bvx, bvy = bezier_points(
(posx[edge[0]], posy[edge[0]]), (posx[edge[1]], posy[edge[1]]), nodeNum, 20)
ax.plot(bvx, bvy, linestyle, **edgekwargs)
ax.set_yticks(range(0, len(nodelabels)))
ax.set_xticks(range(0, len(timelabels)))
ax.set_yticklabels(nodelabels)
ax.set_xticklabels(timelabels)
ax.grid()
ax.set_frame_on(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xlim([min(posx) - 1, max(posx) + 1])
ax.set_ylim([min(posy) - 1, max(posy) + 1])
ax.scatter(posx, posy, s=nodesize, zorder=10, **nodekwargs)
if timeunit != '':
timeunit = ' (' + timeunit + ')'
ax.set_xlabel('Time' + timeunit)
return ax | python | def slice_plot(netin, ax, nodelabels=None, timelabels=None, communities=None, plotedgeweights=False, edgeweightscalar=1, timeunit='', linestyle='k-', cmap=None, nodesize=100, nodekwargs=None, edgekwargs=None):
r'''
Function draws "slice graph" and exports axis handles
Parameters
----------
netin : array, dict
temporal network input (graphlet or contact)
ax : matplotlib axis handle.
nodelabels : list
node labels. List of strings.
timelabels : list
labels of the time dimension (the dimension the graph is expressed across). List of strings.
communities : array
array of size: (time) or (node,time). Nodes will be coloured accordingly.
plotedgeweights : bool
if True, edges will vary in size (default False)
edgeweightscalar : int
scalar to multiply all edges if tweaking is needed.
timeunit : string
unit time axis is in.
linestyle : string
line style of Bezier curves.
nodesize : int
size of nodes
nodekwargs : dict
any additional kwargs for matplotlib.plt.scatter for the nodes
edgekwargs : dict
any additional kwargs for matplotlib.plt.plots for the edges
Returns
---------
ax : axis handle of slice graph
Examples
---------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot'] # Node names
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.slice_plot(C, ax, cmap='Pastel2')
>>> plt.tight_layout()
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot']
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Pastel2'
ax = teneto.plot.slice_plot(C,ax,cmap=cmap)
plt.tight_layout()
fig.show()
'''
# Get input type (C or G)
inputType = checkInput(netin)
# Convert C representation to G
if inputType == 'G':
netin = graphlet2contact(netin)
inputType = 'C'
edgelist = [tuple(np.array(e[0:2]) + e[2] * netin['netshape'][0])
for e in netin['contacts']]
if nodelabels is not None and len(nodelabels) == netin['netshape'][0]:
pass
elif nodelabels is not None and len(nodelabels) != netin['netshape'][0]:
raise ValueError('specified node label length does not match netshape')
elif nodelabels is None and netin['nodelabels'] == '':
nodelabels = np.arange(1, netin['netshape'][0] + 1)
else:
nodelabels = netin['nodelabels']
if timelabels is not None and len(timelabels) == netin['netshape'][-1]:
pass
elif timelabels is not None and len(timelabels) != netin['netshape'][-1]:
raise ValueError('specified time label length does not match netshape')
elif timelabels is None and str(netin['t0']) == '':
timelabels = np.arange(1, netin['netshape'][-1] + 1)
else:
timelabels = np.arange(netin['t0'], netin['Fs'] *
netin['netshape'][-1] + netin['t0'], netin['Fs'])
if timeunit is None:
timeunit = netin['timeunit']
timeNum = len(timelabels)
nodeNum = len(nodelabels)
posy = np.tile(list(range(0, nodeNum)), timeNum)
posx = np.repeat(list(range(0, timeNum)), nodeNum)
if nodekwargs is None:
nodekwargs = {}
if edgekwargs is None:
edgekwargs = {}
if cmap:
nodekwargs['cmap'] = cmap
if 'c' not in nodekwargs:
nodekwargs['c'] = posy
if communities is not None:
# check if temporal or static
if len(communities.shape) == 1:
nodekwargs['c'] = np.tile(communities, timeNum)
else:
nodekwargs['c'] = communities.flatten(order='F')
# plt.plot(points)
# Draw Bezier vectors around edge positions
for ei, edge in enumerate(edgelist):
if plotedgeweights and netin['nettype'][0] == 'w':
edgekwargs['linewidth'] = netin['values'][ei] * edgeweightscalar
bvx, bvy = bezier_points(
(posx[edge[0]], posy[edge[0]]), (posx[edge[1]], posy[edge[1]]), nodeNum, 20)
ax.plot(bvx, bvy, linestyle, **edgekwargs)
ax.set_yticks(range(0, len(nodelabels)))
ax.set_xticks(range(0, len(timelabels)))
ax.set_yticklabels(nodelabels)
ax.set_xticklabels(timelabels)
ax.grid()
ax.set_frame_on(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xlim([min(posx) - 1, max(posx) + 1])
ax.set_ylim([min(posy) - 1, max(posy) + 1])
ax.scatter(posx, posy, s=nodesize, zorder=10, **nodekwargs)
if timeunit != '':
timeunit = ' (' + timeunit + ')'
ax.set_xlabel('Time' + timeunit)
return ax | r'''
Function draws "slice graph" and exports axis handles
Parameters
----------
netin : array, dict
temporal network input (graphlet or contact)
ax : matplotlib axis handle.
nodelabels : list
node labels. List of strings.
timelabels : list
labels of the time dimension (the dimension the graph is expressed across). List of strings.
communities : array
array of size: (time) or (node,time). Nodes will be coloured accordingly.
plotedgeweights : bool
if True, edges will vary in size (default False)
edgeweightscalar : int
scalar to multiply all edges if tweaking is needed.
timeunit : string
unit time axis is in.
linestyle : string
line style of Bezier curves.
nodesize : int
size of nodes
nodekwargs : dict
any additional kwargs for matplotlib.plt.scatter for the nodes
edgekwargs : dict
any additional kwargs for matplotlib.plt.plots for the edges
Returns
---------
ax : axis handle of slice graph
Examples
---------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot'] # Node names
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.slice_plot(C, ax, cmap='Pastel2')
>>> plt.tight_layout()
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot']
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Pastel2'
ax = teneto.plot.slice_plot(C,ax,cmap=cmap)
plt.tight_layout()
fig.show() | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/plot/slice_plot.py#L6-L181 |
wiheto/teneto | teneto/networkmeasures/local_variation.py | local_variation | def local_variation(data):
r"""
Calculates the local variation of inter-contact times. [LV-1]_, [LV-2]_
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
Returns
-------
LV : array
Local variation per edge.
Notes
------
The local variation is like the bursty coefficient and quantifies whether a series of inter-contact times is periodic, random (Poisson distributed), or bursty.
It is defined as:
.. math:: LV = {3 \over {n-1}}\sum_{i=1}^{n-1}{{{\iota_i - \iota_{i+1}} \over {\iota_i + \iota_{i+1}}}^2}
Where :math:`\iota` are inter-contact times and i is the index of the inter-contact time (not a node index). n is the number of events, making n-1 the number of inter-contact times.
The possible range is: :math:`0 \leq LV < 3`.
When the process is periodic, LV=0; when Poisson, LV=1. Larger LV values indicate a burstier process.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create two temporal networks of 2 nodes and 60 time points. The first has periodic edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
Now we call local variation for each edge.
>>> LV_periodic = teneto.networkmeasures.local_variation(G_periodic)
>>> LV_periodic
array([[nan, 0.],
[ 0., nan]])
Above we can see that between node 0 and 1, LV=0 (the diagonal is nan).
This is indicative of periodic contacts (which is what we defined).
Doing the same for the second example:
>>> LV_bursty = teneto.networkmeasures.local_variation(G_bursty)
>>> LV_bursty
array([[ nan, 1.28748748],
[1.28748748, nan]])
When the value is greater than 1, it indicates a bursty process.
nans are returned if there are no intercontacttimes
References
----------
.. [LV-1] Shinomoto et al (2003) Differences in spiking patterns among cortical neurons. Neural Computation 15.12 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/089976603322518759>`_]
.. [LV-2] Followed eq., 4.34 in Masuda N & Lambiotte (2016) A guide to temporal networks. World Scientific. Series on Complex Networks. Vol 4 [`Link <https://www.worldscientific.com/doi/abs/10.1142/9781786341150_0001>`_]
"""
ict = 0 # are ict present
if isinstance(data, dict):
# This could be done better
if [k for k in list(data.keys()) if k == 'intercontacttimes'] == ['intercontacttimes']:
ict = 1
# if inter-contact times are not calculated, calculate them
if ict == 0:
data = intercontacttimes(data)
if data['nettype'][1] == 'u':
ind = np.triu_indices(data['intercontacttimes'].shape[0], k=1)
if data['nettype'][1] == 'd':
triu = np.triu_indices(data['intercontacttimes'].shape[0], k=1)
tril = np.tril_indices(data['intercontacttimes'].shape[0], k=-1)
ind = [[], []]
ind[0] = np.concatenate([tril[0], triu[0]])
ind[1] = np.concatenate([tril[1], triu[1]])
ind = tuple(ind)
ict_shape = data['intercontacttimes'].shape
lv = np.zeros(ict_shape)
for n in range(len(ind[0])):
icts = data['intercontacttimes'][ind[0][n], ind[1][n]]
# make sure there is some contact
if len(icts) > 0:
lv_nonnorm = np.sum(
np.power((icts[:-1] - icts[1:]) / (icts[:-1] + icts[1:]), 2))
lv[ind[0][n], ind[1][n]] = (3/len(icts)) * lv_nonnorm
else:
lv[ind[0][n], ind[1][n]] = np.nan
# Make symmetric if undirected
if data['nettype'][1] == 'u':
lv = lv + lv.transpose()
for n in range(lv.shape[0]):
lv[n, n] = np.nan
return lv | python | def local_variation(data):
r"""
Calculates the local variation of inter-contact times. [LV-1]_, [LV-2]_
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
Returns
-------
LV : array
Local variation per edge.
Notes
------
The local variation is like the bursty coefficient and quantifies whether a series of inter-contact times is periodic, random (Poisson distributed), or bursty.
It is defined as:
.. math:: LV = {3 \over {n-1}}\sum_{i=1}^{n-1}{{{\iota_i - \iota_{i+1}} \over {\iota_i + \iota_{i+1}}}^2}
Where :math:`\iota` are inter-contact times and i is the index of the inter-contact time (not a node index). n is the number of events, making n-1 the number of inter-contact times.
The possible range is: :math:`0 \leq LV < 3`.
When the process is periodic, LV=0; when Poisson, LV=1. Larger LV values indicate a burstier process.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create two temporal networks of 2 nodes and 60 time points. The first has periodic edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
Now we call local variation for each edge.
>>> LV_periodic = teneto.networkmeasures.local_variation(G_periodic)
>>> LV_periodic
array([[nan, 0.],
[ 0., nan]])
Above we can see that between node 0 and 1, LV=0 (the diagonal is nan).
This is indicative of periodic contacts (which is what we defined).
Doing the same for the second example:
>>> LV_bursty = teneto.networkmeasures.local_variation(G_bursty)
>>> LV_bursty
array([[ nan, 1.28748748],
[1.28748748, nan]])
When the value is greater than 1, it indicates a bursty process.
nans are returned if there are no intercontacttimes
References
----------
.. [LV-1] Shinomoto et al (2003) Differences in spiking patterns among cortical neurons. Neural Computation 15.12 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/089976603322518759>`_]
.. [LV-2] Followed eq., 4.34 in Masuda N & Lambiotte (2016) A guide to temporal networks. World Scientific. Series on Complex Networks. Vol 4 [`Link <https://www.worldscientific.com/doi/abs/10.1142/9781786341150_0001>`_]
"""
ict = 0 # are ict present
if isinstance(data, dict):
# This could be done better
if [k for k in list(data.keys()) if k == 'intercontacttimes'] == ['intercontacttimes']:
ict = 1
# if inter-contact times are not calculated, calculate them
if ict == 0:
data = intercontacttimes(data)
if data['nettype'][1] == 'u':
ind = np.triu_indices(data['intercontacttimes'].shape[0], k=1)
if data['nettype'][1] == 'd':
triu = np.triu_indices(data['intercontacttimes'].shape[0], k=1)
tril = np.tril_indices(data['intercontacttimes'].shape[0], k=-1)
ind = [[], []]
ind[0] = np.concatenate([tril[0], triu[0]])
ind[1] = np.concatenate([tril[1], triu[1]])
ind = tuple(ind)
ict_shape = data['intercontacttimes'].shape
lv = np.zeros(ict_shape)
for n in range(len(ind[0])):
icts = data['intercontacttimes'][ind[0][n], ind[1][n]]
# make sure there is some contact
if len(icts) > 0:
lv_nonnorm = np.sum(
np.power((icts[:-1] - icts[1:]) / (icts[:-1] + icts[1:]), 2))
lv[ind[0][n], ind[1][n]] = (3/len(icts)) * lv_nonnorm
else:
lv[ind[0][n], ind[1][n]] = np.nan
# Make symmetric if undirected
if data['nettype'][1] == 'u':
lv = lv + lv.transpose()
for n in range(lv.shape[0]):
lv[n, n] = np.nan
return lv | r"""
Calculates the local variation of inter-contact times. [LV-1]_, [LV-2]_
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
Returns
-------
LV : array
Local variation per edge.
Notes
------
The local variation is like the bursty coefficient and quantifies whether a series of inter-contact times is periodic, random (Poisson distributed), or bursty.
It is defined as:
.. math:: LV = {3 \over {n-1}}\sum_{i=1}^{n-1}{{{\iota_i - \iota_{i+1}} \over {\iota_i + \iota_{i+1}}}^2}
Where :math:`\iota` are inter-contact times and i is the index of the inter-contact time (not a node index). n is the number of events, making n-1 the number of inter-contact times.
The possible range is: :math:`0 \leq LV < 3`.
When the process is periodic, LV=0; when Poisson, LV=1. Larger LV values indicate a burstier process.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create two temporal networks of 2 nodes and 60 time points. The first has periodic edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
Now we call local variation for each edge.
>>> LV_periodic = teneto.networkmeasures.local_variation(G_periodic)
>>> LV_periodic
array([[nan, 0.],
[ 0., nan]])
Above we can see that between node 0 and 1, LV=0 (the diagonal is nan).
This is indicative of periodic contacts (which is what we defined).
Doing the same for the second example:
>>> LV_bursty = teneto.networkmeasures.local_variation(G_bursty)
>>> LV_bursty
array([[ nan, 1.28748748],
[1.28748748, nan]])
When the value is greater than 1, it indicates a bursty process.
nans are returned if there are no intercontacttimes
References
----------
.. [LV-1] Shinomoto et al (2003) Differences in spiking patterns among cortical neurons. Neural Computation 15.12 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/089976603322518759>`_]
.. [LV-2] Followed eq., 4.34 in Masuda N & Lambiotte (2016) A guide to temporal networks. World Scientific. Series on Complex Networks. Vol 4 [`Link <https://www.worldscientific.com/doi/abs/10.1142/9781786341150_0001>`_] | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/local_variation.py#L9-L131 |
wiheto/teneto | teneto/utils/bidsutils.py | drop_bids_suffix | def drop_bids_suffix(fname):
"""
Given a filename sub-01_run-01_preproc.nii.gz, it will return ['sub-01_run-01', '.nii.gz']
Parameters
----------
fname : str
BIDS filename with suffix. Directories, if included, are preserved in the returned filename.
Returns
-------
fname_head : str
BIDS filename with the suffix and file extension removed
fileformat : str
The file format (text after suffix)
Note
------
This assumes that there are no periods in the filename
"""
if '/' in fname:
split = fname.split('/')
dirnames = '/'.join(split[:-1]) + '/'
fname = split[-1]
else:
dirnames = ''
tags = [tag for tag in fname.split('_') if '-' in tag]
fname_head = '_'.join(tags)
fileformat = '.' + '.'.join(fname.split('.')[1:])
return dirnames + fname_head, fileformat | python | def drop_bids_suffix(fname):
"""
Given a filename sub-01_run-01_preproc.nii.gz, it will return ['sub-01_run-01', '.nii.gz']
Parameters
----------
fname : str
BIDS filename with suffix. Directories, if included, are preserved in the returned filename.
Returns
-------
fname_head : str
BIDS filename with the suffix and file extension removed
fileformat : str
The file format (text after suffix)
Note
------
This assumes that there are no periods in the filename
"""
if '/' in fname:
split = fname.split('/')
dirnames = '/'.join(split[:-1]) + '/'
fname = split[-1]
else:
dirnames = ''
tags = [tag for tag in fname.split('_') if '-' in tag]
fname_head = '_'.join(tags)
fileformat = '.' + '.'.join(fname.split('.')[1:])
return dirnames + fname_head, fileformat | Given a filename sub-01_run-01_preproc.nii.gz, it will return ['sub-01_run-01', '.nii.gz']
Parameters
----------
fname : str
BIDS filename with suffix. Directories, if included, are preserved in the returned filename.
Returns
-------
fname_head : str
BIDS filename with the suffix and file extension removed
fileformat : str
The file format (text after suffix)
Note
------
This assumes that there are no periods in the filename | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/bidsutils.py#L24-L54 |
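A quick sanity check of the behaviour documented above; the import path mirrors the file path in this row and is an assumption.

```python
from teneto.utils.bidsutils import drop_bids_suffix  # assumed import path

fname_head, fileformat = drop_bids_suffix('sub-01_run-01_preproc.nii.gz')
print(fname_head)   # 'sub-01_run-01' ('preproc' contains no '-', so it is dropped)
print(fileformat)   # '.nii.gz'
```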
wiheto/teneto | teneto/utils/bidsutils.py | load_tabular_file | def load_tabular_file(fname, return_meta=False, header=True, index_col=True):
"""
Given a file name, loads it as a pandas data frame
Parameters
----------
fname : str
file name and path. Must be tsv.
return_meta : bool (default False)
if True, also return the meta information from the accompanying json file.
header : bool (default True)
if there is a header in the tsv file, true will use first row in file.
index_col : bool (default True)
if there is an index column in the csv or tsv file, true will use first column in file.
Returns
-------
df : pandas
The loaded file
info : pandas, if return_meta=True
Meta information in json file (if return_meta=True)
"""
if index_col:
index_col = 0
else:
index_col = None
if header:
header = 0
else:
header = None
df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t')
if return_meta:
json_fname = fname.replace('tsv', 'json')
meta = pd.read_json(json_fname)
return df, meta
else:
return df | python | def load_tabular_file(fname, return_meta=False, header=True, index_col=True):
"""
Given a file name, loads it as a pandas data frame
Parameters
----------
fname : str
file name and path. Must be tsv.
return_meta : bool (default False)
if True, also return the meta information from the accompanying json file.
header : bool (default True)
if there is a header in the tsv file, true will use first row in file.
index_col : bool (default True)
if there is an index column in the csv or tsv file, true will use first column in file.
Returns
-------
df : pandas
The loaded file
info : pandas, if return_meta=True
Meta information in json file (if return_meta=True)
"""
if index_col:
index_col = 0
else:
index_col = None
if header:
header = 0
else:
header = None
df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t')
if return_meta:
json_fname = fname.replace('tsv', 'json')
meta = pd.read_json(json_fname)
return df, meta
else:
return df | Given a file name, loads it as a pandas data frame
Parameters
----------
fname : str
file name and path. Must be tsv.
return_meta : bool (default False)
if True, also return the meta information from the accompanying json file.
header : bool (default True)
if there is a header in the tsv file, true will use first row in file.
index_col : bool (default True)
if there is an index column in the csv or tsv file, true will use first column in file.
Returns
-------
df : pandas
The loaded file
info : pandas, if return_meta=True
Meta information in json file (if return_meta=True) | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/bidsutils.py#L77-L115 |
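As a usage sketch (file names here are hypothetical; the import path mirrors the file path in this row):

```python
from teneto.utils.bidsutils import load_tabular_file  # assumed import path

# hypothetical tsv with a header row and an index column
df = load_tabular_file('sub-01_task-rest_timeseries.tsv')
# also load the sidecar 'sub-01_task-rest_timeseries.json' as meta information
df, meta = load_tabular_file('sub-01_task-rest_timeseries.tsv', return_meta=True)
```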
wiheto/teneto | teneto/utils/bidsutils.py | get_sidecar | def get_sidecar(fname, allowedfileformats='default'):
"""
Loads sidecar or creates one
"""
if allowedfileformats == 'default':
allowedfileformats = ['.tsv', '.nii.gz']
for f in allowedfileformats:
fname = fname.split(f)[0]
fname += '.json'
if os.path.exists(fname):
with open(fname) as fs:
sidecar = json.load(fs)
else:
sidecar = {}
if 'filestatus' not in sidecar:
sidecar['filestatus'] = {}
sidecar['filestatus']['reject'] = False
sidecar['filestatus']['reason'] = []
return sidecar | python | def get_sidecar(fname, allowedfileformats='default'):
"""
Loads sidecar or creates one
"""
if allowedfileformats == 'default':
allowedfileformats = ['.tsv', '.nii.gz']
for f in allowedfileformats:
fname = fname.split(f)[0]
fname += '.json'
if os.path.exists(fname):
with open(fname) as fs:
sidecar = json.load(fs)
else:
sidecar = {}
if 'filestatus' not in sidecar:
sidecar['filestatus'] = {}
sidecar['filestatus']['reject'] = False
sidecar['filestatus']['reason'] = []
return sidecar | Loads sidecar or creates one | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/bidsutils.py#L118-L136 |
wiheto/teneto | teneto/utils/bidsutils.py | process_exclusion_criteria | def process_exclusion_criteria(exclusion_criteria):
"""
Parses an exclusion criteria string to get the function and threshold.
Parameters
----------
exclusion_criteria : list
list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\'
Returns
-------
relfun : list
list of numpy functions for the exclusion criteria
threshold : list
list of floats for threshold for each relfun
"""
relfun = []
threshold = []
for ec in exclusion_criteria:
if ec[0:2] == '>=':
relfun.append(np.greater_equal)
threshold.append(float(ec[2:]))
elif ec[0:2] == '<=':
relfun.append(np.less_equal)
threshold.append(float(ec[2:]))
elif ec[0] == '>':
relfun.append(np.greater)
threshold.append(float(ec[1:]))
elif ec[0] == '<':
relfun.append(np.less)
threshold.append(float(ec[1:]))
else:
raise ValueError('exclusion criteria must begin with >, <, >= or <=')
return relfun, threshold | python | def process_exclusion_criteria(exclusion_criteria):
"""
Parses an exclusion criteria string to get the function and threshold.
Parameters
----------
exclusion_criteria : list
list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\'
Returns
-------
relfun : list
list of numpy functions for the exclusion criteria
threshold : list
list of floats for threshold for each relfun
"""
relfun = []
threshold = []
for ec in exclusion_criteria:
if ec[0:2] == '>=':
relfun.append(np.greater_equal)
threshold.append(float(ec[2:]))
elif ec[0:2] == '<=':
relfun.append(np.less_equal)
threshold.append(float(ec[2:]))
elif ec[0] == '>':
relfun.append(np.greater)
threshold.append(float(ec[1:]))
elif ec[0] == '<':
relfun.append(np.less)
threshold.append(float(ec[1:]))
else:
raise ValueError('exclusion crieria must being with >,<,>= or <=')
return relfun, threshold | Parses an exclusion criteria string to get the function and threshold.
Parameters
----------
exclusion_criteria : list
list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\'
Returns
-------
relfun : list
list of numpy functions for the exclusion criteria
threshold : list
list of floats for threshold for each relfun | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/utils/bidsutils.py#L166-L201 |
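To see the parsed output in action (a minimal sketch; the import path mirrors the file path in this row and is an assumption):

```python
from teneto.utils.bidsutils import process_exclusion_criteria  # assumed import path

relfun, threshold = process_exclusion_criteria(['>0.2', '<=0.5'])
print(relfun[0](0.3, threshold[0]))  # np.greater(0.3, 0.2) -> True
print(relfun[1](0.7, threshold[1]))  # np.less_equal(0.7, 0.5) -> False
```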
wiheto/teneto | teneto/networkmeasures/reachability_latency.py | reachability_latency | def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'):
"""
Reachability latency. This is the r-th longest temporal path.
Parameters
---------
tnet : array or dict
Temporal network (graphlet or contact), binary undirected only. Leave as None if paths is given.
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path. Leave as None if tnet is given.
rratio: float (default: 1)
reachability ratio that the latency is calculated in relation to.
Value must be over 0 and up to 1.
1 (default) - all nodes must be reached.
Other values (e.g. .5) imply that only that fraction of nodes (e.g. 50%) must be reached.
This is rounded to the nearest integer number of nodes.
E.g. if there are 6 nodes [1,2,3,4,5,6] and rratio=.5, it will be node 4 (due to rounding upwards)
calc : str
what to calculate. Alternatives: 'global' entire network; 'nodes': for each node.
Returns
--------
reach_lat : array
Reachability latency
Notes
------
Reachability latency calculates the average time it takes to reach the specified ratio of nodes via shortest temporal paths.
"""
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
netshape = pathmat.shape
edges_to_reach = int(netshape[0] - np.round(netshape[0] * rratio))  # int needed for array indexing
reach_lat = np.zeros([netshape[1], netshape[2]]) * np.nan
for t_ind in range(0, netshape[2]):
paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=1)
reach_lat[:, t_ind] = paths_sort[:, edges_to_reach]
if calc == 'global':
reach_lat = np.nansum(reach_lat)
reach_lat = reach_lat / ((netshape[0]) * netshape[2])
elif calc == 'nodes':
reach_lat = np.nansum(reach_lat, axis=1)
reach_lat = reach_lat / (netshape[2])
return reach_lat | python | def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'):
"""
Reachability latency. This is the r-th longest temporal path.
Parameters
---------
tnet : array or dict
Temporal network (graphlet or contact), binary undirected only. Leave as None if paths is given.
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path. Leave as None if tnet is given.
rratio: float (default: 1)
reachability ratio that the latency is calculated in relation to.
Value must be over 0 and up to 1.
1 (default) - all nodes must be reached.
Other values (e.g. .5) imply that only that fraction of nodes (e.g. 50%) must be reached.
This is rounded to the nearest integer number of nodes.
E.g. if there are 6 nodes [1,2,3,4,5,6] and rratio=.5, it will be node 4 (due to rounding upwards)
calc : str
what to calculate. Alternatives: 'global' entire network; 'nodes': for each node.
Returns
--------
reach_lat : array
Reachability latency
Notes
------
Reachability latency calculates the average time it takes to reach the specified ratio of nodes via shortest temporal paths.
"""
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
netshape = pathmat.shape
edges_to_reach = int(netshape[0] - np.round(netshape[0] * rratio))  # int needed for array indexing
reach_lat = np.zeros([netshape[1], netshape[2]]) * np.nan
for t_ind in range(0, netshape[2]):
paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=1)
reach_lat[:, t_ind] = paths_sort[:, edges_to_reach]
if calc == 'global':
reach_lat = np.nansum(reach_lat)
reach_lat = reach_lat / ((netshape[0]) * netshape[2])
elif calc == 'nodes':
reach_lat = np.nansum(reach_lat, axis=1)
reach_lat = reach_lat / (netshape[2])
return reach_lat | Reachability latency. This is the r-th longest temporal path.
Parameters
---------
tnet : array or dict
Temporal network (graphlet or contact), binary undirected only. Leave as None if paths is given.
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path. Leave as None if tnet is given.
rratio: float (default: 1)
reachability ratio that the latency is calculated in relation to.
Value must be over 0 and up to 1.
1 (default) - all nodes must be reached.
Other values (e.g. .5) imply that only that fraction of nodes (e.g. 50%) must be reached.
This is rounded to the nearest integer number of nodes.
E.g. if there are 6 nodes [1,2,3,4,5,6] and rratio=.5, it will be node 4 (due to rounding upwards)
calc : str
what to calculate. Alternatives: 'global' entire network; 'nodes': for each node.
Returns
--------
reach_lat : array
Reachability latency
Notes
------
Reachability latency calculates the average time it takes to reach the specified ratio of nodes via shortest temporal paths. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/reachability_latency.py#L9-L72 |
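A minimal usage sketch; calling through teneto.networkmeasures matches how other measures in this dump are invoked, and no output values are asserted since they depend on shortest_temporal_path:

```python
import numpy as np
import teneto

# small binary undirected network: 3 nodes, 4 time points
G = np.zeros([3, 3, 4])
G[0, 1, [0, 2]] = 1
G[1, 2, [1, 3]] = 1
G = G + G.transpose([1, 0, 2])  # make symmetric

R_global = teneto.networkmeasures.reachability_latency(G)               # single value
R_nodes = teneto.networkmeasures.reachability_latency(G, calc='nodes')  # one per node
```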
wiheto/teneto | teneto/networkmeasures/fluctuability.py | fluctuability | def fluctuability(netin, calc='global'):
r"""
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuability to calculate. Currently only 'global' is available.
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given x number of edges, F is lower when those are repeated edges among a smaller set of unique edges
and higher when they are distributed across more unique edges.
.. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix where it is 1 if there is at least one t such that G_{i,j,t} = 1 (i.e. at least one temporal edge exists).
F is not normalized, which makes comparisons of F across very different networks difficult (normalization could be added).
Examples
--------
This example compares the fluctuability of two different networks with the same number of edges.
Below are two temporal networks, both with 3 nodes and 3 time-points.
Both get 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indices.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indices.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctuability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_]
"""
# Get input type (C or G)
netin, _ = process_input(netin, ['C', 'G', 'TN'])
netin[netin != 0] = 1
unique_edges = np.sum(netin, axis=2)
unique_edges[unique_edges > 0] = 1
unique_edges[unique_edges == 0] = 0
fluct = (np.sum(unique_edges)) / np.sum(netin)
return fluct | python | def fluctuability(netin, calc='global'):
r"""
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuability to calculate. Currently only 'global' is available.
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given x number of edges, F is lower when those are repeated edges among a smaller set of unique edges
and higher when they are distributed across more unique edges.
.. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix where it is 1 if there is at least one t such that G_{i,j,t} = 1 (i.e. at least one temporal edge exists).
F is not normalized, which makes comparisons of F across very different networks difficult (normalization could be added).
Examples
--------
This example compares the fluctuability of two different networks with the same number of edges.
Below are two temporal networks, both with 3 nodes and 3 time-points.
Both get 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indices.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indices.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctuability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_]
"""
# Get input type (C or G)
netin, _ = process_input(netin, ['C', 'G', 'TN'])
netin[netin != 0] = 1
unique_edges = np.sum(netin, axis=2)
unique_edges[unique_edges > 0] = 1
unique_edges[unique_edges == 0] = 0
fluct = (np.sum(unique_edges)) / np.sum(netin)
return fluct | r"""
Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_
This is the unique number of edges through time divided by the overall number of edges.
Parameters
----------
netin : array or dict
Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')
calc : str
Version of fluctuability to calculate. Currently only 'global' is available.
Returns
-------
fluct : array
Fluctuability
Notes
------
Fluctuability quantifies the variability of edges.
Given x number of edges, F is lower when those are repeated edges among a smaller set of unique edges
and higher when they are distributed across more unique edges.
.. math:: F = {{\sum_{i,j} H_{i,j}} \over {\sum_{i,j,t} G_{i,j,t}}}
where :math:`H_{i,j}` is a binary matrix where it is 1 if there is at least one t such that G_{i,j,t} = 1 (i.e. at least one temporal edge exists).
F is not normalized, which makes comparisons of F across very different networks difficult (normalization could be added).
Examples
--------
This example compares the fluctuability of two different networks with the same number of edges.
Below are two temporal networks, both with 3 nodes and 3 time-points.
Both get 3 connections.
>>> import teneto
>>> import numpy as np
>>> # Manually specify node (i,j) and temporal (t) indices.
>>> ind_highF_i = [0,0,1]
>>> ind_highF_j = [1,2,2]
>>> ind_highF_t = [1,2,2]
>>> ind_lowF_i = [0,0,0]
>>> ind_lowF_j = [1,1,1]
>>> ind_lowF_t = [0,1,2]
>>> # Define 2 networks below and set above edges to 1
>>> G_highF = np.zeros([3,3,3])
>>> G_lowF = np.zeros([3,3,3])
>>> G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
>>> G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
The two different networks look like this:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
# Manually specify node (i,j) and temporal (t) indices.
ind_highF_i = [0,0,1]
ind_highF_j = [1,2,2]
ind_highF_t = [1,2,2]
ind_lowF_i = [0,0,0]
ind_lowF_j = [1,1,1]
ind_lowF_t = [0,1,2]
# Define 2 networks below and set above edges to 1
G_highF = np.zeros([3,3,3])
G_lowF = np.zeros([3,3,3])
G_highF[ind_highF_i,ind_highF_j,ind_highF_t] = 1
G_lowF[ind_lowF_i,ind_lowF_j,ind_lowF_t] = 1
fig, ax = plt.subplots(1,2)
teneto.plot.slice_plot(G_highF, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
teneto.plot.slice_plot(G_lowF, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1', '2'])
ax[0].set_title('G_highF')
ax[1].set_title('G_lowF')
ax[0].set_ylim([-0.25,2.25])
ax[1].set_ylim([-0.25,2.25])
plt.tight_layout()
fig.show()
Now calculate the fluctuability of the two networks above.
>>> F_high = teneto.networkmeasures.fluctuability(G_highF)
>>> F_high
1.0
>>> F_low = teneto.networkmeasures.fluctuability(G_lowF)
>>> F_low
0.3333333333333333
Here we see that the network with more unique connections has the higher fluctuability.
Reference
---------
.. [fluct-1] Thompson et al (2017) "From static to temporal network theory applications to functional brain connectivity." Network Neuroscience, 2: 1. p.69-99 [`Link <https://www.mitpressjournals.org/doi/abs/10.1162/NETN_a_00011>`_] | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/fluctuability.py#L8-L121 |
wiheto/teneto | teneto/networkmeasures/topological_overlap.py | topological_overlap | def topological_overlap(tnet, calc='time'):
r"""
Topological overlap quantifies the persistency of edges through time. If two consecutive time-points have similar edges, this becomes high (max 1). If there is high change, this becomes 0.
References: [topo-1]_, [topo-2]_
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Nettype: 'bu'.
calc: str
which version of topological overlap to calculate:
'node' - calculates for each node, averaging over time.
'time' - (default) calculates for each node per time point.
'global' - calculates a single value for the whole network (the temporal-correlation coefficient).
Returns
-------
topo_overlap : array
if calc = 'time', array is (node,time) in size. The final time point returns as nan.
if calc = 'node', array is (node) in size.
if calc = 'global', array is (1) in size.
Notes
------
When edges persist over time, the topological overlap increases. It can be calculated as a global value, per node, or per node-time.
When calc='time', then the topological overlap is:
.. math:: TopoOverlap_{i,t} = {\sum_j G_{i,j,t} G_{i,j,t+1} \over \sqrt{\sum_j G_{i,j,t} \sum_j G_{i,j,t+1}}}
When calc='node', then the topological overlap is the mean of :math:`TopoOverlap_{i,t}`:
.. math:: AvgTopoOverlap_{i} = {1 \over T-1} \sum_t TopoOverlap_{i,t}
where T is the number of time-points. This is called the *average topological overlap*.
When calc='global', the *temporal-correlation coefficient* is calculated
.. math:: TempCorrCoeff = {1 \over N} \sum_i AvgTopoOverlap_i
where N is the number of nodes.
For all three measures above, the value is between 0 and 1 where 0 entails "all edges change" and 1 entails "no edges change".
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Then make a temporal network with 3 nodes and 3 time-points.
>>> G = np.zeros([3, 3, 3])
>>> i_ind = np.array([0, 0, 0, 0,])
>>> j_ind = np.array([1, 1, 1, 2,])
>>> t_ind = np.array([0, 1, 2, 2,])
>>> G[i_ind, j_ind, t_ind] = 1
>>> G = G + G.transpose([1,0,2]) # Make symmetric
Now the topological overlap can be calculated:
>>> topo_overlap = teneto.networkmeasures.topological_overlap(G)
This returns *topo_overlap* which is a (node,time) array. Looking above at how we defined G,
when t = 0, there is only the edge (0,1). When t = 1, this edge still remains. This means
topo_overlap should equal 1 for node 0 at t=0 and 0 for node 2:
>>> topo_overlap[0,0]
1.0
>>> topo_overlap[2,0]
0.0
At t=2, there is now also an edge between (0,2), this means node 0's topological overlap at t=1 decreases as
its edges have decreased in their persistency at the next time point (i.e. some change has occurred). It equals ca. 0.71
>>> topo_overlap[0,1]
0.7071067811865475
If we want the average topological overlap, we simply add the calc argument to be 'node'.
>>> avg_topo_overlap = teneto.networkmeasures.topological_overlap(G, calc='node')
Now this is an array with a length of 3 (one per node).
>>> avg_topo_overlap
array([0.85355339, 1. , 0. ])
Here we see that node 1 had all its connections persist, node 2 had no connections persisting, and node 0 was in between.
To calculate the temporal correlation coefficient,
>>> temp_corr_coeff = teneto.networkmeasures.topological_overlap(G, calc='global')
This produces one value reflecting all of G
>>> temp_corr_coeff
0.617851130197758
References
----------
.. [topo-1] Tang et al (2010) Small-world behavior in time-varying graphs. Phys. Rev. E 81, 055101(R) [`arxiv link <https://arxiv.org/pdf/0909.1712.pdf>`_]
.. [topo-2] Nicosia et al (2013) "Graph Metrics for Temporal Networks" In: Holme P., Saramäki J. (eds) Temporal Networks. Understanding Complex Systems. Springer.
[`arxiv link <https://arxiv.org/pdf/1306.0493.pdf>`_]
"""
tnet = process_input(tnet, ['C', 'G', 'TN'])[0]
numerator = np.sum(tnet[:, :, :-1] * tnet[:, :, 1:], axis=1)
denominator = np.sqrt(
np.sum(tnet[:, :, :-1], axis=1) * np.sum(tnet[:, :, 1:], axis=1))
topo_overlap = numerator / denominator
topo_overlap[np.isnan(topo_overlap)] = 0
if calc == 'time':
# Add missing timepoint as nan to end of time series
topo_overlap = np.hstack(
[topo_overlap, np.zeros([topo_overlap.shape[0], 1])*np.nan])
else:
topo_overlap = np.mean(topo_overlap, axis=1)
if calc == 'node':
pass
elif calc == 'global':
topo_overlap = np.mean(topo_overlap)
return topo_overlap | python | def topological_overlap(tnet, calc='time'):
r"""
Topological overlap quantifies the persistency of edges through time. If two consecutive time-points have similar edges, this becomes high (max 1). If there is high change, this becomes 0.
References: [topo-1]_, [topo-2]_
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Nettype: 'bu'.
calc: str
which version of topological overlap to calculate:
'node' - calculates for each node, averaging over time.
'time' - (default) calculates for each node per time point.
'global' - calculates a single value for the whole network (the temporal-correlation coefficient).
Returns
-------
topo_overlap : array
if calc = 'time', array is (node,time) in size. The final time point returns as nan.
if calc = 'node', array is (node) in size.
if calc = 'global', array is (1) in size.
Notes
------
When edges persist over time, the topological overlap increases. It can be calculated as a global value, per node, or per node-time.
When calc='time', then the topological overlap is:
.. math:: TopoOverlap_{i,t} = {\sum_j G_{i,j,t} G_{i,j,t+1} \over \sqrt{\sum_j G_{i,j,t} \sum_j G_{i,j,t+1}}}
When calc='node', then the topological overlap is the mean of :math:`TopoOverlap_{i,t}`:
.. math:: AvgTopoOverlap_{i} = {1 \over T-1} \sum_t TopoOverlap_{i,t}
where T is the number of time-points. This is called the *average topological overlap*.
When calc='global', the *temporal-correlation coefficient* is calculated
.. math:: TempCorrCoeff = {1 \over N} \sum_i AvgTopoOverlap_i
where N is the number of nodes.
For all three measures above, the value is between 0 and 1 where 0 entails "all edges change" and 1 entails "no edges change".
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Then make a temporal network with 3 nodes and 3 time-points.
>>> G = np.zeros([3, 3, 3])
>>> i_ind = np.array([0, 0, 0, 0,])
>>> j_ind = np.array([1, 1, 1, 2,])
>>> t_ind = np.array([0, 1, 2, 2,])
>>> G[i_ind, j_ind, t_ind] = 1
>>> G = G + G.transpose([1,0,2]) # Make symmetric
Now the topological overlap can be calculated:
>>> topo_overlap = teneto.networkmeasures.topological_overlap(G)
This returns *topo_overlap* which is a (node,time) array. Looking above at how we defined G,
when t = 0, there is only the edge (0,1). When t = 1, this edge still remains. This means
topo_overlap should equal 1 for node 0 at t=0 and 0 for node 2:
>>> topo_overlap[0,0]
1.0
>>> topo_overlap[2,0]
0.0
At t=2, there is now also an edge between (0,2), this means node 0's topological overlap at t=1 decreases as
its edges have decreased in their persistency at the next time point (i.e. some change has occurred). It equals ca. 0.71
>>> topo_overlap[0,1]
0.7071067811865475
If we want the average topological overlap, we simply add the calc argument to be 'node'.
>>> avg_topo_overlap = teneto.networkmeasures.topological_overlap(G, calc='node')
Now this is an array with a length of 3 (one per node).
>>> avg_topo_overlap
array([0.85355339, 1. , 0. ])
Here we see that node 1 had all its connections persist, node 2 had no connections persisting, and node 0 was in between.
To calculate the temporal correlation coefficient,
>>> temp_corr_coeff = teneto.networkmeasures.topological_overlap(G, calc='global')
This produces one value reflecting all of G
>>> temp_corr_coeff
0.617851130197758
References
----------
.. [topo-1] Tang et al (2010) Small-world behavior in time-varying graphs. Phys. Rev. E 81, 055101(R) [`arxiv link <https://arxiv.org/pdf/0909.1712.pdf>`_]
.. [topo-2] Nicosia et al (2013) "Graph Metrics for Temporal Networks" In: Holme P., Saramäki J. (eds) Temporal Networks. Understanding Complex Systems. Springer.
[`arxiv link <https://arxiv.org/pdf/1306.0493.pdf>`_]
"""
tnet = process_input(tnet, ['C', 'G', 'TN'])[0]
numerator = np.sum(tnet[:, :, :-1] * tnet[:, :, 1:], axis=1)
denominator = np.sqrt(
np.sum(tnet[:, :, :-1], axis=1) * np.sum(tnet[:, :, 1:], axis=1))
topo_overlap = numerator / denominator
topo_overlap[np.isnan(topo_overlap)] = 0
if calc == 'time':
# Add missing timepoint as nan to end of time series
topo_overlap = np.hstack(
[topo_overlap, np.zeros([topo_overlap.shape[0], 1])*np.nan])
else:
topo_overlap = np.mean(topo_overlap, axis=1)
if calc == 'node':
pass
elif calc == 'global':
topo_overlap = np.mean(topo_overlap)
return topo_overlap | r"""
Topological overlap quantifies the persistency of edges through time. If two consecutive time-points have similar edges, this becomes high (max 1). If there is high change, this becomes 0.
References: [topo-1]_, [topo-2]_
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Nettype: 'bu'.
calc: str
which version of topological overlap to calculate:
'node' - calculates for each node, averaging over time.
'time' - (default) calculates for each node per time point.
'global' - calculates a single value for the whole network (the temporal-correlation coefficient).
Returns
-------
topo_overlap : array
if calc = 'time', array is (node,time) in size. The final time point returns as nan.
if calc = 'node', array is (node) in size.
if calc = 'global', array is (1) in size.
Notes
------
When edges persist over time, the topological overlap increases. It can be calculated as a global value, per node, or per node-time.
When calc='time', then the topological overlap is:
.. math:: TopoOverlap_{i,t} = {\sum_j G_{i,j,t} G_{i,j,t+1} \over \sqrt{\sum_j G_{i,j,t} \sum_j G_{i,j,t+1}}}
When calc='node', then the topological overlap is the mean of :math:`TopoOverlap_{i,t}`:
.. math:: AvgTopoOverlap_{i} = {1 \over T-1} \sum_t TopoOverlap_{i,t}
where T is the number of time-points. This is called the *average topological overlap*.
When calc='global', the *temporal-correlation coefficient* is calculated
.. math:: TempCorrCoeff = {1 \over N} \sum_i AvgTopoOverlap_i
where N is the number of nodes.
For all three measures above, the value is between 0 and 1 where 0 entails "all edges change" and 1 entails "no edges change".
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Then make a temporal network with 3 nodes and 3 time-points.
>>> G = np.zeros([3, 3, 3])
>>> i_ind = np.array([0, 0, 0, 0,])
>>> j_ind = np.array([1, 1, 1, 2,])
>>> t_ind = np.array([0, 1, 2, 2,])
>>> G[i_ind, j_ind, t_ind] = 1
>>> G = G + G.transpose([1,0,2]) # Make symmetric
Now the topological overlap can be calculated:
>>> topo_overlap = teneto.networkmeasures.topological_overlap(G)
This returns *topo_overlap* which is a (node,time) array. Looking above at how we defined G,
when t = 0, there is only the edge (0,1). When t = 1, this edge still remains. This means
topo_overlap should equal 1 for node 0 at t=0 and 0 for node 2:
>>> topo_overlap[0,0]
1.0
>>> topo_overlap[2,0]
0.0
At t=2, there is now also an edge between (0,2), this means node 0's topological overlap at t=1 decreases as
its edges have decreased in their persistency at the next time point (i.e. some change has occurred). It equals ca. 0.71
>>> topo_overlap[0,1]
0.7071067811865475
If we want the average topological overlap, we simply add the calc argument to be 'node'.
>>> avg_topo_overlap = teneto.networkmeasures.topological_overlap(G, calc='node')
Now this is an array with a length of 3 (one per node).
>>> avg_topo_overlap
array([0.85355339, 1. , 0. ])
Here we see that node 1 had all its connections persist, node 2 had no connections persisting, and node 0 was in between.
To calculate the temporal correlation coefficient,
>>> temp_corr_coeff = teneto.networkmeasures.topological_overlap(G, calc='global')
This produces one value reflecting all of G
>>> temp_corr_coeff
0.617851130197758
References
----------
.. [topo-1] Tang et al (2010) Small-world behavior in time-varying graphs. Phys. Rev. E 81, 055101(R) [`arxiv link <https://arxiv.org/pdf/0909.1712.pdf>`_]
.. [topo-2] Nicosia et al (2013) "Graph Metrics for Temporal Networks" In: Holme P., Saramäki J. (eds) Temporal Networks. Understanding Complex Systems. Springer.
[`arxiv link <https://arxiv.org/pdf/1306.0493.pdf>`_] | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/topological_overlap.py#L5-L137 |
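The doctest values above can be cross-checked against the formula directly. Below is a minimal sketch (plain numpy only; it rebuilds the same G as the docstring example, and topo_overlap_manual is an illustrative helper, not part of teneto):

import numpy as np

# Rebuild the example network from the docstring
G = np.zeros([3, 3, 3])
G[[0, 0, 0, 0], [1, 1, 1, 2], [0, 1, 2, 2]] = 1
G = G + G.transpose([1, 0, 2])  # make symmetric

def topo_overlap_manual(G, i, t):
    # Direct evaluation of TopoOverlap_{i,t} from the formula in the Notes
    num = np.sum(G[i, :, t] * G[i, :, t + 1])
    denom = np.sqrt(np.sum(G[i, :, t]) * np.sum(G[i, :, t + 1]))
    return num / denom if denom > 0 else 0.0

print(topo_overlap_manual(G, 0, 0))  # 1.0: node 0's single edge persists
print(topo_overlap_manual(G, 0, 1))  # 0.7071...: 1 / sqrt(1 * 2)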
wiheto/teneto | teneto/temporalcommunity/recruitment.py | recruitment | def recruitment(temporalcommunities, staticcommunities):
"""
Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the
same static communities being in the same temporal communities at other time-points or during different tasks.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Rcoeff : array
recruitment coefficient for each node
References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional
Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Rcoeff = np.zeros(len(staticcommunities))
for i, statcom in enumerate(staticcommunities):
Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom])
return Rcoeff | python | def recruitment(temporalcommunities, staticcommunities):
"""
Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the
same static communities being in the same temporal communities at other time-points or during different tasks.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Rcoeff : array
recruitment coefficient for each node
References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional
Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Rcoeff = np.zeros(len(staticcommunities))
for i, statcom in enumerate(staticcommunities):
Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom])
return Rcoeff | Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the
same static communities being in the same temporal communities at other time-points or during different tasks.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Rcoeff : array
recruitment coefficient for each node
References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional
Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/recruitment.py#L5-L44 |
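To make the computation transparent, here is a hypothetical end-to-end sketch that builds the allegiance matrix by hand (the fraction of time points at which two nodes share a temporal community) and then applies the recruitment formula. All names and data are illustrative; teneto's allegiance function may treat the diagonal differently (e.g. excluding self-allegiance), which this sketch does not:

import numpy as np

# Toy data: 4 nodes, 5 time points of temporal community labels
temporalcommunities = np.array([[0, 0, 0, 1, 1],
                                [0, 0, 0, 1, 1],
                                [1, 1, 0, 0, 0],
                                [1, 1, 1, 0, 0]])
staticcommunities = np.array([0, 0, 1, 1])

# Allegiance: probability that nodes i and j share a temporal community
n_nodes, n_time = temporalcommunities.shape
alleg = np.zeros((n_nodes, n_nodes))
for t in range(n_time):
    labels = temporalcommunities[:, t]
    alleg += labels[:, None] == labels[None, :]
alleg /= n_time

# Recruitment: mean allegiance with nodes of the same static community
Rcoeff = np.array([np.mean(alleg[i, staticcommunities == s])
                   for i, s in enumerate(staticcommunities)])
print(Rcoeff)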
wiheto/teneto | teneto/plot/circle_plot.py | circle_plot | def circle_plot(netIn, ax, nodelabels=None, linestyle='k-', nodesize=1000, cmap='Set2'):
r'''
Function draws "circle plot" and exports axis handles
Parameters
-------------
netIn : temporal network input (graphlet, contact, or connectivity matrix)
ax : matplotlib ax handles.
nodelabels : list
node labels. List of strings
linestyle : str
line style
nodesize : int
size of nodes
cmap : str
matplotlib colormap
Returns
-------
ax : axis handle
Example
-------
>>> import teneto
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> G = np.zeros([6, 6])
>>> i = [0, 0, 0, 1, 2, 3, 4]
>>> j = [3, 4, 5, 5, 4, 5, 5]
>>> G[i, j] = 1
>>> fig, ax = plt.subplots(1)
>>> ax = teneto.plot.circle_plot(G, ax)
>>> fig.show()
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([6, 6])
i = [0, 0, 0, 1, 2, 3, 4]
j = [3, 4, 5, 5, 4, 5, 5]
G[i, j] = 1
fig, ax = plt.subplots(1)
teneto.plot.circle_plot(G, ax)
fig.show()
'''
# Get input type (G, C or connectivity matrix M)
inputType = checkInput(netIn, conMat=1)
if nodelabels is None:
nodelabels = []
# Convert M or G representation to contact (C) representation
if inputType == 'M':
shape = np.shape(netIn)
edg = np.where(np.abs(netIn) > 0)
contacts = [tuple([edg[0][i], edg[1][i]])
for i in range(0, len(edg[0]))]
netIn = {}
netIn['contacts'] = contacts
netIn['netshape'] = shape
elif inputType == 'G':
netIn = graphlet2contact(netIn)
inputType = 'C'
if inputType == 'C':
edgeList = [tuple(np.array(e[0:2]) + e[2] * netIn['netshape'][0])
for e in netIn['contacts']]
elif inputType == 'M':
edgeList = netIn['contacts']
n = netIn['netshape'][0]
# Get positions of node on unit circle
posx = [math.cos((2 * math.pi * i) / n) for i in range(0, n)]
posy = [math.sin((2 * math.pi * i) / n) for i in range(0, n)]
# Get Bezier lines in a circle
cmap = cm.get_cmap(cmap)(np.linspace(0, 1, n))
for edge in edgeList:
bvx, bvy = bezier_circle(
(posx[edge[0]], posy[edge[0]]), (posx[edge[1]], posy[edge[1]]), 20)
ax.plot(bvx, bvy, linestyle, zorder=0)
for i in range(n):
ax.scatter(posx[i], posy[i], s=nodesize, c=cmap[i], zorder=1)
# Remove things that make plot unpretty
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
# make plot a square
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect((x1 - x0) / (y1 - y0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
return ax | python | def circle_plot(netIn, ax, nodelabels=None, linestyle='k-', nodesize=1000, cmap='Set2'):
r'''
Function draws "circle plot" and exports axis handles
Parameters
-------------
netIn : temporal network input (graphlet, contact, or connectivity matrix)
ax : matplotlib ax handles.
nodelabels : list
node labels. List of strings
linestyle : str
line style
nodesize : int
size of nodes
cmap : str
matplotlib colormap
Returns
-------
ax : axis handle
Example
-------
>>> import teneto
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> G = np.zeros([6, 6])
>>> i = [0, 0, 0, 1, 2, 3, 4]
>>> j = [3, 4, 5, 5, 4, 5, 5]
>>> G[i, j] = 1
>>> fig, ax = plt.subplots(1)
>>> ax = teneto.plot.circle_plot(G, ax)
>>> fig.show()
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([6, 6])
i = [0, 0, 0, 1, 2, 3, 4]
j = [3, 4, 5, 5, 4, 5, 5]
G[i, j] = 1
fig, ax = plt.subplots(1)
teneto.plot.circle_plot(G, ax)
fig.show()
'''
# Get input type (G, C or connectivity matrix M)
inputType = checkInput(netIn, conMat=1)
if nodelabels is None:
nodelabels = []
# Convert M or G representation to contact (C) representation
if inputType == 'M':
shape = np.shape(netIn)
edg = np.where(np.abs(netIn) > 0)
contacts = [tuple([edg[0][i], edg[1][i]])
for i in range(0, len(edg[0]))]
netIn = {}
netIn['contacts'] = contacts
netIn['netshape'] = shape
elif inputType == 'G':
netIn = graphlet2contact(netIn)
inputType = 'C'
if inputType == 'C':
edgeList = [tuple(np.array(e[0:2]) + e[2] * netIn['netshape'][0])
for e in netIn['contacts']]
elif inputType == 'M':
edgeList = netIn['contacts']
n = netIn['netshape'][0]
# Get positions of node on unit circle
posx = [math.cos((2 * math.pi * i) / n) for i in range(0, n)]
posy = [math.sin((2 * math.pi * i) / n) for i in range(0, n)]
# Get Bezier lines in a circle
cmap = cm.get_cmap(cmap)(np.linspace(0, 1, n))
for edge in edgeList:
bvx, bvy = bezier_circle(
(posx[edge[0]], posy[edge[0]]), (posx[edge[1]], posy[edge[1]]), 20)
ax.plot(bvx, bvy, linestyle, zorder=0)
for i in range(n):
ax.scatter(posx[i], posy[i], s=nodesize, c=cmap[i], zorder=1)
# Remove things that make plot unpretty
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
# make plot a square
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect((x1 - x0) / (y1 - y0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
return ax | r'''
Function draws "circle plot" and exports axis handles
Parameters
-------------
netIn : temporal network input (graphlet, contact, or connectivity matrix)
ax : matplotlib ax handles.
nodelabels : list
node labels. List of strings
linestyle : str
line style
nodesize : int
size of nodes
cmap : str
matplotlib colormap
Returns
-------
ax : axis handle
Example
-------
>>> import teneto
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> G = np.zeros([6, 6])
>>> i = [0, 0, 0, 1, 2, 3, 4]
>>> j = [3, 4, 5, 5, 4, 5, 5]
>>> G[i, j] = 1
>>> fig, ax = plt.subplots(1)
>>> ax = teneto.plot.circle_plot(G, ax)
>>> fig.show()
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([6, 6])
i = [0, 0, 0, 1, 2, 3, 4]
j = [3, 4, 5, 5, 4, 5, 5]
G[i, j] = 1
fig, ax = plt.subplots(1)
teneto.plot.circle_plot(G, ax)
fig.show() | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/plot/circle_plot.py#L9-L108 |
wiheto/teneto | teneto/temporalcommunity/integration.py | integration | def integration(temporalcommunities, staticcommunities):
"""
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff | python | def integration(temporalcommunities, staticcommunities):
"""
Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533.
"""
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff | Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/integration.py#L5-L45 |
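Integration mirrors recruitment but averages allegiance over nodes *outside* a node's static community (note the != in place of ==). Continuing the toy example from the recruitment sketch above:

# Integration: mean allegiance with nodes of other static communities
Icoeff = np.array([np.mean(alleg[i, staticcommunities != s])
                   for i, s in enumerate(staticcommunities)])
print(Icoeff)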
wiheto/teneto | teneto/networkmeasures/intercontacttimes.py | intercontacttimes | def intercontacttimes(tnet):
"""
Calculates the intercontacttimes of each edge in a network.
Parameters
-----------
tnet : array, dict
Temporal network (graphlet or contact). Nettype: 'bu', 'bd'
Returns
---------
contacts : dict
Intercontact times as numpy array in dictionary. contacts['intercontacttimes']
Notes
------
The inter-contact times are calculated as the time between consecutive "active" edges (where active means
that the value is 1 in a binary network).
Examples
--------
This example goes through how inter-contact times are calculated.
>>> import teneto
>>> import numpy as np
Make a network with 2 nodes and 10 time-points with 4 edges spaced out.
>>> G = np.zeros([2,2,10])
>>> edge_on = [1,3,5,9]
>>> G[0,1,edge_on] = 1
The network visualised below makes it clear what the inter-contact times are between the two nodes:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([2,2,10])
edge_on = [1,3,5,9]
G[0,1,edge_on] = 1
fig, ax = plt.subplots(1, figsize=(4,2))
teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
ax.set_ylim(-0.25, 1.25)
plt.tight_layout()
fig.show()
Calculating the inter-contact times of this edge gives 2, 2, 4 between nodes 0 and 1.
>>> ict = teneto.networkmeasures.intercontacttimes(G)
The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN.
So the icts between nodes 0 and 1 are found by:
>>> ict['intercontacttimes'][0,1]
array([2, 2, 4])
"""
# Process input
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
if tnet.nettype[0] == 'w':
print('WARNING: assuming connections to be binary when computing intercontacttimes')
# For each edge, get the time points where the edge is active and take the
# difference between consecutive activation times.
# The results are returned per edge in an object array.
contacts = np.array([[None] * tnet.netshape[0]] * tnet.netshape[0])
if tnet.nettype[1] == 'u':
for i in range(0, tnet.netshape[0]):
for j in range(i + 1, tnet.netshape[0]):
edge_on = tnet.get_network_when(i=i, j=j)['t'].values
if len(edge_on) > 0:
edge_on_diff = edge_on[1:] - edge_on[:-1]
contacts[i, j] = np.array(edge_on_diff)
contacts[j, i] = np.array(edge_on_diff)
else:
contacts[i, j] = []
contacts[j, i] = []
elif tnet.nettype[1] == 'd':
for i in range(0, tnet.netshape[0]):
for j in range(0, tnet.netshape[0]):
edge_on = tnet.get_network_when(i=i, j=j)['t'].values
if len(edge_on) > 0:
edge_on_diff = edge_on[1:] - edge_on[:-1]
contacts[i, j] = np.array(edge_on_diff)
else:
contacts[i, j] = []
out = {}
out['intercontacttimes'] = contacts
out['nettype'] = tnet.nettype
return out | python | def intercontacttimes(tnet):
"""
Calculates the intercontacttimes of each edge in a network.
Parameters
-----------
tnet : array, dict
Temporal network (graphlet or contact). Nettype: 'bu', 'bd'
Returns
---------
contacts : dict
Intercontact times as numpy array in dictionary. contacts['intercontacttimes']
Notes
------
The inter-contact times are calculated as the time between consecutive "active" edges (where active means
that the value is 1 in a binary network).
Examples
--------
This example goes through how inter-contact times are calculated.
>>> import teneto
>>> import numpy as np
Make a network with 2 nodes and 10 time-points with 4 edges spaced out.
>>> G = np.zeros([2,2,10])
>>> edge_on = [1,3,5,9]
>>> G[0,1,edge_on] = 1
The network visualised below makes it clear what the inter-contact times are between the two nodes:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([2,2,10])
edge_on = [1,3,5,9]
G[0,1,edge_on] = 1
fig, ax = plt.subplots(1, figsize=(4,2))
teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
ax.set_ylim(-0.25, 1.25)
plt.tight_layout()
fig.show()
Calculating the inter-contact times of this edge gives 2, 2, 4 between nodes 0 and 1.
>>> ict = teneto.networkmeasures.intercontacttimes(G)
The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN.
So the icts between nodes 0 and 1 are found by:
>>> ict['intercontacttimes'][0,1]
array([2, 2, 4])
"""
# Process input
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
if tnet.nettype[0] == 'w':
print('WARNING: assuming connections to be binary when computing intercontacttimes')
# For each edge, get the time points where the edge is active and take the
# difference between consecutive activation times.
# The results are returned per edge in an object array.
contacts = np.array([[None] * tnet.netshape[0]] * tnet.netshape[0])
if tnet.nettype[1] == 'u':
for i in range(0, tnet.netshape[0]):
for j in range(i + 1, tnet.netshape[0]):
edge_on = tnet.get_network_when(i=i, j=j)['t'].values
if len(edge_on) > 0:
edge_on_diff = edge_on[1:] - edge_on[:-1]
contacts[i, j] = np.array(edge_on_diff)
contacts[j, i] = np.array(edge_on_diff)
else:
contacts[i, j] = []
contacts[j, i] = []
elif tnet.nettype[1] == 'd':
for i in range(0, tnet.netshape[0]):
for j in range(0, tnet.netshape[0]):
edge_on = tnet.get_network_when(i=i, j=j)['t'].values
if len(edge_on) > 0:
edge_on_diff = edge_on[1:] - edge_on[:-1]
contacts[i, j] = np.array(edge_on_diff)
else:
contacts[i, j] = []
out = {}
out['intercontacttimes'] = contacts
out['nettype'] = tnet.nettype
return out | Calculates the intercontacttimes of each edge in a network.
Parameters
-----------
tnet : array, dict
Temporal network (graphlet or contact). Nettype: 'bu', 'bd'
Returns
---------
contacts : dict
Intercontact times as numpy array in dictionary. contacts['intercontacttimes']
Notes
------
The inter-contact times are calculated as the time between consecutive "active" edges (where active means
that the value is 1 in a binary network).
Examples
--------
This example goes through how inter-contact times are calculated.
>>> import teneto
>>> import numpy as np
Make a network with 2 nodes and 10 time-points with 4 edges spaced out.
>>> G = np.zeros([2,2,10])
>>> edge_on = [1,3,5,9]
>>> G[0,1,edge_on] = 1
The network visualised below makes it clear what the inter-contact times are between the two nodes:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([2,2,10])
edge_on = [1,3,5,9]
G[0,1,edge_on] = 1
fig, ax = plt.subplots(1, figsize=(4,2))
teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
ax.set_ylim(-0.25, 1.25)
plt.tight_layout()
fig.show()
Calculating the inter-contact times of this edge gives 2, 2, 4 between nodes 0 and 1.
>>> ict = teneto.networkmeasures.intercontacttimes(G)
The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN.
So the icts between nodes 0 and 1 are found by:
>>> ict['intercontacttimes'][0,1]
array([2, 2, 4]) | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/intercontacttimes.py#L9-L108 |
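The per-edge computation reduces to a difference over consecutive activation times, so the docstring's numbers can be checked with plain numpy (no teneto needed):

import numpy as np

edge_on = np.array([1, 3, 5, 9])  # time points at which edge (0,1) is active
print(np.diff(edge_on))           # [2 2 4], matching the docstring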
wiheto/teneto | teneto/timeseries/report.py | gen_report | def gen_report(report, sdir='./', report_name='report.html'):
"""
Generates a report of the derivation and postprocessing steps in teneto.derive
"""
# Create report directory
if not os.path.exists(sdir):
os.makedirs(sdir)
# Add a slash to the directory if not included, to avoid DirNameFileName
# instead of DirName/FileName being created
if sdir[-1] != '/':
sdir += '/'
report_html = '<html><body>'
if 'method' in report.keys():
report_html += "<h1>Method: " + report['method'] + "</h1><p>"
for i in report[report['method']]:
if i == 'taper_window':
fig, ax = plt.subplots(1)
ax.plot(report[report['method']]['taper_window'],
report[report['method']]['taper'])
ax.set_xlabel('Window (time). 0 in middle of window.')
ax.set_title(
'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).')
fig.savefig(sdir + 'taper.png')
report_html += "<img src='./taper.png' width=500>" + "<p>"
else:
report_html += "- <b>" + i + "</b>: " + \
str(report[report['method']][i]) + "<br>"
if 'postprocess' in report.keys():
report_html += "<p><h2>Postprocessing:</h2><p>"
report_html += "<b>Pipeline: </b>"
for i in report['postprocess']:
report_html += " " + i + ","
for i in report['postprocess']:
report_html += "<p><h3>" + i + "</h3><p>"
for j in report[i]:
if j == 'lambda':
report_html += "- <b>" + j + "</b>: " + "<br>"
lambda_val = np.array(report['boxcox']['lambda'])
fig, ax = plt.subplots(1)
ax.hist(lambda_val[:, -1])
ax.set_xlabel('lambda')
ax.set_ylabel('frequency')
ax.set_title('Histogram of lambda parameter')
fig.savefig(sdir + 'boxcox_lambda.png')
report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>"
report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>"
np.savetxt(sdir + "boxcox_lambda.csv",
lambda_val, delimiter=",")
else:
report_html += "- <b>" + j + "</b>: " + \
str(report[i][j]) + "<br>"
report_html += '</body></html>'
with open(sdir + report_name, 'w') as file:
file.write(report_html)
file.close() | python | def gen_report(report, sdir='./', report_name='report.html'):
"""
Generates a report of the derivation and postprocessing steps in teneto.derive
"""
# Create report directory
if not os.path.exists(sdir):
os.makedirs(sdir)
# Add a slash to the directory if not included, to avoid DirNameFileName
# instead of DirName/FileName being created
if sdir[-1] != '/':
sdir += '/'
report_html = '<html><body>'
if 'method' in report.keys():
report_html += "<h1>Method: " + report['method'] + "</h1><p>"
for i in report[report['method']]:
if i == 'taper_window':
fig, ax = plt.subplots(1)
ax.plot(report[report['method']]['taper_window'],
report[report['method']]['taper'])
ax.set_xlabel('Window (time). 0 in middle of window.')
ax.set_title(
'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).')
fig.savefig(sdir + 'taper.png')
report_html += "<img src='./taper.png' width=500>" + "<p>"
else:
report_html += "- <b>" + i + "</b>: " + \
str(report[report['method']][i]) + "<br>"
if 'postprocess' in report.keys():
report_html += "<p><h2>Postprocessing:</h2><p>"
report_html += "<b>Pipeline: </b>"
for i in report['postprocess']:
report_html += " " + i + ","
for i in report['postprocess']:
report_html += "<p><h3>" + i + "</h3><p>"
for j in report[i]:
if j == 'lambda':
report_html += "- <b>" + j + "</b>: " + "<br>"
lambda_val = np.array(report['boxcox']['lambda'])
fig, ax = plt.subplots(1)
ax.hist(lambda_val[:, -1])
ax.set_xlabel('lambda')
ax.set_ylabel('frequency')
ax.set_title('Histogram of lambda parameter')
fig.savefig(sdir + 'boxcox_lambda.png')
report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>"
report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>"
np.savetxt(sdir + "boxcox_lambda.csv",
lambda_val, delimiter=",")
else:
report_html += "- <b>" + j + "</b>: " + \
str(report[i][j]) + "<br>"
report_html += '</body></html>'
with open(sdir + report_name, 'w') as file:
file.write(report_html)
file.close() | Generates report of derivation and postprocess steps in teneto.derive | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/timeseries/report.py#L10-L92 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.add_history | def add_history(self, fname, fargs, init=0):
"""
Adds a processing step to TenetoBIDS.history.
"""
if init == 1:
self.history = []
self.history.append([fname, fargs]) | python | def add_history(self, fname, fargs, init=0):
"""
Adds a processing step to TenetoBIDS.history.
"""
if init == 1:
self.history = []
self.history.append([fname, fargs]) | Adds a processing step to TenetoBIDS.history. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L129-L135 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.export_history | def export_history(self, dirname):
"""
Exports TenetoBIDShistory.py, tenetoinfo.json, requirements.txt (modules currently imported) to dirname
Parameters
---------
dirname : str
directory to export entire TenetoBIDS history.
"""
mods = [(m.__name__, m.__version__)
for m in sys.modules.values() if m if hasattr(m, '__version__')]
with open(dirname + '/requirements.txt', 'w') as f:
for m in mods:
m = list(m)
if not isinstance(m[1], str):
m[1] = m[1].decode("utf-8")
f.writelines(m[0] + ' == ' + m[1] + '\n')
with open(dirname + '/TenetoBIDShistory.py', 'w') as f:
f.writelines('import teneto\n')
for func, args in self.history:
f.writelines(func + '(**' + str(args) + ')\n')
with open(dirname + '/tenetoinfo.json', 'w') as f:
json.dump(self.tenetoinfo, f) | python | def export_history(self, dirname):
"""
Exports TenetoBIDShistory.py, tenetoinfo.json, requirements.txt (modules currently imported) to dirname
Parameters
---------
dirname : str
directory to export entire TenetoBIDS history.
"""
mods = [(m.__name__, m.__version__)
for m in sys.modules.values() if m if hasattr(m, '__version__')]
with open(dirname + '/requirements.txt', 'w') as f:
for m in mods:
m = list(m)
if not isinstance(m[1], str):
m[1] = m[1].decode("utf-8")
f.writelines(m[0] + ' == ' + m[1] + '\n')
with open(dirname + '/TenetoBIDShistory.py', 'w') as f:
f.writelines('import teneto\n')
for func, args in self.history:
f.writelines(func + '(**' + str(args) + ')\n')
with open(dirname + '/tenetoinfo.json', 'w') as f:
json.dump(self.tenetoinfo, f) | Exports TenetoBIDShistory.py, tenetoinfo.json, requirements.txt (modules currently imported) to dirname
Parameters
---------
dirname : str
directory to export entire TenetoBIDS history. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L137-L162 |
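A short usage sketch (assumes tnet is an existing TenetoBIDS object with some history; the directory name is illustrative and must exist, so it is created first):

import os

os.makedirs('./history_export', exist_ok=True)
tnet.export_history('./history_export')
# Writes: requirements.txt (pinned versions of currently imported modules),
# TenetoBIDShistory.py (a replayable sequence of the calls made so far),
# and tenetoinfo.json (teneto's dataset description).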
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.derive_temporalnetwork | def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True):
"""
Derive time-varying connectivity on the selected files.
Parameters
----------
params : dict.
See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS functions)
update_pipeline : bool
If true, the object updates the selected files with those derived here.
njobs : int
How many parallel jobs to run
confound_corr_report : bool
If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.
tag : str
any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'
Returns
-------
dfc : files
saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
files = self.get_selected_files(quiet=1)
confound_files = self.get_selected_files(quiet=1, pipeline='confound')
if confound_files:
confounds_exist = True
else:
confounds_exist = False
if not confound_corr_report:
confounds_exist = False
if not tag:
tag = ''
else:
tag = 'desc-' + tag
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params,
confounds_exist, confound_files) for i, f in enumerate(files) if f}
for j in as_completed(job):
j.result()
if update_pipeline:
    if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
        self.set_confound_pipeline(self.pipeline)
self.set_pipeline('teneto_' + teneto.__version__)
self.set_pipeline_subdir('tvc')
self.set_bids_suffix('tvcconn') | python | def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True):
"""
Derive time-varying connectivity on the selected files.
Parameters
----------
params : dict.
See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS functions)
update_pipeline : bool
If true, the object updates the selected files with those derived here.
njobs : int
How many parallel jobs to run
confound_corr_report : bool
If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.
tag : str
any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'
Returns
-------
dfc : files
saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
files = self.get_selected_files(quiet=1)
confound_files = self.get_selected_files(quiet=1, pipeline='confound')
if confound_files:
confounds_exist = True
else:
confounds_exist = False
if not confound_corr_report:
confounds_exist = False
if not tag:
tag = ''
else:
tag = 'desc-' + tag
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params,
confounds_exist, confound_files) for i, f in enumerate(files) if f}
for j in as_completed(job):
j.result()
if update_pipeline:
    if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
        self.set_confound_pipeline(self.pipeline)
self.set_pipeline('teneto_' + teneto.__version__)
self.set_pipeline_subdir('tvc')
self.set_bids_suffix('tvcconn') | Derive time-varying connectivity on the selected files.
Parameters
----------
params : dict.
See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS functions)
update_pipeline : bool
If true, the object updates the selected files with those derived here.
njobs : int
How many parallel jobs to run
confound_corr_report : bool
If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.
tag : str
any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'
Returns
-------
dfc : files
saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L164-L219 |
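A hypothetical call illustrating the flow (the BIDS path and pipeline name are invented and the constructor call is sketched from the class's usage, not a verified signature; 'slidingwindow' and 'windowsize' follow the params accepted by teneto.timeseries.derive_temporalnetwork):

import teneto

tnet = teneto.TenetoBIDS('/path/to/bids', pipeline='fmriprep')
params = {'method': 'slidingwindow', 'windowsize': 20}
# Derives TVC for all selected files and points the object at the new tvc outputs
tnet.derive_temporalnetwork(params, update_pipeline=True, njobs=2)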
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS._derive_temporalnetwork | def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files):
"""
Function called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
"""
data = load_tabular_file(f, index_col=True, header=True)
fs, _ = drop_bids_suffix(f)
save_name, save_dir, _ = self._save_namepaths_bids_derivatives(
fs, tag, 'tvc', 'tvcconn')
if 'weight-var' in params.keys():
if params['weight-var'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-var'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
if 'weight-mean' in params.keys():
if params['weight-mean'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-mean'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
params['report'] = 'yes'
params['report_path'] = save_dir + '/report/'
params['report_filename'] = save_name + '_derivationreport.html'
if not os.path.exists(params['report_path']):
os.makedirs(params['report_path'])
if 'dimord' not in params:
params['dimord'] = 'time,node'
dfc = teneto.timeseries.derive_temporalnetwork(data.values, params)
dfc_net = TemporalNetwork(from_array=dfc, nettype='wu')
dfc_net.network.to_csv(save_dir + save_name + '.tsv', sep='\t')
sidecar = get_sidecar(f)
sidecar['tvc'] = params
if 'weight-var' in sidecar['tvc']:
sidecar['tvc']['weight-var'] = True
sidecar['tvc']['fc source'] = fc_files
if 'weight-mean' in sidecar['tvc']:
sidecar['tvc']['weight-mean'] = True
sidecar['tvc']['fc source'] = fc_files
sidecar['tvc']['inputfile'] = f
sidecar['tvc']['description'] = 'Time varying connectivity information.'
with open(save_dir + save_name + '.json', 'w') as fs:
json.dump(sidecar, fs)
if confounds_exist:
analysis_step = 'tvc-derive'
df = pd.read_csv(confound_files[i], sep='\t')
df = df.fillna(df.median())
ind = np.triu_indices(dfc.shape[0], k=1)
dfc_df = pd.DataFrame(dfc[ind[0], ind[1], :].transpose())
# If windowed, prune df so that it matches with dfc_df
if len(df) != len(dfc_df):
df = df.iloc[int(np.round((params['windowsize']-1)/2)): int(np.round((params['windowsize']-1)/2)+len(dfc_df))]
df.reset_index(inplace=True, drop=True)
# Correlate each confound (columns of df) with each edge time series (columns of dfc_df) along the time index.
dfc_df_z = (dfc_df - dfc_df.mean())
df_z = (df - df.mean())
R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(
df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0)
R_df_describe = R_df.describe()
desc_index = R_df_describe.index
confound_report_dir = params['report_path'] + \
'/' + save_name + '_confoundcorr/'
confound_report_figdir = confound_report_dir + 'figures/'
if not os.path.exists(confound_report_figdir):
os.makedirs(confound_report_figdir)
report = '<html><body>'
report += '<h1> Correlation of ' + analysis_step + ' and confounds.</h1>'
for c in R_df.columns:
fig, ax = plt.subplots(1)
ax = sns.distplot(
R_df[c], hist=False, color='m', ax=ax, kde_kws={"shade": True})
fig.savefig(confound_report_figdir + c + '.png')
plt.close(fig)
report += '<h2>' + c + '</h2>'
for ind_name, r in enumerate(R_df_describe[c]):
report += str(desc_index[ind_name]) + ': '
report += str(r) + '<br>'
report += 'Distribution of correlation values:'
report += '<img src=' + \
os.path.abspath(confound_report_figdir) + \
'/' + c + '.png><br><br>'
report += '</body></html>'
with open(confound_report_dir + save_name + '_confoundcorr.html', 'w') as file:
file.write(report) | python | def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files):
"""
Function called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
"""
data = load_tabular_file(f, index_col=True, header=True)
fs, _ = drop_bids_suffix(f)
save_name, save_dir, _ = self._save_namepaths_bids_derivatives(
fs, tag, 'tvc', 'tvcconn')
if 'weight-var' in params.keys():
if params['weight-var'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-var'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
if 'weight-mean' in params.keys():
if params['weight-mean'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-mean'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
params['report'] = 'yes'
params['report_path'] = save_dir + '/report/'
params['report_filename'] = save_name + '_derivationreport.html'
if not os.path.exists(params['report_path']):
os.makedirs(params['report_path'])
if 'dimord' not in params:
params['dimord'] = 'time,node'
dfc = teneto.timeseries.derive_temporalnetwork(data.values, params)
dfc_net = TemporalNetwork(from_array=dfc, nettype='wu')
dfc_net.network.to_csv(save_dir + save_name + '.tsv', sep='\t')
sidecar = get_sidecar(f)
sidecar['tvc'] = params
if 'weight-var' in sidecar['tvc']:
sidecar['tvc']['weight-var'] = True
sidecar['tvc']['fc source'] = fc_files
if 'weight-mean' in sidecar['tvc']:
sidecar['tvc']['weight-mean'] = True
sidecar['tvc']['fc source'] = fc_files
sidecar['tvc']['inputfile'] = f
sidecar['tvc']['description'] = 'Time varying connectivity information.'
with open(save_dir + save_name + '.json', 'w') as fs:
json.dump(sidecar, fs)
if confounds_exist:
analysis_step = 'tvc-derive'
df = pd.read_csv(confound_files[i], sep='\t')
df = df.fillna(df.median())
ind = np.triu_indices(dfc.shape[0], k=1)
dfc_df = pd.DataFrame(dfc[ind[0], ind[1], :].transpose())
# If windowed, prune df so that it matches with dfc_df
if len(df) != len(dfc_df):
df = df.iloc[int(np.round((params['windowsize']-1)/2)): int(np.round((params['windowsize']-1)/2)+len(dfc_df))]
df.reset_index(inplace=True, drop=True)
# Correlate each confound (columns of df) with each edge time series (columns of dfc_df) along the time index.
dfc_df_z = (dfc_df - dfc_df.mean())
df_z = (df - df.mean())
R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(
df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0)
R_df_describe = R_df.describe()
desc_index = R_df_describe.index
confound_report_dir = params['report_path'] + \
'/' + save_name + '_confoundcorr/'
confound_report_figdir = confound_report_dir + 'figures/'
if not os.path.exists(confound_report_figdir):
os.makedirs(confound_report_figdir)
report = '<html><body>'
report += '<h1> Correlation of ' + analysis_step + ' and confounds.</h1>'
for c in R_df.columns:
fig, ax = plt.subplots(1)
ax = sns.distplot(
R_df[c], hist=False, color='m', ax=ax, kde_kws={"shade": True})
fig.savefig(confound_report_figdir + c + '.png')
plt.close(fig)
report += '<h2>' + c + '</h2>'
for ind_name, r in enumerate(R_df_describe[c]):
report += str(desc_index[ind_name]) + ': '
report += str(r) + '<br>'
report += 'Distribution of correlation values:'
report += '<img src=' + \
os.path.abspath(confound_report_figdir) + \
'/' + c + '.png><br><br>'
report += '</body></html>'
with open(confound_report_dir + save_name + '_confoundcorr.html', 'w') as file:
file.write(report) | Function called by TenetoBIDS.derive_temporalnetwork for concurrent processing. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L221-L320 |
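The confound-correlation block above computes Pearson correlations through centered dot products rather than calling a correlation routine. A small self-contained check that the two agree (random illustrative data; the confound names 'fd' and 'dvars' are invented):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
dfc_df = pd.DataFrame(rng.normal(size=(50, 3)))                       # edge time series
df = pd.DataFrame(rng.normal(size=(50, 2)), columns=['fd', 'dvars'])  # confounds

# Same computation as in the function above
dfc_df_z = dfc_df - dfc_df.mean()
df_z = df - df.mean()
R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(
    df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0)

# Cross-check one entry against np.corrcoef
r_check = np.corrcoef(dfc_df[0], df['fd'])[0, 1]
assert np.isclose(R_df.loc[0, 'fd'], r_check)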
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.make_functional_connectivity | def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None):
"""
Makes connectivity matrix for each of the subjects.
Parameters
----------
returngroup : bool, default=False
If true, returns the group average connectivity matrix.
njobs : int
How many parallel jobs to run
file_idx : bool
Default False. If True, the index column in the loaded file is ignored.
file_hdr : bool
Default False. If True, the header row in the loaded file is ignored.
Returns
-------
Saves data in derivatives/teneto_<version>/.../fc/
R_group : array
if returngroup is true, the average connectivity matrix is returned.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
files = self.get_selected_files(quiet=1)
R_group = []
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files}
for j in as_completed(job):
R_group.append(j.result())
if returngroup:
# Fisher transform -> mean -> inverse Fisher transform
R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=0))
return np.array(R_group) | python | def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None):
"""
Makes connectivity matrix for each of the subjects.
Parameters
----------
returngroup : bool, default=False
If true, returns the group average connectivity matrix.
njobs : int
How many parallel jobs to run
file_idx : bool
Default False. If True, the index column in the loaded file is ignored.
file_hdr : bool
Default False. If True, the header row in the loaded file is ignored.
Returns
-------
Saves data in derivatives/teneto_<version>/.../fc/
R_group : array
if returngroup is true, the average connectivity matrix is returned.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
files = self.get_selected_files(quiet=1)
R_group = []
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files}
for j in as_completed(job):
R_group.append(j.result())
if returngroup:
# Fisher transform -> mean -> inverse Fisher transform
R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=0))
return np.array(R_group) | Makes connectivity matrix for each of the subjects.
Parameters
----------
returngroup : bool, default=False
If true, returns the group average connectivity matrix.
njobs : int
How many parallel jobs to run
file_idx : bool
Default False. If True, the index column in the loaded file is ignored.
file_hdr : bool
Default False. If True, the header row in the loaded file is ignored.
Returns
-------
Saves data in derivatives/teneto_<version>/.../fc/
R_group : array
if returngroup is true, the average connectivity matrix is returned. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L360-L398 |
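On the group averaging: correlation coefficients are bounded in [-1, 1] and their sampling distribution is skewed, so averaging them directly is biased; the function instead averages in Fisher z-space and maps back. A toy sketch of just that step (matrices invented; the diagonal is zeroed here because arctanh(1) is infinite):

import numpy as np

R1 = np.array([[0.0, 0.5], [0.5, 0.0]])
R2 = np.array([[0.0, 0.9], [0.9, 0.0]])

# Fisher transform -> mean -> inverse Fisher transform
R_group = np.tanh(np.mean(np.arctanh(np.array([R1, R2])), axis=0))
print(R_group[0, 1])  # ~0.766, higher than the naive mean 0.70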
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS._save_namepaths_bids_derivatives | def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
"""
Creates output directory and output name
Parameters
---------
f : str
input files, includes the file bids_suffix
tag : str
what should be added to f in the output file.
save_directory : str
additional directory that the output file should go in
suffix : str
add new suffix to data
Returns
-------
save_name : str
previous filename with new tag
save_dir : str
directory where it will be saved
base_dir : str
subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/])
"""
file_name = f.split('/')[-1].split('.')[0]
if tag != '':
tag = '_' + tag
if suffix:
file_name, _ = drop_bids_suffix(file_name)
save_name = file_name + tag
save_name += '_' + suffix
else:
save_name = file_name + tag
paths_post_pipeline = f.split(self.pipeline)
if self.pipeline_subdir:
paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[
0]
else:
paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \
teneto.__version__ + '/' + paths_post_pipeline + '/'
save_dir = base_dir + '/' + save_directory + '/'
if not os.path.exists(save_dir):
# A case has happened where this has been done in parallel and an error was raised. So do try/except
try:
os.makedirs(save_dir)
except:
# Wait 2 seconds so that the error does not try and save something in the directory before it is created
time.sleep(2)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json'):
try:
with open(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json', 'w') as fs:
json.dump(self.tenetoinfo, fs)
except:
# Same as above, in case parallel execution causes duplication
time.sleep(2)
return save_name, save_dir, base_dir | python | def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None):
"""
Creates output directory and output name
Parameters
---------
f : str
input files, includes the file bids_suffix
tag : str
what should be added to f in the output file.
save_directory : str
additional directory that the output file should go in
suffix : str
add new suffix to data
Returns
-------
save_name : str
previous filename with new tag
save_dir : str
directory where it will be saved
base_dir : str
subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/])
"""
file_name = f.split('/')[-1].split('.')[0]
if tag != '':
tag = '_' + tag
if suffix:
file_name, _ = drop_bids_suffix(file_name)
save_name = file_name + tag
save_name += '_' + suffix
else:
save_name = file_name + tag
paths_post_pipeline = f.split(self.pipeline)
if self.pipeline_subdir:
paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[
0]
else:
paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \
teneto.__version__ + '/' + paths_post_pipeline + '/'
save_dir = base_dir + '/' + save_directory + '/'
if not os.path.exists(save_dir):
# This may run in parallel, so makedirs can race with another process; use try/except
try:
os.makedirs(save_dir)
except:
# Wait 2 seconds so that the error does not try and save something in the directory before it is created
time.sleep(2)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json'):
try:
with open(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json', 'w') as fs:
json.dump(self.tenetoinfo, fs)
except:
# Same as above, in case parallel execution causes duplication
time.sleep(2)
return save_name, save_dir, base_dir | Creates output directory and output name
Paramters
---------
f : str
input files, includes the file bids_suffix
tag : str
what should be added to f in the output file.
save_directory : str
additional directory that the output file should go in
suffix : str
add new suffix to data
Returns
-------
save_name : str
previous filename with new tag
save_dir : str
directory where it will be saved
base_dir : str
subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/]) | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L409-L466 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.get_tags | def get_tags(self, tag, quiet=1):
"""
Returns which tag alternatives can be identified in the BIDS derivatives structure.
"""
if not self.pipeline:
print('Please set pipeline first.')
self.get_pipeline_alternatives(quiet)
else:
if tag == 'sub':
datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/'
tag_alternatives = [
f.split('sub-')[1] for f in os.listdir(datapath) if os.path.isdir(datapath + f) and 'sub-' in f]
elif tag == 'ses':
tag_alternatives = []
for sub in self.bids_tags['sub']:
tag_alternatives += [f.split('ses-')[1] for f in os.listdir(
self.BIDS_dir + '/derivatives/' + self.pipeline + '/' + 'sub-' + sub) if 'ses' in f]
tag_alternatives = set(tag_alternatives)
else:
files = self.get_selected_files(quiet=1)
tag_alternatives = []
for f in files:
f = f.split('.')[0]
f = f.split('/')[-1]
tag_alternatives += [t.split('-')[1]
for t in f.split('_') if t.split('-')[0] == tag]
tag_alternatives = set(tag_alternatives)
if quiet == 0:
print(tag + ' alternatives: ' + ', '.join(tag_alternatives))
return list(tag_alternatives) | python | def get_tags(self, tag, quiet=1):
"""
Returns which tag alternatives can be identified in the BIDS derivatives structure.
"""
if not self.pipeline:
print('Please set pipeline first.')
self.get_pipeline_alternatives(quiet)
else:
if tag == 'sub':
datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/'
tag_alternatives = [
f.split('sub-')[1] for f in os.listdir(datapath) if os.path.isdir(datapath + f) and 'sub-' in f]
elif tag == 'ses':
tag_alternatives = []
for sub in self.bids_tags['sub']:
tag_alternatives += [f.split('ses-')[1] for f in os.listdir(
self.BIDS_dir + '/derivatives/' + self.pipeline + '/' + 'sub-' + sub) if 'ses' in f]
tag_alternatives = set(tag_alternatives)
else:
files = self.get_selected_files(quiet=1)
tag_alternatives = []
for f in files:
f = f.split('.')[0]
f = f.split('/')[-1]
tag_alternatives += [t.split('-')[1]
for t in f.split('_') if t.split('-')[0] == tag]
tag_alternatives = set(tag_alternatives)
if quiet == 0:
print(tag + ' alternatives: ' + ', '.join(tag_alternatives))
return list(tag_alternatives) | Returns which tag alternatives can be identified in the BIDS derivatives structure. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L468-L497 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.get_pipeline_alternatives | def get_pipeline_alternatives(self, quiet=0):
"""
The pipeline are the different outputs that are placed in the ./derivatives directory.
get_pipeline_alternatives gets those which are found in the specified BIDS directory structure.
"""
if not os.path.exists(self.BIDS_dir + '/derivatives/'):
print('Derivative directory not found. Is the data preprocessed?')
else:
pipeline_alternatives = os.listdir(self.BIDS_dir + '/derivatives/')
if quiet == 0:
print('Derivative alternatives: ' +
', '.join(pipeline_alternatives))
return list(pipeline_alternatives) | python | def get_pipeline_alternatives(self, quiet=0):
"""
The pipeline are the different outputs that are placed in the ./derivatives directory.
get_pipeline_alternatives gets those which are found in the specified BIDS directory structure.
"""
if not os.path.exists(self.BIDS_dir + '/derivatives/'):
print('Derivative directory not found. Is the data preprocessed?')
else:
pipeline_alternatives = os.listdir(self.BIDS_dir + '/derivatives/')
if quiet == 0:
print('Derivative alternatives: ' +
', '.join(pipeline_alternatives))
return list(pipeline_alternatives) | The pipeline are the different outputs that are placed in the ./derivatives directory.
get_pipeline_alternatives gets those which are found in the specified BIDS directory structure. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L499-L512 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.get_pipeline_subdir_alternatives | def get_pipeline_subdir_alternatives(self, quiet=0):
"""
Note
-----
This function currently returns the wrong folders and will be fixed in the future.
This function should return ./derivatives/pipeline/sub-xx/[ses-yy/][func/]/pipeline_subdir
But it does not care about ses-yy at the moment.
"""
if not self.pipeline:
print('Please set pipeline first.')
self.get_pipeline_alternatives()
else:
pipeline_subdir_alternatives = []
for s in self.bids_tags['sub']:
derdir_files = os.listdir(
self.BIDS_dir + '/derivatives/' + self.pipeline + '/sub-' + s + '/func/')
pipeline_subdir_alternatives += [
f for f in derdir_files if os.path.isdir(self.BIDS_dir + '/derivatives/' + self.pipeline + '/sub-' + s + '/func/' + f)]
pipeline_subdir_alternatives = set(pipeline_subdir_alternatives)
if quiet == 0:
print('Pipeline_subdir alternatives: ' +
', '.join(pipeline_subdir_alternatives))
return list(pipeline_subdir_alternatives) | python | def get_pipeline_subdir_alternatives(self, quiet=0):
"""
Note
-----
This function currently returns the wrong folders and will be fixed in the future.
This function should return ./derivatives/pipeline/sub-xx/[ses-yy/][func/]/pipeline_subdir
But it does not care about ses-yy at the moment.
"""
if not self.pipeline:
print('Please set pipeline first.')
self.get_pipeline_alternatives()
else:
pipeline_subdir_alternatives = []
for s in self.bids_tags['sub']:
derdir_files = os.listdir(
self.BIDS_dir + '/derivatives/' + self.pipeline + '/sub-' + s + '/func/')
pipeline_subdir_alternatives += [
f for f in derdir_files if os.path.isdir(self.BIDS_dir + '/derivatives/' + self.pipeline + '/sub-' + s + '/func/' + f)]
pipeline_subdir_alternatives = set(pipeline_subdir_alternatives)
if quiet == 0:
print('Pipeline_subdir alternatives: ' +
', '.join(pipeline_subdir_alternatives))
return list(pipeline_subdir_alternatives) | Note
-----
This function currently returns the wrong folders and will be fixed in the future.
This function should return ./derivatives/pipeline/sub-xx/[ses-yy/][func/]/pipeline_subdir
But it does not care about ses-yy at the moment. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L514-L538 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.get_selected_files | def get_selected_files(self, pipeline='pipeline', forfile=None, quiet=0, allowedfileformats='default'):
"""
Parameters
----------
pipeline : string
can be \'pipeline\' (main analysis pipeline, set in tnet.set_pipeline) or \'confound\' (where confound files are, set in tnet.set_confound_pipeline()),
\'functionalconnectivity\'
quiet: int
If 0, prints results. If 1, no results are printed.
forfile : str or dict
A filename or dictionary of file tags. If this is set, only files that match those tags are returned.
allowedfileformats : list
list of file formats that are acceptable. Default list is: ['.tsv', '.nii.gz']
Returns
-------
found_files : list
The files which are currently selected using the set pipeline, pipeline_subdir, space, parcellation, tasks, runs, subjects etc. These are the files that will generally be used if calling a make_ function.
"""
# This could be made better
file_dict = dict(self.bids_tags)
if allowedfileformats == 'default':
allowedfileformats = ['.tsv', '.nii.gz']
if forfile:
if isinstance(forfile, str):
forfile = get_bids_tag(forfile, 'all')
for n in forfile.keys():
file_dict[n] = [forfile[n]]
non_entries = []
for n in file_dict:
if not file_dict[n]:
non_entries.append(n)
for n in non_entries:
file_dict.pop(n)
# Only keep non-empty elements
file_components = []
for k in ['sub', 'ses', 'task', 'run']:
if k in file_dict:
file_components.append([k + '-' + t for t in file_dict[k]])
file_list = list(itertools.product(*file_components))
# Specify main directory
if pipeline == 'pipeline':
mdir = self.BIDS_dir + '/derivatives/' + self.pipeline
elif pipeline == 'confound' and self.confound_pipeline:
mdir = self.BIDS_dir + '/derivatives/' + self.confound_pipeline
elif pipeline == 'confound':
mdir = self.BIDS_dir + '/derivatives/' + self.pipeline
elif pipeline == 'functionalconnectivity':
mdir = self.BIDS_dir + '/derivatives/teneto_' + teneto.__version__
else:
raise ValueError('unknown request')
found_files = []
for f in file_list:
wdir = str(mdir)
sub = [t for t in f if t.startswith('sub')]
ses = [t for t in f if t.startswith('ses')]
wdir += '/' + sub[0] + '/'
if ses:
wdir += '/' + ses[0] + '/'
wdir += '/func/'
if pipeline == 'pipeline':
wdir += '/' + self.pipeline_subdir + '/'
fileending = [self.bids_suffix +
f for f in allowedfileformats]
elif pipeline == 'functionalconnectivity':
wdir += '/fc/'
fileending = ['conn' + f for f in allowedfileformats]
elif pipeline == 'confound':
fileending = ['confounds' + f for f in allowedfileformats]
if os.path.exists(wdir):
# make filenames
found = []
# Check that the tags are in the specified bids tags
for ff in os.listdir(wdir):
ftags = get_bids_tag(ff, 'all')
t = [t for t in ftags if t in file_dict and ftags[t]
in file_dict[t]]
if len(t) == len(file_dict):
found.append(ff)
found = [f for f in found for e in fileending if f.endswith(e)]
# Include only if all analysis step tags are present
# Exclude if confounds tag is present
if pipeline == 'confound':
found = [i for i in found if '_confounds' in i]
else:
found = [i for i in found if '_confounds' not in i]
# Make full paths
found = list(
map(str.__add__, [re.sub('/+', '/', wdir)]*len(found), found))
# Remove any files listed in bad_files (could add JSON sidecar reading here)
found = [i for i in found if not any(
[bf in i for bf in self.bad_files])]
if found:
found_files += found
if quiet == -1:
print(wdir)
found_files = list(set(found_files))
if quiet == 0:
print(found_files)
return found_files | python | def get_selected_files(self, pipeline='pipeline', forfile=None, quiet=0, allowedfileformats='default'):
"""
Parameters
----------
pipeline : string
can be \'pipeline\' (main analysis pipeline, set in tnet.set_pipeline) or \'confound\' (where confound files are, set in tnet.set_confound_pipeline()),
\'functionalconnectivity\'
quiet: int
If 0, prints results. If 1, no results are printed.
forfile : str or dict
A filename or dictionary of file tags. If this is set, only files that match those tags are returned.
allowedfileformats : list
list of file formats that are acceptable. Default list is: ['.tsv', '.nii.gz']
Returns
-------
found_files : list
The files which are currently selected using the set pipeline, pipeline_subdir, space, parcellation, tasks, runs, subjects etc. These are the files that will generally be used if calling a make_ function.
"""
# This could be made better
file_dict = dict(self.bids_tags)
if allowedfileformats == 'default':
allowedfileformats = ['.tsv', '.nii.gz']
if forfile:
if isinstance(forfile, str):
forfile = get_bids_tag(forfile, 'all')
for n in forfile.keys():
file_dict[n] = [forfile[n]]
non_entries = []
for n in file_dict:
if not file_dict[n]:
non_entries.append(n)
for n in non_entries:
file_dict.pop(n)
# Only keep non-empty elements
file_components = []
for k in ['sub', 'ses', 'task', 'run']:
if k in file_dict:
file_components.append([k + '-' + t for t in file_dict[k]])
file_list = list(itertools.product(*file_components))
# Specify main directory
if pipeline == 'pipeline':
mdir = self.BIDS_dir + '/derivatives/' + self.pipeline
elif pipeline == 'confound' and self.confound_pipeline:
mdir = self.BIDS_dir + '/derivatives/' + self.confound_pipeline
elif pipeline == 'confound':
mdir = self.BIDS_dir + '/derivatives/' + self.pipeline
elif pipeline == 'functionalconnectivity':
mdir = self.BIDS_dir + '/derivatives/teneto_' + teneto.__version__
else:
raise ValueError('unknown request')
found_files = []
for f in file_list:
wdir = str(mdir)
sub = [t for t in f if t.startswith('sub')]
ses = [t for t in f if t.startswith('ses')]
wdir += '/' + sub[0] + '/'
if ses:
wdir += '/' + ses[0] + '/'
wdir += '/func/'
if pipeline == 'pipeline':
wdir += '/' + self.pipeline_subdir + '/'
fileending = [self.bids_suffix +
f for f in allowedfileformats]
elif pipeline == 'functionalconnectivity':
wdir += '/fc/'
fileending = ['conn' + f for f in allowedfileformats]
elif pipeline == 'confound':
fileending = ['confounds' + f for f in allowedfileformats]
if os.path.exists(wdir):
# make filenames
found = []
# Check that the tags are in the specified bids tags
for ff in os.listdir(wdir):
ftags = get_bids_tag(ff, 'all')
t = [t for t in ftags if t in file_dict and ftags[t]
in file_dict[t]]
if len(t) == len(file_dict):
found.append(ff)
found = [f for f in found for e in fileending if f.endswith(e)]
# Include only if all analysis step tags are present
# Exclude if confounds tag is present
if pipeline == 'confound':
found = [i for i in found if '_confounds' in i]
else:
found = [i for i in found if '_confounds' not in i]
# Make full paths
found = list(
map(str.__add__, [re.sub('/+', '/', wdir)]*len(found), found))
# Remove any files in bad files (could add json sidecar reading here)
found = [i for i in found if not any(
[bf in i for bf in self.bad_files])]
if found:
found_files += found
if quiet == -1:
print(wdir)
found_files = list(set(found_files))
if quiet == 0:
print(found_files)
return found_files | Parameters
----------
pipeline : string
can be \'pipeline\' (main analysis pipeline, set in tnet.set_pipeline) or \'confound\' (where confound files are, set in tnet.set_confound_pipeline()),
\'functionalconnectivity\'
quiet: int
If 0, prints the found files; if 1, prints nothing (if -1, prints each searched directory).
forfile : str or dict
A filename or dictionary of file tags. If set, only files matching those tags are returned.
allowedfileformats : list
list of file formats that are acceptable. Default list is: ['.tsv', '.nii.gz']
Returns
-------
found_files : list
The files that are currently selected, given the set pipeline, pipeline_subdir, space, parcellation, tasks, runs, subjects, etc. These are the files that will generally be used when calling a make_ function. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L540-L648 |
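A minimal usage sketch of the selection logic above (the BIDS directory, pipeline name and tag values below are hypothetical):

import teneto

# Point the object at a dataset and restrict selection with BIDS tags
tnet = teneto.TenetoBIDS('/data/mybids', pipeline='fmriprep',
                         bids_tags={'sub': ['01'], 'task': ['rest']})
# quiet=1 suppresses printing; a list of full file paths is returned
selected = tnet.get_selected_files(quiet=1)
# pipeline='confound' returns the matching confound files instead
confound_files = tnet.get_selected_files(quiet=1, pipeline='confound')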
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.set_exclusion_file | def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'):
"""
Excludes subjects given certain exclusion criteria.
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files
exclusion_criteria : str or list
for each confound, an exclusion criterion should be expressed as a string: >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every subject whose aggregated confound value is greater than 0.2 will be rejected.
confound_stat : str or list
Can be median, mean or std: how the confound data is aggregated (if there is a measure per time-point, it is aggregated over all time-points). If multiple confounds are specified, this has to be a list.
Returns
--------
calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if isinstance(confound, str):
confound = [confound]
if isinstance(exclusion_criteria, str):
exclusion_criteria = [exclusion_criteria]
if isinstance(confound_stat, str):
confound_stat = [confound_stat]
if len(exclusion_criteria) != len(confound):
raise ValueError(
'Same number of confound names and exclusion criteria must be given')
if len(confound_stat) != len(confound):
raise ValueError(
'Same number of confound names and confound stats must be given')
relex, crit = process_exclusion_criteria(exclusion_criteria)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
bad_files = []
bs = 0
foundconfound = []
foundreason = []
for s, cfile in enumerate(confound_files):
df = load_tabular_file(cfile, index_col=None)
found_bad_subject = False
for i, _ in enumerate(confound):
if confound_stat[i] == 'median':
if relex[i](df[confound[i]].median(), crit[i]):
found_bad_subject = True
elif confound_stat[i] == 'mean':
if relex[i](df[confound[i]].mean(), crit[i]):
found_bad_subject = True
elif confound_stat[i] == 'std':
if relex[i](df[confound[i]].std(), crit[i]):
found_bad_subject = True
if found_bad_subject:
foundconfound.append(confound[i])
foundreason.append(exclusion_criteria[i])
if found_bad_subject:
bad_files.append(files[s])
bs += 1
self.set_bad_files(
bad_files, reason='excluded file (confound over specified stat threshold)')
for i, f in enumerate(bad_files):
sidecar = get_sidecar(f)
sidecar['file_exclusion'] = {}
sidecar['file_exclusion']['confound'] = foundconfound[i]
sidecar['file_exclusion']['threshold'] = foundreason[i]
for af in ['.tsv', '.nii.gz']:
f = f.split(af)[0]
f += '.json'
with open(f, 'w') as fs:
json.dump(sidecar, fs)
print('Removed ' + str(bs) + ' files from inclusion.') | python | def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'):
"""
Excludes subjects given certain exclusion criteria.
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files
exclusion_criteria : str or list
for each confound, an exclusion criterion should be expressed as a string: >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every subject whose aggregated confound value is greater than 0.2 will be rejected.
confound_stat : str or list
Can be median, mean or std: how the confound data is aggregated (if there is a measure per time-point, it is aggregated over all time-points). If multiple confounds are specified, this has to be a list.
Returns
--------
calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if isinstance(confound, str):
confound = [confound]
if isinstance(exclusion_criteria, str):
exclusion_criteria = [exclusion_criteria]
if isinstance(confound_stat, str):
confound_stat = [confound_stat]
if len(exclusion_criteria) != len(confound):
raise ValueError(
'Same number of confound names and exclusion criteria must be given')
if len(confound_stat) != len(confound):
raise ValueError(
'Same number of confound names and confound stats must be given')
relex, crit = process_exclusion_criteria(exclusion_criteria)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
bad_files = []
bs = 0
foundconfound = []
foundreason = []
for s, cfile in enumerate(confound_files):
df = load_tabular_file(cfile, index_col=None)
found_bad_subject = False
for i, _ in enumerate(confound):
if confound_stat[i] == 'median':
if relex[i](df[confound[i]].median(), crit[i]):
found_bad_subject = True
elif confound_stat[i] == 'mean':
if relex[i](df[confound[i]].mean(), crit[i]):
found_bad_subject = True
elif confound_stat[i] == 'std':
if relex[i](df[confound[i]].std(), crit[i]):
found_bad_subject = True
if found_bad_subject:
foundconfound.append(confound[i])
foundreason.append(exclusion_criteria[i])
if found_bad_subject:
bad_files.append(files[s])
bs += 1
self.set_bad_files(
bad_files, reason='excluded file (confound over specified stat threshold)')
for i, f in enumerate(bad_files):
sidecar = get_sidecar(f)
sidecar['file_exclusion'] = {}
sidecar['file_exclusion']['confound'] = foundconfound[i]
sidecar['file_exclusion']['threshold'] = foundreason[i]
for af in ['.tsv', '.nii.gz']:
f = f.split(af)[0]
f += '.json'
with open(f, 'w') as fs:
json.dump(sidecar, fs)
print('Removed ' + str(bs) + ' files from inclusion.') | Excludes subjects given certain exclusion criteria.
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files
exclusion_criteria : str or list
for each confound, an exclusion criterion should be expressed as a string: >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every subject whose aggregated confound value is greater than 0.2 will be rejected.
confound_stat : str or list
Can be median, mean or std: how the confound data is aggregated (if there is a measure per time-point, it is aggregated over all time-points). If multiple confounds are specified, this has to be a list.
Returns
--------
calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L650-L719 |
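A hedged example of the subject-level exclusion above (the confound column name follows fmriprep conventions and is an assumption):

# Reject any file whose mean framewise displacement exceeds 0.5
tnet.set_confound_pipeline('fmriprep')  # where the *_confounds.tsv files live
tnet.set_exclusion_file('FramewiseDisplacement', '>0.5', confound_stat='mean')
# Excluded files are recorded in tnet.bad_files and skipped by later calls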
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.set_exclusion_timepoint | def set_exclusion_timepoint(self, confound, exclusion_criteria, replace_with, tol=1, overwrite=True, desc=None):
"""
Excludes time-points (and, if too many are excluded, whole files) given certain exclusion criteria. Does not work on nifti files, only csv, numpy or tsv. Assumes data is node,time
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files. Assumes data is node,time
exclusion_criteria : str or list
for each confound, an exclusion criterion should be expressed as a string: >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every time-point where the confound exceeds 0.2 will be censored.
replace_with : str
Can be 'nan' (bad values become nans) or 'cubicspline' (bad values are interpolated). If bad value occurs at 0 or -1 index, then these values are kept and no interpolation occurs.
tol : float
Tolerance: ratio of excluded time-points allowed before the file is marked as bad.
overwrite : bool (default=True)
If True, files in the teneto derivatives directory with the same name will be overwritten by this step.
The json sidecar is updated with the new information about the file.
desc : str
String to add desc tag to filenames if overwrite is set to true.
Returns
-------
Loads the TenetoBIDS.selected_files and replaces any instances of confound meeting the exclusion_criteria with replace_with.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if isinstance(confound, str):
confound = [confound]
if isinstance(exclusion_criteria, str):
exclusion_criteria = [exclusion_criteria]
if len(exclusion_criteria) != len(confound):
raise ValueError(
'Same number of confound names and exclusion criteria must be given')
relex, crit = process_exclusion_criteria(exclusion_criteria)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
bad_files = []
for i, cfile in enumerate(confound_files):
data = load_tabular_file(files[i]).values
df = load_tabular_file(cfile, index_col=None)
ind = []
# Can't interpolate values if nanind is at the beginning or end. So keep these as their original values.
for ci, c in enumerate(confound):
ind = df[relex[ci](df[c], crit[ci])].index
if replace_with == 'cubicspline':
if 0 in ind:
ind = np.delete(ind, np.where(ind == 0))
if df.index.max():
ind = np.delete(ind, np.where(ind == df.index.max()))
data[:, ind.astype(int)] = np.nan
nanind = np.where(np.isnan(data[0, :]))[0]
badpoints_n = len(nanind)
# Bad file if the number of ratio bad points are greater than the tolerance.
if badpoints_n / np.array(len(df)) > tol:
bad_files.append(files[i])
nonnanind = np.where(np.isnan(data[0, :]) == 0)[0]
nanind = nanind[nanind > nonnanind.min()]
nanind = nanind[nanind < nonnanind.max()]
if replace_with == 'cubicspline':
for n in range(data.shape[0]):
interp = interp1d(
nonnanind, data[n, nonnanind], kind='cubic')
data[n, nanind] = interp(nanind)
# only save if the subject is not excluded
data = pd.DataFrame(data)
sname, _ = drop_bids_suffix(files[i])
# Move files to teneto derivatives if the pipeline isn't already set to it
if self.pipeline != 'teneto_' + teneto.__version__:
sname = sname.split('/')[-1]
spath = self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/'
tags = get_bids_tag(sname, ['sub', 'ses'])
spath += 'sub-' + tags['sub'] + '/'
if 'ses' in tags:
spath += 'ses-' + tags['ses'] + '/'
spath += 'func/'
if self.pipeline_subdir:
spath += self.pipeline_subdir + '/'
make_directories(spath)
sname = spath + sname
if 'desc' in sname and desc:
desctag = get_bids_tag(sname.split('/')[-1], 'desc')
sname = ''.join(sname.split('desc-' + desctag['desc']))
sname += '_desc-' + desc
if os.path.exists(sname + '_' + self.bids_suffix + '.tsv') and not overwrite:
raise ValueError(
'overwrite is set to False, but non-unique filename. Set unique desc tag')
data.to_csv(sname + '_' + self.bids_suffix + '.tsv', sep='\t')
# Update json sidecar
sidecar = get_sidecar(files[i])
sidecar['scrubbed_timepoints'] = {}
sidecar['scrubbed_timepoints']['description'] = 'Scrubbing that censors time-points where the confounds exceeded the given threshold.\
Censored time-points are replaced with the replacement value (nans or cubic spline). \
Output of teneto.TenetoBIDS.set_exclusion_timepoint.'
sidecar['scrubbed_timepoints']['confound'] = ','.join(confound)
sidecar['scrubbed_timepoints']['threshold'] = ','.join(
exclusion_criteria)
sidecar['scrubbed_timepoints']['replacement'] = replace_with
sidecar['scrubbed_timepoints']['badpoint_number'] = badpoints_n
sidecar['scrubbed_timepoints']['badpoint_ratio'] = badpoints_n / \
np.array(len(df))
sidecar['scrubbed_timepoints']['file_exclusion_when_badpoint_ratio'] = tol
with open(sname + '_' + self.bids_suffix + '.json', 'w') as fs:
json.dump(sidecar, fs)
self.set_bad_files(
bad_files, reason='scrubbing (number of points over threshold)')
self.set_pipeline('teneto_' + teneto.__version__)
if desc:
self.set_bids_tags({'desc': desc.split('-')[1]}) | python | def set_exclusion_timepoint(self, confound, exclusion_criteria, replace_with, tol=1, overwrite=True, desc=None):
"""
Excludes time-points (and, if too many are excluded, whole files) given certain exclusion criteria. Does not work on nifti files, only csv, numpy or tsv. Assumes data is node,time
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files. Assumes data is node,time
exclusion_criteria : str or list
for each confound, an exclusion criterion should be expressed as a string: >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every time-point where the confound exceeds 0.2 will be censored.
replace_with : str
Can be 'nan' (bad values become nans) or 'cubicspline' (bad values are interpolated). If bad value occurs at 0 or -1 index, then these values are kept and no interpolation occurs.
tol : float
Tolerance: ratio of excluded time-points allowed before the file is marked as bad.
overwrite : bool (default=True)
If True, files in the teneto derivatives directory with the same name will be overwritten by this step.
The json sidecar is updated with the new information about the file.
desc : str
String to add desc tag to filenames if overwrite is set to true.
Returns
-------
Loads the TenetoBIDS.selected_files and replaces any instances of confound meeting the exclusion_criteria with replace_with.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if isinstance(confound, str):
confound = [confound]
if isinstance(exclusion_criteria, str):
exclusion_criteria = [exclusion_criteria]
if len(exclusion_criteria) != len(confound):
raise ValueError(
'Same number of confound names and exclusion criteria must be given')
relex, crit = process_exclusion_criteria(exclusion_criteria)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
bad_files = []
for i, cfile in enumerate(confound_files):
data = load_tabular_file(files[i]).values
df = load_tabular_file(cfile, index_col=None)
ind = []
# Can't interpolate values if nanind is at the beginning or end. So keep these as their original values.
for ci, c in enumerate(confound):
ind = df[relex[ci](df[c], crit[ci])].index
if replace_with == 'cubicspline':
if 0 in ind:
ind = np.delete(ind, np.where(ind == 0))
if df.index.max():
ind = np.delete(ind, np.where(ind == df.index.max()))
data[:, ind.astype(int)] = np.nan
nanind = np.where(np.isnan(data[0, :]))[0]
badpoints_n = len(nanind)
# Bad file if the number of ratio bad points are greater than the tolerance.
if badpoints_n / np.array(len(df)) > tol:
bad_files.append(files[i])
nonnanind = np.where(np.isnan(data[0, :]) == 0)[0]
nanind = nanind[nanind > nonnanind.min()]
nanind = nanind[nanind < nonnanind.max()]
if replace_with == 'cubicspline':
for n in range(data.shape[0]):
interp = interp1d(
nonnanind, data[n, nonnanind], kind='cubic')
data[n, nanind] = interp(nanind)
# only save if the subject is not excluded
data = pd.DataFrame(data)
sname, _ = drop_bids_suffix(files[i])
# Move files to teneto derivatives if the pipeline isn't already set to it
if self.pipeline != 'teneto_' + teneto.__version__:
sname = sname.split('/')[-1]
spath = self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/'
tags = get_bids_tag(sname, ['sub', 'ses'])
spath += 'sub-' + tags['sub'] + '/'
if 'ses' in tags:
spath += 'ses-' + tags['ses'] + '/'
spath += 'func/'
if self.pipeline_subdir:
spath += self.pipeline_subdir + '/'
make_directories(spath)
sname = spath + sname
if 'desc' in sname and desc:
desctag = get_bids_tag(sname.split('/')[-1], 'desc')
sname = ''.join(sname.split('desc-' + desctag['desc']))
sname += '_desc-' + desc
if os.path.exists(sname + '_' + self.bids_suffix + '.tsv') and not overwrite:
raise ValueError(
'overwrite is set to False, but non-unique filename. Set unique desc tag')
data.to_csv(sname + '_' + self.bids_suffix + '.tsv', sep='\t')
# Update json sidecar
sidecar = get_sidecar(files[i])
sidecar['scrubbed_timepoints'] = {}
sidecar['scrubbed_timepoints']['description'] = 'Scrubbing that censors time-points where the confounds exceeded the given threshold.\
Censored time-points are replaced with the replacement value (nans or cubic spline). \
Output of teneto.TenetoBIDS.set_exclusion_timepoint.'
sidecar['scrubbed_timepoints']['confound'] = ','.join(confound)
sidecar['scrubbed_timepoints']['threshold'] = ','.join(
exclusion_criteria)
sidecar['scrubbed_timepoints']['replacement'] = replace_with
sidecar['scrubbed_timepoints']['badpoint_number'] = badpoints_n
sidecar['scrubbed_timepoints']['badpoint_ratio'] = badpoints_n / \
np.array(len(df))
sidecar['scrubbed_timepoints']['file_exclusion_when_badpoint_ratio'] = tol
with open(sname + '_' + self.bids_suffix + '.json', 'w') as fs:
json.dump(sidecar, fs)
self.set_bad_files(
bad_files, reason='scrubbing (number of points over threshold)')
self.set_pipeline('teneto_' + teneto.__version__)
if desc:
self.set_bids_tags({'desc': desc.split('-')[1]}) | Excludes time-points (and, if too many are excluded, whole files) given certain exclusion criteria. Does not work on nifti files, only csv, numpy or tsv. Assumes data is node,time
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files. Assumes data is node,time
exclusion_criteria : str or list
for each confound, an exclusion criterion should be expressed as a string: >, <, >= or <= followed by the numerical threshold. Ex. '>0.2' means every time-point where the confound exceeds 0.2 will be censored.
replace_with : str
Can be 'nan' (bad values become nans) or 'cubicspline' (bad values are interpolated). If bad value occurs at 0 or -1 index, then these values are kept and no interpolation occurs.
tol : float
Tolerance: ratio of excluded time-points allowed before the file is marked as bad.
overwrite : bool (default=True)
If True, files in the teneto derivatives directory with the same name will be overwritten by this step.
The json sidecar is updated with the new information about the file.
desc : str
String to add desc tag to filenames if overwrite is set to true.
Returns
-------
Loads the TenetoBIDS.selected_files and replaces any instances of confound meeting the exclusion_criteria with replace_with. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L721-L829 |
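A sketch of time-point scrubbing with the function above (the confound name and threshold are assumptions):

# Censor time-points where framewise displacement exceeds 0.2, interpolate
# them with a cubic spline, and mark the file bad if more than 20% of
# time-points were censored
tnet.set_exclusion_timepoint('FramewiseDisplacement', '>0.2',
                             replace_with='cubicspline', tol=0.2)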
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.make_parcellation | def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='defaults', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None):
"""
Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.
Parameters
-----------
parcellation : str
specify which parcellation you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
parc_type : str
can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
network : str
if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
removeconfounds : bool
if true, regresses out confounds that are specified in self.set_confounds with linear regression.
update_pipeline : bool
TenetoBIDS gets updated with the parcellated files being selected.
tag : str or list
any additional tag that must be in the file name. After the tag there must be either an underscore or a period (following BIDS).
clean_params : dict
**kwargs for nilearn function nilearn.signal.clean
yeonetworkn : int (7 or 17)
Only relevant when the parcellation is schaefer2018. Use 7 or 17 template networks
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Returns
-------
Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
To load these files call TenetoBIDS.load_parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
parc_name = parcellation.split('_')[0].lower()
# Check confounds have been specified
if not self.confounds and removeconfounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first.')
# Check confounds have been specified
if update_pipeline == False and removeconfounds:
raise ValueError(
'Pipeline must be updated in order to remove confounds within this function.')
# In theory these should be the same. So at the moment, it goes through each element and checks they are matched.
# A matching algorithm may be needed if cases arise where this isn't the case
files = self.get_selected_files(quiet=1)
# Load network communities, if possible.
self.set_network_communities(parcellation, netn=yeonetworkn)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if not parc_params:
parc_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._run_make_parcellation, f, i, tag, parcellation,
parc_name, parc_type, parc_params) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
if update_pipeline == True:
if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
self.set_confound_pipeline(self.pipeline)
self.set_pipeline('teneto_' + teneto.__version__)
self.set_pipeline_subdir('parcellation')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]})
self.set_bids_suffix('roi')
if removeconfounds:
self.removeconfounds(
clean_params=clean_params, transpose=None, njobs=njobs) | python | def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='defaults', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None):
"""
Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.
Parameters
-----------
parcellation : str
specify which parcellation you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
parc_type : str
can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
network : str
if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
removeconfounds : bool
if true, regresses out confounds that are specified in self.set_confounds with linear regression.
update_pipeline : bool
TenetoBIDS gets updated with the parcellated files being selected.
tag : str or list
any additional tag that must be in the file name. After the tag there must be either an underscore or a period (following BIDS).
clean_params : dict
**kwargs for nilearn function nilearn.signal.clean
yeonetworkn : int (7 or 17)
Only relevant when the parcellation is schaefer2018. Use 7 or 17 template networks
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Returns
-------
Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
To load these files call TenetoBIDS.load_parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
parc_name = parcellation.split('_')[0].lower()
# Check confounds have been specified
if not self.confounds and removeconfounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first.')
# Check confounds have been specified
if update_pipeline == False and removeconfounds:
raise ValueError(
'Pipeline must be updated in order to remove confounds within this function.')
# In theory these should be the same. So at the moment, it goes through each element and checks they are matched.
# A matching algorithm may be needed if cases arise where this isn't the case
files = self.get_selected_files(quiet=1)
# Load network communities, if possible.
self.set_network_communities(parcellation, netn=yeonetworkn)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if not parc_params:
parc_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._run_make_parcellation, f, i, tag, parcellation,
parc_name, parc_type, parc_params) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
if update_pipeline == True:
if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
self.set_confound_pipeline(self.pipeline)
self.set_pipeline('teneto_' + teneto.__version__)
self.set_pipeline_subdir('parcellation')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]})
self.set_bids_suffix('roi')
if removeconfounds:
self.removeconfounds(
clean_params=clean_params, transpose=None, njobs=njobs) | Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.
Parameters
-----------
parcellation : str
specify which parcellation you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
parc_type : str
can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
network : str
if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
removeconfounds : bool
if true, regresses out confounds that are specified in self.set_confounds with linear regression.
update_pipeline : bool
TenetoBIDS gets updated with the parcellated files being selected.
tag : str or list
any additional tag that must be in the file name. After the tag there must be either an underscore or a period (following BIDS).
clean_params : dict
**kwargs for nilearn function nilearn.signal.clean
yeonetworkn : int (7 or 17)
Only relevant when the parcellation is schaefer2018. Use 7 or 17 template networks
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Returns
-------
Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
To load these files call TenetoBIDS.load_parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L847-L930 |
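A hedged sketch of parcellating the selected files (the confound names are hypothetical; the parcellation name is one of those listed in the docstring):

# Set confounds first, then parcellate and regress them out in one step
tnet.set_confounds(['FramewiseDisplacement', 'WhiteMatter'])
tnet.make_parcellation('power2012_264', parc_type='sphere',
                       removeconfounds=True, tag='clean')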
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.communitydetection | def communitydetection(self, community_detection_params, community_type='temporal', tag=None, file_hdr=False, file_idx=False, njobs=None):
"""
Calls temporal_louvain_with_consensus on connectivity data
Parameters
----------
community_detection_params : dict
kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus
community_type : str
Either 'temporal' or 'static'. If temporal, a community partition is estimated for each time-point.
file_idx : bool (default false)
if true, an index column is present in the data and will be ignored
file_hdr : bool (default false)
if true, a header row is present in the data and will be ignored
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Note
----
All non-positive edges are made to zero.
Returns
-------
List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if community_type == 'temporal':
files = self.get_selected_files(quiet=True)
# Run check to make sure files are tvc input
for f in files:
if 'tvc' not in f:
raise ValueError(
'tvc tag not found in filename. TVC data must be used in communitydetection (perhaps run TenetoBIDS.derive first?).')
elif community_type == 'static':
files = self.get_selected_files(
quiet=True, pipeline='functionalconnectivity')
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._run_communitydetection, f, community_detection_params, community_type, file_hdr,
file_idx, tag) for i, f in enumerate(files) if not tag or tag + '_' in f or tag + '.' in f}
for j in as_completed(job):
j.result() | python | def communitydetection(self, community_detection_params, community_type='temporal', tag=None, file_hdr=False, file_idx=False, njobs=None):
"""
Calls temporal_louvain_with_consensus on connectivity data
Parameters
----------
community_detection_params : dict
kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus
community_type : str
Either 'temporal' or 'static'. If temporal, a community partition is estimated for each time-point.
file_idx : bool (default false)
if true, an index column is present in the data and will be ignored
file_hdr : bool (default false)
if true, a header row is present in the data and will be ignored
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Note
----
All non-positive edges are made to zero.
Returns
-------
List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if community_type == 'temporal':
files = self.get_selected_files(quiet=True)
# Run check to make sure files are tvc input
for f in files:
if 'tvc' not in f:
raise ValueError(
'tvc tag not found in filename. TVC data must be used in communitydetection (perhaps run TenetoBIDS.derive first?).')
elif community_type == 'static':
files = self.get_selected_files(
quiet=True, pipeline='functionalconnectivity')
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._run_communitydetection, f, community_detection_params, community_type, file_hdr,
file_idx, tag) for i, f in enumerate(files) if not tag or tag + '_' in f or tag + '.' in f}
for j in as_completed(job):
j.result() | Calls temporal_louvain_with_consensus on connectivity data
Parameters
----------
community_detection_params : dict
kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus
community_type : str
Either 'temporal' or 'static'. If temporal, a community partition is estimated for each time-point.
file_idx : bool (default false)
if true, an index column is present in the data and will be ignored
file_hdr : bool (default false)
if true, a header row is present in the data and will be ignored
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Note
----
All non-positive edges are made to zero.
Returns
-------
List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/ | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L950-L1001 |
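A sketch of temporal community detection on previously derived time-varying connectivity (the kwargs dictionary is an assumption; see teneto.communitydetection.louvain.temporal_louvain_with_consensus for the actual options):

community_params = {'resolution': 1.0}  # hypothetical kwarg name
tnet.communitydetection(community_params, community_type='temporal')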
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.removeconfounds | def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None):
"""
Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kwargs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. If unset, TenetoBIDS.njobs is used.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Saves all TenetoBIDS.get_selected_files with confounds removed, with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not self.confounds and not confounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.')
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if confounds:
self.set_confounds(confounds)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
if not clean_params:
clean_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
self.set_pipeline('teneto_' + teneto.__version__)
self.set_bids_suffix('roi')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]}) | python | def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None):
"""
Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kwargs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. If unset, TenetoBIDS.njobs is used.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Saves all TenetoBIDS.get_selected_files with confounds removed, with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data.
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not self.confounds and not confounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.')
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if confounds:
self.set_confounds(confounds)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
if not clean_params:
clean_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
self.set_pipeline('teneto_' + teneto.__version__)
self.set_bids_suffix('roi')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]}) | Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kwargs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. If unset, TenetoBIDS.njobs is used.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Saves all TenetoBIDS.get_selected_files with confounds removed, with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1036-L1094 |
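A usage sketch for the confound removal above (clean_params is forwarded to nilearn.signal.clean; detrend is a real nilearn option, the confound name is an assumption):

tnet.removeconfounds(confounds=['FramewiseDisplacement'],
                     clean_params={'detrend': True}, tag='clean')
# Afterwards the object points at the cleaned teneto derivatives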
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.networkmeasures | def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None):
"""
Calculates a network measure
For available functions see: teneto.networkmeasures
Parameters
----------
measure : str or list
Name of function(s) from teneto.networkmeasures that will be run.
measure_params : dict or list of dictionaries
Containing kwargs for each function named in measure.
See note regarding Communities key.
tag : str
Add additional tag to saved filenames.
Note
----
In measure_params, the communities key can equal 'template', 'static', or 'temporal'.
These options must be precalculated. If template, Teneto tries to load the default for the parcellation. If static, loads static communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads temporal communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy
Returns
-------
Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func/temporalnetwork/MEASURE/
Load the measure with tenetoBIDS.load_network_measure
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
# measure can be string or list
if isinstance(measure, str):
measure = [measure]
# measure_params can be dictionaary or list of dictionaries
if isinstance(measure_params, dict):
measure_params = [measure_params]
if measure_params and len(measure) != len(measure_params):
raise ValueError('Number of identified measure_params (' + str(len(measure_params)) +
') differs from number of identified measures (' + str(len(measure)) + '). Leave a blank dictionary if default methods are wanted')
files = self.get_selected_files(quiet=1)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_networkmeasures, f, tag, measure, measure_params) for f in files}
for j in as_completed(job):
j.result() | python | def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None):
"""
Calculates a network measure
For available functions see: teneto.networkmeasures
Parameters
----------
measure : str or list
Name of function(s) from teneto.networkmeasures that will be run.
measure_params : dict or list of dictionaries
Containing kwargs for each function named in measure.
See note regarding Communities key.
tag : str
Add additional tag to saved filenames.
Note
----
In measure_params, the communities key can equal 'template', 'static', or 'temporal'.
These options must be precalculated. If template, Teneto tries to load the default for the parcellation. If static, loads static communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads temporal communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy
Returns
-------
Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func/temporalnetwork/MEASURE/
Load the measure with tenetoBIDS.load_network_measure
"""
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
# measure can be string or list
if isinstance(measure, str):
measure = [measure]
# measure_params can be dictionaary or list of dictionaries
if isinstance(measure_params, dict):
measure_params = [measure_params]
if measure_params and len(measure) != len(measure_params):
raise ValueError('Number of identified measure_params (' + str(len(measure_params)) +
') differs from number of identified measures (' + str(len(measure)) + '). Leave a blank dictionary if default methods are wanted')
files = self.get_selected_files(quiet=1)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_networkmeasures, f, tag, measure, measure_params) for f in files}
for j in as_completed(job):
j.result() | Calculates a network measure
For available functions see: teneto.networkmeasures
Parameters
----------
measure : str or list
Name of function(s) from teneto.networkmeasures that will be run.
measure_params : dict or list of dictionaries
Containing kwargs for each function named in measure.
See note regarding Communities key.
tag : str
Add additional tag to saved filenames.
Note
----
In measure_params, the communities key can equal 'template', 'static', or 'temporal'.
These options must be precalculated. If template, Teneto tries to load the default for the parcellation. If static, loads static communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads temporal communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy
Returns
-------
Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func/temporalnetwork/MEASURE/
Load the measure with tenetoBIDS.load_network_measure | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1153-L1209 |
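A sketch of computing measures per file (both measure names exist in teneto.networkmeasures; the kwargs shown are assumptions):

# One kwargs dictionary per measure; an empty dict means defaults
tnet.networkmeasures(measure=['temporal_degree_centrality', 'volatility'],
                     measure_params=[{'calc': 'time'}, {}])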
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.set_confound_pipeline | def set_confound_pipeline(self, confound_pipeline):
"""
There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep).
To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are.
Parameters
----------
confound_pipeline : str
Directory in the BIDS_dir where the confounds file is.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + confound_pipeline):
print('Specified derivative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.confound_pipeline = confound_pipeline | python | def set_confound_pipeline(self, confound_pipeline):
"""
There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep).
To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are.
Parameters
----------
confound_pipeline : str
Directory in the BIDS_dir where the confounds file is.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + confound_pipeline):
print('Specified derivative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.confound_pipeline = confound_pipeline | There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep).
To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are.
Parameters
----------
confound_pipeline : str
Directory in the BIDS_dir where the confounds file is. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1319-L1340 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.set_bids_suffix | def set_bids_suffix(self, bids_suffix):
"""
Sets the BIDS suffix, i.e. the final part of the file name (typically the last analysis step).
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
self.bids_suffix = bids_suffix | python | def set_bids_suffix(self, bids_suffix):
"""
Sets the BIDS suffix, i.e. the final part of the file name (typically the last analysis step).
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
self.bids_suffix = bids_suffix | Sets the BIDS suffix, i.e. the final part of the file name (typically the last analysis step). | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1426-L1431 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.set_pipeline | def set_pipeline(self, pipeline):
"""
Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline):
print('Specified derivative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.pipeline = pipeline | python | def set_pipeline(self, pipeline):
"""
Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline):
print('Specified derivative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.pipeline = pipeline | Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1433-L1443 |
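The three setters above are typically used together when re-pointing an analysis; a minimal sketch (the pipeline and suffix values are hypothetical):

tnet.set_pipeline('fmriprep')            # main derivative directory
tnet.set_confound_pipeline('fmriprep')   # where the confound files live
tnet.set_bids_suffix('preproc')          # final part of selected filenames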
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.print_dataset_summary | def print_dataset_summary(self):
"""
Prints information about the BIDS data and the files currently selected.
"""
print('--- DATASET INFORMATION ---')
print('--- Subjects ---')
if self.raw_data_exists:
if self.BIDS.get_subjects():
print('Number of subjects (in dataset): ' +
str(len(self.BIDS.get_subjects())))
print('Subjects (in dataset): ' +
', '.join(self.BIDS.get_subjects()))
else:
print('NO SUBJECTS FOUND (is the BIDS directory specified correctly?)')
print('Number of subjects (selected): ' +
str(len(self.bids_tags['sub'])))
print('Subjects (selected): ' + ', '.join(self.bids_tags['sub']))
if isinstance(self.bad_subjects, list):
print('Bad subjects: ' + ', '.join(self.bad_subjects))
else:
print('Bad subjects: 0')
print('--- Tasks ---')
if self.raw_data_exists:
if self.BIDS.get_tasks():
print('Number of tasks (in dataset): ' +
str(len(self.BIDS.get_tasks())))
print('Tasks (in dataset): ' + ', '.join(self.BIDS.get_tasks()))
if 'task' in self.bids_tags:
print('Number of tasks (selected): ' +
str(len(self.bids_tags['task'])))
print('Tasks (selected): ' + ', '.join(self.bids_tags['task']))
else:
print('No task names found')
print('--- Runs ---')
if self.raw_data_exists:
if self.BIDS.get_runs():
print('Number of runs (in dataset): ' +
str(len(self.BIDS.get_runs())))
print('Runs (in dataset): ' + ', '.join(self.BIDS.get_runs()))
if 'run' in self.bids_tags:
print('Number of runs (selected): ' +
str(len(self.bids_tags['run'])))
print('Runs (selected): ' + ', '.join(self.bids_tags['run']))
else:
print('No run names found')
print('--- Sessions ---')
if self.raw_data_exists:
if self.BIDS.get_sessions():
print('Number of sessions (in dataset): ' +
str(len(self.BIDS.get_sessions())))
print('Sessions (in dataset): ' +
', '.join(self.BIDS.get_sessions()))
if 'ses' in self.bids_tags:
print('Number of sessions (selected): ' +
str(len(self.bids_tags['ses'])))
print('Sessions (selected): ' + ', '.join(self.bids_tags['ses']))
else:
print('No session names found')
print('--- PREPROCESSED DATA (Pipelines/Derivatives) ---')
if not self.pipeline:
print('Derivative pipeline not set. To set, run TN.set_pipeline()')
else:
print('Pipeline: ' + self.pipeline)
if self.pipeline_subdir:
print('Pipeline subdirectories: ' + self.pipeline_subdir)
selected_files = self.get_selected_files(quiet=1)
if selected_files:
print('--- SELECTED DATA ---')
print('Number of selected files: ' + str(len(selected_files)))
print('\n - '.join(selected_files)) | python | def print_dataset_summary(self):
"""
Prints information about the BIDS data and the files currently selected.
"""
print('--- DATASET INFORMATION ---')
print('--- Subjects ---')
if self.raw_data_exists:
if self.BIDS.get_subjects():
print('Number of subjects (in dataset): ' +
str(len(self.BIDS.get_subjects())))
print('Subjects (in dataset): ' +
', '.join(self.BIDS.get_subjects()))
else:
print('NO SUBJECTS FOUND (is the BIDS directory specified correctly?)')
print('Number of subjects (selected): ' +
str(len(self.bids_tags['sub'])))
print('Subjects (selected): ' + ', '.join(self.bids_tags['sub']))
if isinstance(self.bad_subjects, list):
print('Bad subjects: ' + ', '.join(self.bad_subjects))
else:
print('Bad subjects: 0')
print('--- Tasks ---')
if self.raw_data_exists:
if self.BIDS.get_tasks():
print('Number of tasks (in dataset): ' +
str(len(self.BIDS.get_tasks())))
print('Tasks (in dataset): ' + ', '.join(self.BIDS.get_tasks()))
if 'task' in self.bids_tags:
print('Number of tasks (selected): ' +
str(len(self.bids_tags['task'])))
print('Tasks (selected): ' + ', '.join(self.bids_tags['task']))
else:
print('No task names found')
print('--- Runs ---')
if self.raw_data_exists:
if self.BIDS.get_runs():
print('Number of runs (in dataset): ' +
str(len(self.BIDS.get_runs())))
print('Runs (in dataset): ' + ', '.join(self.BIDS.get_runs()))
if 'run' in self.bids_tags:
print('Number of runs (selected): ' +
str(len(self.bids_tags['run'])))
print('Runs (selected): ' + ', '.join(self.bids_tags['run']))
else:
print('No run names found')
print('--- Sessions ---')
if self.raw_data_exists:
if self.BIDS.get_sessions():
print('Number of sessions (in dataset): ' +
str(len(self.BIDS.get_sessions())))
print('Sessions (in dataset): ' +
', '.join(self.BIDS.get_sessions()))
if 'ses' in self.bids_tags:
print('Number of sessions (selected): ' +
str(len(self.bids_tags['ses'])))
print('Sessions (selected): ' + ', '.join(self.bids_tags['ses']))
else:
print('No session names found')
print('--- PREPROCESSED DATA (Pipelines/Derivatives) ---')
if not self.pipeline:
print('Derivative pipeline not set. To set, run TN.set_pipeline()')
else:
print('Pipeline: ' + self.pipeline)
if self.pipeline_subdir:
print('Pipeline subdirectories: ' + self.pipeline_subdir)
selected_files = self.get_selected_files(quiet=1)
if selected_files:
print('--- SELECTED DATA ---')
print('Numnber of selected files: ' + str(len(selected_files)))
print('\n - '.join(selected_files)) | Prints information about the BIDS data and the files currently selected. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1454-L1532 |
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.load_frompickle | def load_frompickle(cls, fname, reload_object=False):
"""
Loads a saved instance of TenetoBIDS.
fname : str
path to pickle object (output of TenetoBIDS.save_aspickle)
reload_object : bool (default False)
reloads object by calling teneto.TenetoBIDS (some information lost, for development)
Returns
-------
self :
TenetoBIDS instance
"""
if fname[-4:] != '.pkl':
fname += '.pkl'
with open(fname, 'rb') as f:
tnet = pickle.load(f)
if reload_object:
reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tnet.pipeline_subdir, bids_tags=tnet.bids_tags, bids_suffix=tnet.bids_suffix,
bad_subjects=tnet.bad_subjects, confound_pipeline=tnet.confound_pipeline, raw_data_exists=tnet.raw_data_exists, njobs=tnet.njobs)
reloadnet.history = tnet.history
tnet = reloadnet
return tnet | python | def load_frompickle(cls, fname, reload_object=False):
"""
Loads a saved instance of TenetoBIDS.
fname : str
path to pickle object (output of TenetoBIDS.save_aspickle)
reload_object : bool (default False)
reloads object by calling teneto.TenetoBIDS (some information lost, for development)
Returns
-------
self :
TenetoBIDS instance
"""
if fname[-4:] != '.pkl':
fname += '.pkl'
with open(fname, 'rb') as f:
tnet = pickle.load(f)
if reload_object:
reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tnet.pipeline_subdir, bids_tags=tnet.bids_tags, bids_suffix=tnet.bids_suffix,
bad_subjects=tnet.bad_subjects, confound_pipeline=tnet.confound_pipeline, raw_data_exists=tnet.raw_data_exists, njobs=tnet.njobs)
reloadnet.history = tnet.history
tnet = reloadnet
return tnet | Loads a saved instance of TenetoBIDS.
fname : str
path to pickle object (output of TenetoBIDS.save_aspickle)
reload_object : bool (default False)
reloads object by calling teneto.TenetoBIDS (some information lost, for development)
Returns
-------
self :
TenetoBIDS instance | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1541-L1564 |
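A round-trip sketch with the loader above (save_aspickle is the counterpart mentioned in the docstring; the path is hypothetical):

tnet.save_aspickle('/data/mybids/tnet_state.pkl')
tnet2 = teneto.TenetoBIDS.load_frompickle('/data/mybids/tnet_state.pkl')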
wiheto/teneto | teneto/classes/bids.py | TenetoBIDS.load_data | def load_data(self, datatype='tvc', tag=None, measure=''):
"""
Function loads time-varying connectivity estimates created by the TenetoBIDS.derive function.
The default grabs all data (in numpy arrays) in the teneto/../func/tvc/ directory.
Data is placed in teneto.tvc_data_
Parameters
----------
datatype : str
\'tvc\', \'parcellation\', \'participant\', \'temporalnetwork\'
tag : str or list
any additional tag that must be in the file name. After the tag there must be either an underscore or a period (following BIDS).
measure : str
required when datatype is temporalnetwork. A networkmeasure that should be loaded.
Returns
-------
tvc_data_ : numpy array
Containing the parcellation data. Each file is appended to the first dimension of the numpy array.
tvc_trialinfo_ : pandas data frame
Containing the subject info (all BIDS tags) in the numpy array.
"""
if datatype == 'temporalnetwork' and not measure:
raise ValueError(
'When datatype is temporalnetwork, \'measure\' must also be specified.')
self.add_history(inspect.stack()[0][3], locals(), 1)
data_list = []
trialinfo_list = []
for s in self.bids_tags['sub']:
# Define base folder
base_path, file_list, datainfo = self._get_filelist(
datatype, s, tag, measure=measure)
if base_path:
for f in file_list:
# Include only if all analysis step tags are present
# Get all BIDS tags. i.e. in 'sub-AAA', get 'sub' as key and 'AAA' as item.
# Ignore if tsv file is empty
try:
filetags = get_bids_tag(f, 'all')
data_list.append(load_tabular_file(base_path + f))
# Only return trialinfo if datatype is trlinfo
if datainfo == 'trlinfo':
trialinfo_list.append(
pd.DataFrame(filetags, index=[0]))
except pd.errors.EmptyDataError:
pass
# If group data and length of output is one, don't make it a list
if datatype == 'group' and len(data_list) == 1:
data_list = data_list[0]
if measure:
data_list = {measure: data_list}
setattr(self, datatype + '_data_', data_list)
if trialinfo_list:
out_trialinfo = pd.concat(trialinfo_list)
out_trialinfo.reset_index(inplace=True, drop=True)
setattr(self, datatype + '_trialinfo_', out_trialinfo) | python | def load_data(self, datatype='tvc', tag=None, measure=''):
"""
Function loads time-varying connectivity estimates created by the TenetoBIDS.derive function.
The default grabs all data (in numpy arrays) in the teneto/../func/tvc/ directory.
Data is placed in teneto.tvc_data_
Parameters
----------
datatype : str
\'tvc\', \'parcellation\', \'participant\', \'temporalnetwork\'
tag : str or list
any additional tag that must be in the file name. After the tag there must be either an underscore or a period (following BIDS).
measure : str
required when datatype is 'temporalnetwork'. A network measure that should be loaded.
Returns
-------
tvc_data_ : numpy array
Containing the parcellation data. Each file is appended to the first dimension of the numpy array.
tvc_trialinfo_ : pandas data frame
Containing the subject info (all BIDS tags) for each entry in the data array.
"""
if datatype == 'temporalnetwork' and not measure:
raise ValueError(
'When datatype is temporalnetwork, \'measure\' must also be specified.')
self.add_history(inspect.stack()[0][3], locals(), 1)
data_list = []
trialinfo_list = []
for s in self.bids_tags['sub']:
# Define base folder
base_path, file_list, datainfo = self._get_filelist(
datatype, s, tag, measure=measure)
if base_path:
for f in file_list:
# Include only if all analysis step tags are present
# Get all BIDS tags. i.e. in 'sub-AAA', get 'sub' as key and 'AAA' as item.
# Ignore if tsv file is empty
try:
filetags = get_bids_tag(f, 'all')
data_list.append(load_tabular_file(base_path + f))
# Only return trialinfo if datatype is trlinfo
if datainfo == 'trlinfo':
trialinfo_list.append(
pd.DataFrame(filetags, index=[0]))
except pd.errors.EmptyDataError:
pass
# If group data and length of output is one, don't make it a list
if datatype == 'group' and len(data_list) == 1:
data_list = data_list[0]
if measure:
data_list = {measure: data_list}
setattr(self, datatype + '_data_', data_list)
if trialinfo_list:
out_trialinfo = pd.concat(trialinfo_list)
out_trialinfo.reset_index(inplace=True, drop=True)
setattr(self, datatype + '_trialinfo_', out_trialinfo) | Function loads time-varying connectivity estimates created by the TenetoBIDS.derive function.
The default grabs all data (in numpy arrays) in the teneto/../func/tvc/ directory.
Data is placed in teneto.tvc_data_
Parameters
----------
datatype : str
\'tvc\', \'parcellation\', \'participant\', \'temporalnetwork\'
tag : str or list
any additional tag that must be in the file name. After the tag there must be either an underscore or a period (following BIDS).
measure : str
required when datatype is 'temporalnetwork'. A network measure that should be loaded.
Returns
-------
tvc_data_ : numpy array
Containing the parcellation data. Each file is appended to the first dimension of the numpy array.
tvc_trialinfo_ : pandas data frame
Containing the subject info (all BIDS tags) for each entry in the data array. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L1636-L1698 |
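A hedged usage sketch, assuming `tnet` is a configured TenetoBIDS instance whose derive step has already written tvc estimates ('volatility' is a hypothetical measure name used only for illustration):
tnet.load_data(datatype='tvc')  # populates tnet.tvc_data_ (and tnet.tvc_trialinfo_ when trial info exists)
tnet.load_data(datatype='temporalnetwork', measure='volatility')  # measure is required for this datatype
print(tnet.tvc_trialinfo_)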
wiheto/teneto | teneto/networkmeasures/temporal_closeness_centrality.py | temporal_closeness_centrality | def temporal_closeness_centrality(tnet=None, paths=None):
'''
Returns temporal closeness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path
Returns
--------
close : array
temporal closeness centrality (nodal measure)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
closeness = np.nansum(1 / np.nanmean(pathmat, axis=2),
axis=1) / (pathmat.shape[1] - 1)
return closeness | python | def temporal_closeness_centrality(tnet=None, paths=None):
'''
Returns temporal closeness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path
Returns
--------
close : array
temporal closeness centrality (nodal measure)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
closeness = np.nansum(1 / np.nanmean(pathmat, axis=2),
axis=1) / (pathmat.shape[1] - 1)
return closeness | Returns temporal closeness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path
Returns
--------
close : array
temporal closeness centrality (nodal measure) | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/temporal_closeness_centrality.py#L9-L52 |
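A minimal sketch on a small binary undirected network, assuming graphlet (array) input is accepted directly, as the docstring's 'graphlet or contact' note suggests:
import numpy as np
import teneto
G = np.zeros([3, 3, 4])
G[0, 1, [0, 2]] = 1   # edge 0-1 active at t=0 and t=2
G[1, 2, [1, 3]] = 1   # edge 1-2 active at t=1 and t=3
G += G.transpose(1, 0, 2)   # symmetrize so the network is undirected
close = teneto.networkmeasures.temporal_closeness_centrality(tnet=G)
# Precomputed paths can be passed instead via the `paths` argument; passing both raises ValueError.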
wiheto/teneto | teneto/networkmeasures/sid.py | sid | def sid(tnet, communities, axis=0, calc='global', decay=0):
r"""
Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_
Parameters
----------
tnet: array, dict
Temporal network input (graphlet or contact). Allowed nettype: 'bu', 'bd', 'wu', 'wd'
communities : array
a Nx1 vector or NxT array of community assignment.
axis : int
Dimension that is returned 0 or 1 (default 0).
Note, only relevant for directed networks.
i.e. if 0, node i has Aijt summed over j and t.
and if 1, node j has Aijt summed over i and t.
calc : str
'global' returns the overall SID for each time point (a 1 x time vector) (default);
'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing;
'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities.
decay: int
if calc = 'time', then decay is possible where the centrality of
the previous time point is carried over to the next time point but decays
at a value of $e^{-decay}$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$.
Returns
-------
sid: array
segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time)
Notes
------
SID tries to quantify if there is more segregation or integration compared to other time-points.
If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual.
There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'),
the third is a value for each community-community pairing (calc='community_pairs').
First we calculate the temporal strength for each node. This is calculated by
.. math:: S_{i,t} = \sum_j G_{i,j,t}
The pairwise SID, when the network is undirected, is calculated by
.. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({1 \over {N_A N_B}}) S_{A,B,t}
Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A.
When calculating the SID for a community, it is calculated by:
.. math:: SID_{A,t} = \sum_b^C ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({1 \over {N_A N_b}}) S_{A,b,t}
Where C is the number of communities.
When calculating the SID globally, it is calculated by:
.. math:: SID_{t} = \sum_a^C \sum_b^C ({2 \over {N_a (N_a - 1)}}) S_{a,t} - ({1 \over {N_a N_b}}) S_{a,b,t}
References
-----------
.. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_]
"""
tnet, netinfo = utils.process_input(tnet, ['C', 'G', 'TN'])
D = temporal_degree_centrality(
tnet, calc='time', communities=communities, decay=decay)
# Check network output (order of communities works)
network_ids = np.unique(communities)
communities_size = np.array([sum(communities == n) for n in network_ids])
sid = np.zeros([network_ids.max()+1, network_ids.max()+1, tnet.shape[-1]])
for n in network_ids:
for m in network_ids:
betweenmodulescaling = 1/(communities_size[n]*communities_size[m])
if netinfo['nettype'][1] == 'd':
withinmodulescaling = 1 / \
(communities_size[n]*communities_size[n])
elif netinfo['nettype'][1] == 'u':
withinmodulescaling = 2 / \
(communities_size[n]*(communities_size[n]-1))
if n == m:
betweenmodulescaling = withinmodulescaling
sid[n, m, :] = withinmodulescaling * \
D[n, n, :] - betweenmodulescaling * D[n, m, :]
# If nans emerge, then there is no connection between networks at that time point, so set these to 0.
sid[np.isnan(sid)] = 0
if calc == 'global':
return np.sum(np.sum(sid, axis=1), axis=0)
elif calc == 'community_avg':
return np.sum(sid, axis=axis)
else:
return sid | python | def sid(tnet, communities, axis=0, calc='global', decay=0):
r"""
Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_
Parameters
----------
tnet: array, dict
Temporal network input (graphlet or contact). Allowed nettype: 'bu', 'bd', 'wu', 'wd'
communities : array
a Nx1 vector or NxT array of community assignment.
axis : int
Dimension that is returned 0 or 1 (default 0).
Note, only relevant for directed networks.
i.e. if 0, node i has Aijt summed over j and t.
and if 1, node j has Aijt summed over i and t.
calc : str
'global' returns the overall SID for each time point (a 1 x time vector) (default);
'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing;
'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities.
decay: int
if calc = 'time', then decay is possible where the centrality of
the previous time point is carried over to the next time point but decays
at a value of $e^{-decay}$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$.
Returns
-------
sid: array
segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time)
Notes
------
SID tries to quantify if there is more segregation or integration compared to other time-points.
If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual.
There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'),
the third is a value for each community-community pairing (calc='community_pairs').
First we calculate the temporal strength for each node. This is calculated by
.. math:: S_{i,t} = \sum_j G_{i,j,t}
The pairwise SID, when the network is undirected, is calculated by
.. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({1 \over {N_A N_B}}) S_{A,B,t}
Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A.
When calculating the SID for a community, it is calculated by:
.. math:: SID_{A,t} = \sum_b^C ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({1 \over {N_A N_b}}) S_{A,b,t}
Where C is the number of communities.
When calculating the SID globally, it is calculated by:
.. math:: SID_{t} = \sum_a^C \sum_b^C ({2 \over {N_a (N_a - 1)}}) S_{a,t} - ({1 \over {N_a N_b}}) S_{a,b,t}
References
-----------
.. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_]
"""
tnet, netinfo = utils.process_input(tnet, ['C', 'G', 'TN'])
D = temporal_degree_centrality(
tnet, calc='time', communities=communities, decay=decay)
# Check network output (order of communities works)
network_ids = np.unique(communities)
communities_size = np.array([sum(communities == n) for n in network_ids])
sid = np.zeros([network_ids.max()+1, network_ids.max()+1, tnet.shape[-1]])
for n in network_ids:
for m in network_ids:
betweenmodulescaling = 1/(communities_size[n]*communities_size[m])
if netinfo['nettype'][1] == 'd':
withinmodulescaling = 1 / \
(communities_size[n]*communities_size[n])
elif netinfo['nettype'][1] == 'u':
withinmodulescaling = 2 / \
(communities_size[n]*(communities_size[n]-1))
if n == m:
betweenmodulescaling = withinmodulescaling
sid[n, m, :] = withinmodulescaling * \
D[n, n, :] - betweenmodulescaling * D[n, m, :]
# If nans emerge, then there is no connection between networks at that time point, so set these to 0.
sid[np.isnan(sid)] = 0
if calc == 'global':
return np.sum(np.sum(sid, axis=1), axis=0)
elif calc == 'community_avg':
return np.sum(sid, axis=axis)
else:
return sid | r"""
Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_
Parameters
----------
tnet: array, dict
Temporal network input (graphlet or contact). Allowed nettype: 'bu', 'bd', 'wu', 'wd'
communities : array
a Nx1 vector or NxT array of community assignment.
axis : int
Dimension that is returned 0 or 1 (default 0).
Note, only relevant for directed networks.
i.e. if 0, node i has Aijt summed over j and t.
and if 1, node j has Aijt summed over i and t.
calc : str
'global' returns the overall SID for each time point (a 1 x time vector) (default);
'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing;
'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities.
decay: int
if calc = 'time', then decay is possible where the centrality of
the previous time point is carried over to the next time point but decays
at a value of $e^{-decay}$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$.
Returns
-------
sid: array
segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time)
Notes
------
SID tries to quantify if there is more segregation or integration compared to other time-points.
If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual.
There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'),
the third is a value for each community-community pairing (calc='community_pairs').
First we calculate the temporal strength for each node. This is calculated by
.. math:: S_{i,t} = \sum_j G_{i,j,t}
The pairwise SID, when the network is undirected, is calculated by
.. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({1 \over {N_A N_B}}) S_{A,B,t}
Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A.
When calculating the SID for a community, it is calculated by:
.. math:: SID_{A,t} = \sum_b^C ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({1 \over {N_A N_b}}) S_{A,b,t}
Where C is the number of communities.
When calculating the SID globally, it is calculated by:
.. math:: SID_{t} = \sum_a^C \sum_b^C ({2 \over {N_a (N_a - 1)}}) S_{a,t} - ({1 \over {N_a N_b}}) S_{a,b,t}
References
-----------
.. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_] | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/sid.py#L8-L111 |
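A minimal sketch with two communities of two nodes each (random weights with a fixed seed; all values are illustrative):
import numpy as np
import teneto
np.random.seed(2019)
G = np.random.rand(4, 4, 10)
G = (G + G.transpose(1, 0, 2)) / 2          # symmetrize -> weighted undirected ('wu')
G[np.arange(4), np.arange(4), :] = 0        # remove self-edges
communities = np.array([0, 0, 1, 1])
sid_global = teneto.networkmeasures.sid(G, communities, calc='global')          # one value per time point
sid_pairs = teneto.networkmeasures.sid(G, communities, calc='community_pairs')  # shape (2, 2, 10)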
wiheto/teneto | teneto/networkmeasures/bursty_coeff.py | bursty_coeff | def bursty_coeff(data, calc='edge', nodes='all', communities=None, threshold_type=None, threshold_level=None, threshold_params=None):
r"""
Calculates the bursty coefficient.[1][2]
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
A weighted network can be used if you specify threshold_type and threshold_level, which will binarize it.
calc : str
Calculate the bursty coeff over what. Options include 'edge': calculate B on all ICTs between node i and j (default); 'node': calculate B on all ICTs connected to node i;
'communities': calculate B for each communities (argument communities then required);
'meanEdgePerNode': first calculate the ICTs between node i and j, then take the mean over all j.
nodes: list or str
Options: 'all': do for all nodes (default) or list of node indexes to calculate.
communities : array, optional
None (default) or Nx1 vector of communities assignment. This returns a "centrality" per communities instead of per node.
threshold_type : str, optional
If input is weighted. Specify binarizing threshold type. See teneto.utils.binarize
threshold_level : str, optional
If input is weighted. Specify binarizing threshold level. See teneto.utils.binarize
threshold_params : dict
If input is weighted. Dictionary with kwargs for teneto.utils.binarize
Returns
-------
B : array
Bursty coefficient per edge or node (depending on calc).
Notes
------
The burstiness coefficient, B, is defined in refs [1,2] as:
.. math:: B = {{\sigma_{ICT} - \mu_{ICT}} \over {\sigma_{ICT} + \mu_{ICT}}}
Where :math:`\sigma_{ICT}` and :math:`\mu_{ICT}` are the standard deviation and mean of the inter-contact times respectively (see teneto.networkmeasures.intercontacttimes)
When B > 0, indicates bursty intercontact times. When B < 0, indicates periodic/tonic intercontact times. When B = 0, indicates random.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create 2 temporal networks of 2 nodes and 60 time points. The first has periodic edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
The two networks look like this:
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
G_bursty = np.zeros([2, 2, 60])
G_bursty[:,:,ts_bursty] = 1
G_periodic = np.zeros([2, 2, 60])
ts_periodic = np.arange(0, 60, 2)
G_periodic[:,:,ts_periodic] = 1
fig,ax = plt.subplots(2, 1, figsize=(10,3))
teneto.plot.slice_plot(G_bursty, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
teneto.plot.slice_plot(G_periodic, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
ax[0].set_title('G_bursty')
ax[1].set_title('G_periodic')
ax[0].set_ylim([-0.25,1.25])
ax[1].set_ylim([-0.25,1.25])
ax[0].set_xticklabels([])
ax[1].set_xticklabels([])
plt.tight_layout()
fig.show()
Now we call bursty_coeff.
>>> B_periodic = teneto.networkmeasures.bursty_coeff(G_periodic)
>>> B_periodic
array([[nan, -1.],
[-1., nan]])
Above we can see that between node 0 and 1, B=-1 (the diagonal is nan).
Doing the same for the second example:
>>> B_bursty = teneto.networkmeasures.bursty_coeff(G_bursty)
>>> B_bursty
array([[ nan, 0.13311003],
[0.13311003, nan]])
gives a positive value, indicating that the inter-contact times between node 0 and 1 are bursty.
References
----------
.. [1] Goh, KI & Barabasi, AL (2008) Burstiness and Memory in Complex Systems. EPL (Europhysics Letters), 81: 4 [`Link <https://arxiv.org/pdf/physics/0610233.pdf>`_]
.. [2] Holme, P & Saramäki J (2012) Temporal networks. Physics Reports. 519: 3. [`Link <https://arxiv.org/pdf/1108.1780.pdf>`_] (Discrete formulation used here)
"""
if threshold_type is not None:
if threshold_params is None:
threshold_params = {}
data = binarize(data, threshold_type,
threshold_level, **threshold_params)
if calc == 'communities' and communities is None:
raise ValueError(
"Specified calc='communities' but no communities argument provided (list of clusters/modules)")
ict = 0 # flag: are ICTs already present in the input?
if isinstance(data, dict):
# This could be done better
if [k for k in list(data.keys()) if k == 'intercontacttimes'] == ['intercontacttimes']:
ict = 1
# if shortest paths are not calculated, calculate them
if ict == 0:
data = intercontacttimes(data)
ict_shape = data['intercontacttimes'].shape
if len(ict_shape) == 2:
node_len = ict_shape[0] * ict_shape[1]
elif len(ict_shape) == 1:
node_len = 1
else:
raise ValueError('more than two dimensions of intercontacttimes')
if isinstance(nodes, list) and len(ict_shape) > 1:
node_combinations = [[list(set(nodes))[t], list(set(nodes))[tt]] for t in range(
0, len(nodes)) for tt in range(0, len(nodes)) if t != tt]
do_nodes = [np.ravel_multi_index(n, ict_shape)
for n in node_combinations]
else:
do_nodes = np.arange(0, node_len)
# Reshape ICTs
if calc == 'node':
ict = np.concatenate(data['intercontacttimes']
[do_nodes, do_nodes], axis=1)
elif calc == 'communities':
unique_communities = np.unique(communities)
ict_shape = (len(unique_communities), len(unique_communities))
ict = np.array([[None] * ict_shape[0]] * ict_shape[1])
for i, s1 in enumerate(unique_communities):
for j, s2 in enumerate(unique_communities):
if s1 == s2:
ind = np.triu_indices(sum(communities == s1), k=1)
ict[i, j] = np.concatenate(
data['intercontacttimes'][ind[0], ind[1]])
else:
ict[i, j] = np.concatenate(np.concatenate(
data['intercontacttimes'][communities == s1, :][:, communities == s2]))
# Quick fix, but could be better
data['intercontacttimes'] = ict
do_nodes = np.arange(0, ict_shape[0]*ict_shape[1])
if len(ict_shape) > 1:
ict = data['intercontacttimes'].reshape(ict_shape[0] * ict_shape[1])
b_coeff = np.zeros(len(ict)) * np.nan
else:
b_coeff = np.zeros(1) * np.nan
ict = [data['intercontacttimes']]
for i in do_nodes:
if isinstance(ict[i], np.ndarray):
mu_ict = np.mean(ict[i])
sigma_ict = np.std(ict[i])
b_coeff[i] = (sigma_ict - mu_ict) / (sigma_ict + mu_ict)
else:
b_coeff[i] = np.nan
if len(ict_shape) > 1:
b_coeff = b_coeff.reshape(ict_shape)
return b_coeff | python | def bursty_coeff(data, calc='edge', nodes='all', communities=None, threshold_type=None, threshold_level=None, threshold_params=None):
r"""
Calculates the bursty coefficient.[1][2]
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
A weighted network can be used if you specify threshold_type and threshold_level, which will binarize it.
calc : str
Calculate the bursty coeff over what. Options include 'edge': calculate B on all ICTs between node i and j (default); 'node': calculate B on all ICTs connected to node i;
'communities': calculate B for each communities (argument communities then required);
'meanEdgePerNode': first calculate the ICTs between node i and j, then take the mean over all j.
nodes: list or str
Options: 'all': do for all nodes (default) or list of node indexes to calculate.
communities : array, optional
None (default) or Nx1 vector of communities assignment. This returns a "centrality" per communities instead of per node.
threshold_type : str, optional
If input is weighted. Specify binarizing threshold type. See teneto.utils.binarize
threshold_level : str, optional
If input is weighted. Specify binarizing threshold level. See teneto.utils.binarize
threshold_params : dict
If input is weighted. Dictionary with kwargs for teneto.utils.binarize
Returns
-------
B : array
Bursty coefficient per edge or node (depending on calc).
Notes
------
The burstiness coefficient, B, is defined in refs [1,2] as:
.. math:: B = {{\sigma_{ICT} - \mu_{ICT}} \over {\sigma_{ICT} + \mu_{ICT}}}
Where :math:`\sigma_{ICT}` and :math:`\mu_{ICT}` are the standard deviation and mean of the inter-contact times respectively (see teneto.networkmeasures.intercontacttimes)
When B > 0, indicates bursty intercontact times. When B < 0, indicates periodic/tonic intercontact times. When B = 0, indicates random.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create 2 temporal networks of 2 nodes and 60 time points. The first has periodic edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
The two networks look like this:
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
G_bursty = np.zeros([2, 2, 60])
G_bursty[:,:,ts_bursty] = 1
G_periodic = np.zeros([2, 2, 60])
ts_periodic = np.arange(0, 60, 2)
G_periodic[:,:,ts_periodic] = 1
fig,ax = plt.subplots(2, 1, figsize=(10,3))
teneto.plot.slice_plot(G_bursty, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
teneto.plot.slice_plot(G_periodic, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
ax[0].set_title('G_bursty')
ax[1].set_title('G_periodic')
ax[0].set_ylim([-0.25,1.25])
ax[1].set_ylim([-0.25,1.25])
ax[0].set_xticklabels([])
ax[1].set_xticklabels([])
plt.tight_layout()
fig.show()
Now we call bursty_coeff.
>>> B_periodic = teneto.networkmeasures.bursty_coeff(G_periodic)
>>> B_periodic
array([[nan, -1.],
[-1., nan]])
Above we can see that between node 0 and 1, B=-1 (the diagonal is nan).
Doing the same for the second example:
>>> B_bursty = teneto.networkmeasures.bursty_coeff(G_bursty)
>>> B_bursty
array([[ nan, 0.13311003],
[0.13311003, nan]])
gives a positive value, indicating that the inter-contact times between node 0 and 1 are bursty.
References
----------
.. [1] Goh, KI & Barabasi, AL (2008) Burstiness and Memory in Complex Systems. EPL (Europhysics Letters), 81: 4 [`Link <https://arxiv.org/pdf/physics/0610233.pdf>`_]
.. [2] Holme, P & Saramäki J (2012) Temporal networks. Physics Reports. 519: 3. [`Link <https://arxiv.org/pdf/1108.1780.pdf>`_] (Discrete formulation used here)
"""
if threshold_type is not None:
if threshold_params is None:
threshold_params = {}
data = binarize(data, threshold_type,
threshold_level, **threshold_params)
if calc == 'communities' and communities is None:
raise ValueError(
"Specified calc='communities' but no communities argument provided (list of clusters/modules)")
ict = 0 # flag: are ICTs already present in the input?
if isinstance(data, dict):
# This could be done better
if [k for k in list(data.keys()) if k == 'intercontacttimes'] == ['intercontacttimes']:
ict = 1
# if shortest paths are not calculated, calculate them
if ict == 0:
data = intercontacttimes(data)
ict_shape = data['intercontacttimes'].shape
if len(ict_shape) == 2:
node_len = ict_shape[0] * ict_shape[1]
elif len(ict_shape) == 1:
node_len = 1
else:
raise ValueError('more than two dimensions of intercontacttimes')
if isinstance(nodes, list) and len(ict_shape) > 1:
node_combinations = [[list(set(nodes))[t], list(set(nodes))[tt]] for t in range(
0, len(nodes)) for tt in range(0, len(nodes)) if t != tt]
do_nodes = [np.ravel_multi_index(n, ict_shape)
for n in node_combinations]
else:
do_nodes = np.arange(0, node_len)
# Reshape ICTs
if calc == 'node':
ict = np.concatenate(data['intercontacttimes']
[do_nodes, do_nodes], axis=1)
elif calc == 'communities':
unique_communities = np.unique(communities)
ict_shape = (len(unique_communities), len(unique_communities))
ict = np.array([[None] * ict_shape[0]] * ict_shape[1])
for i, s1 in enumerate(unique_communities):
for j, s2 in enumerate(unique_communities):
if s1 == s2:
ind = np.triu_indices(sum(communities == s1), k=1)
ict[i, j] = np.concatenate(
data['intercontacttimes'][ind[0], ind[1]])
else:
ict[i, j] = np.concatenate(np.concatenate(
data['intercontacttimes'][communities == s1, :][:, communities == s2]))
# Quick fix, but could be better
data['intercontacttimes'] = ict
do_nodes = np.arange(0, ict_shape[0]*ict_shape[1])
if len(ict_shape) > 1:
ict = data['intercontacttimes'].reshape(ict_shape[0] * ict_shape[1])
b_coeff = np.zeros(len(ict)) * np.nan
else:
b_coeff = np.zeros(1) * np.nan
ict = [data['intercontacttimes']]
for i in do_nodes:
if isinstance(ict[i], np.ndarray):
mu_ict = np.mean(ict[i])
sigma_ict = np.std(ict[i])
b_coeff[i] = (sigma_ict - mu_ict) / (sigma_ict + mu_ict)
else:
b_coeff[i] = np.nan
if len(ict_shape) > 1:
b_coeff = b_coeff.reshape(ict_shape)
return b_coeff | r"""
Calculates the bursty coefficient.[1][2]
Parameters
----------
data : array, dict
This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*).
A weighted network can be used if you specify threshold_type and threshold_level, which will binarize it.
calc : str
Calculate the bursty coeff over what. Options include 'edge': calculate B on all ICTs between node i and j (default); 'node': calculate B on all ICTs connected to node i;
'communities': calculate B for each communities (argument communities then required);
'meanEdgePerNode': first calculate the ICTs between node i and j, then take the mean over all j.
nodes: list or str
Options: 'all': do for all nodes (default) or list of node indexes to calculate.
communities : array, optional
None (default) or Nx1 vector of communities assignment. This returns a "centrality" per communities instead of per node.
threshold_type : str, optional
If input is weighted. Specify binarizing threshold type. See teneto.utils.binarize
threshold_level : str, optional
If input is weighted. Specify binarizing threshold level. See teneto.utils.binarize
threshold_params : dict
If input is weighted. Dictionary with kwargs for teneto.utils.binarize
Returns
-------
B : array
Bursty coefficient per edge or node (depending on calc).
Notes
------
The burstiness coefficient, B, is defined in refs [1,2] as:
.. math:: B = {{\sigma_{ICT} - \mu_{ICT}} \over {\sigma_{ICT} + \mu_{ICT}}}
Where :math:`\sigma_{ICT}` and :math:`\mu_{ICT}` are the standard deviation and mean of the inter-contact times respectively (see teneto.networkmeasures.intercontacttimes)
When B > 0, indicates bursty intercontact times. When B < 0, indicates periodic/tonic intercontact times. When B = 0, indicates random.
Examples
---------
First import all necessary packages
>>> import teneto
>>> import numpy as np
Now create 2 temporal networks of 2 nodes and 60 time points. The first has periodic edges, repeating every other time-point:
>>> G_periodic = np.zeros([2, 2, 60])
>>> ts_periodic = np.arange(0, 60, 2)
>>> G_periodic[:,:,ts_periodic] = 1
The second has a more bursty pattern of edges:
>>> ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
>>> G_bursty = np.zeros([2, 2, 60])
>>> G_bursty[:,:,ts_bursty] = 1
The two networks look like this:
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
ts_bursty = [1, 8, 9, 32, 33, 34, 39, 40, 50, 51, 52, 55]
G_bursty = np.zeros([2, 2, 60])
G_bursty[:,:,ts_bursty] = 1
G_periodic = np.zeros([2, 2, 60])
ts_periodic = np.arange(0, 60, 2)
G_periodic[:,:,ts_periodic] = 1
fig,ax = plt.subplots(2, 1, figsize=(10,3))
teneto.plot.slice_plot(G_bursty, ax[0], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
teneto.plot.slice_plot(G_periodic, ax[1], cmap='Pastel2', nodesize=20, nLabs=['0', '1'])
ax[0].set_title('G_bursty')
ax[1].set_title('G_periodic')
ax[0].set_ylim([-0.25,1.25])
ax[1].set_ylim([-0.25,1.25])
ax[0].set_xticklabels([])
ax[1].set_xticklabels([])
plt.tight_layout()
fig.show()
Now we call bursty_coeff.
>>> B_periodic = teneto.networkmeasures.bursty_coeff(G_periodic)
>>> B_periodic
array([[nan, -1.],
[-1., nan]])
Above we can see that between node 0 and 1, B=-1 (the diagonal is nan).
Doing the same for the second example:
>>> B_bursty = teneto.networkmeasures.bursty_coeff(G_bursty)
>>> B_bursty
array([[ nan, 0.13311003],
[0.13311003, nan]])
gives a positive value, indicating that the inter-contact times between node 0 and 1 are bursty.
References
----------
.. [1] Goh, KI & Barabasi, AL (2008) Burstiness and Memory in Complex Systems. EPL (Europhysics Letters), 81: 4 [`Link <https://arxiv.org/pdf/physics/0610233.pdf>`_]
.. [2] Holme, P & Saramäki J (2012) Temporal networks. Physics Reports. 519: 3. [`Link <https://arxiv.org/pdf/1108.1780.pdf>`_] (Discrete formulation used here) | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/bursty_coeff.py#L10-L204 |
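The docstring example covers binary input; below is a sketch of the weighted path, reusing G_bursty from the example above and assuming 'magnitude' is among the threshold types accepted by teneto.utils.binarize:
W = G_bursty * 0.8   # scale the binary example network into weights
B = teneto.networkmeasures.bursty_coeff(W, threshold_type='magnitude', threshold_level=0.5)
# With every active edge at weight 0.8 and a 0.5 cutoff, this reproduces B_bursty.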
ianlini/flatten-dict | flatten_dict/flatten_dict.py | flatten | def flatten(d, reducer='tuple', inverse=False):
"""Flatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be flattened.
reducer: {'tuple', 'path', function} (default: 'tuple')
The key joining method. If a function is given, the function will be
used to reduce.
'tuple': The resulting key will be a tuple of the original keys.
'path': Use ``os.path.join`` to join keys.
inverse: bool (default: False)
Whether you want to invert the resulting key and value.
Returns
-------
flat_dict: dict
"""
if isinstance(reducer, str):
reducer = REDUCER_DICT[reducer]
flat_dict = {}
def _flatten(d, parent=None):
for key, value in six.viewitems(d):
flat_key = reducer(parent, key)
if isinstance(value, Mapping):
_flatten(value, flat_key)
else:
if inverse:
flat_key, value = value, flat_key
if flat_key in flat_dict:
raise ValueError("duplicated key '{}'".format(flat_key))
flat_dict[flat_key] = value
_flatten(d)
return flat_dict | python | def flatten(d, reducer='tuple', inverse=False):
"""Flatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be flattened.
reducer: {'tuple', 'path', function} (default: 'tuple')
The key joining method. If a function is given, the function will be
used to reduce.
'tuple': The resulting key will be a tuple of the original keys.
'path': Use ``os.path.join`` to join keys.
inverse: bool (default: False)
Whether you want to invert the resulting key and value.
Returns
-------
flat_dict: dict
"""
if isinstance(reducer, str):
reducer = REDUCER_DICT[reducer]
flat_dict = {}
def _flatten(d, parent=None):
for key, value in six.viewitems(d):
flat_key = reducer(parent, key)
if isinstance(value, Mapping):
_flatten(value, flat_key)
else:
if inverse:
flat_key, value = value, flat_key
if flat_key in flat_dict:
raise ValueError("duplicated key '{}'".format(flat_key))
flat_dict[flat_key] = value
_flatten(d)
return flat_dict | Flatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be flattened.
reducer: {'tuple', 'path', function} (default: 'tuple')
The key joining method. If a function is given, the function will be
used to reduce.
'tuple': The resulting key will be a tuple of the original keys.
'path': Use ``os.path.join`` to join keys.
inverse: bool (default: False)
Whether you want to invert the resulting key and value.
Returns
-------
flat_dict: dict | https://github.com/ianlini/flatten-dict/blob/77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa/flatten_dict/flatten_dict.py#L20-L56 |
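A short sketch of the reducer options (the dotted custom reducer is only an illustration):
from flatten_dict import flatten
d = {'a': {'b': 1, 'c': {'d': 2}}}
flatten(d)                  # {('a', 'b'): 1, ('a', 'c', 'd'): 2}
flatten(d, reducer='path')  # {'a/b': 1, 'a/c/d': 2} on POSIX (os.path.join)
# A custom reducer receives (parent, key), with parent=None at the top level.
flatten(d, reducer=lambda parent, key: key if parent is None else parent + '.' + key)  # {'a.b': 1, 'a.c.d': 2}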
ianlini/flatten-dict | flatten_dict/flatten_dict.py | nested_set_dict | def nested_set_dict(d, keys, value):
"""Set a value to a sequence of nested keys
Parameters
----------
d: Mapping
keys: Sequence[str]
value: Any
"""
assert keys
key = keys[0]
if len(keys) == 1:
if key in d:
raise ValueError("duplicated key '{}'".format(key))
d[key] = value
return
d = d.setdefault(key, {})
nested_set_dict(d, keys[1:], value) | python | def nested_set_dict(d, keys, value):
"""Set a value to a sequence of nested keys
Parameters
----------
d: Mapping
keys: Sequence[str]
value: Any
"""
assert keys
key = keys[0]
if len(keys) == 1:
if key in d:
raise ValueError("duplicated key '{}'".format(key))
d[key] = value
return
d = d.setdefault(key, {})
nested_set_dict(d, keys[1:], value) | Set a value at a sequence of nested keys.
Parameters
----------
d: Mapping
keys: Sequence[str]
value: Any | https://github.com/ianlini/flatten-dict/blob/77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa/flatten_dict/flatten_dict.py#L59-L76 |
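A sketch of building a nested dict key by key (the import path is an assumption based on the module path shown above):
from flatten_dict.flatten_dict import nested_set_dict
d = {}
nested_set_dict(d, ('a', 'b'), 1)
nested_set_dict(d, ('a', 'c'), 2)
# d == {'a': {'b': 1, 'c': 2}}; writing to an existing leaf again raises
# ValueError("duplicated key 'b'"), e.g. nested_set_dict(d, ('a', 'b'), 3).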
ianlini/flatten-dict | flatten_dict/flatten_dict.py | unflatten | def unflatten(d, splitter='tuple', inverse=False):
"""Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before unflattening.
Returns
-------
unflattened_dict: dict
"""
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
unflattened_dict = {}
for flat_key, value in six.viewitems(d):
if inverse:
flat_key, value = value, flat_key
key_tuple = splitter(flat_key)
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict | python | def unflatten(d, splitter='tuple', inverse=False):
"""Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before unflattening.
Returns
-------
unflattened_dict: dict
"""
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
unflattened_dict = {}
for flat_key, value in six.viewitems(d):
if inverse:
flat_key, value = value, flat_key
key_tuple = splitter(flat_key)
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict | Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before unflattening.
Returns
-------
unflattened_dict: dict | https://github.com/ianlini/flatten-dict/blob/77a2bf669ea6dc7446b8ad1596dc2a41d4c5a7fa/flatten_dict/flatten_dict.py#L79-L108 |
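A sketch showing unflatten as the inverse of flatten when the splitter matches the reducer used:
from flatten_dict import unflatten
unflatten({('a', 'b'): 1, ('a', 'c'): 2})         # {'a': {'b': 1, 'c': 2}}
unflatten({'a/b': 1, 'a/c': 2}, splitter='path')  # same result, split via pathlib.Path.parts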
salu133445/pypianoroll | pypianoroll/plot.py | plot_pianoroll | def plot_pianoroll(ax, pianoroll, is_drum=False, beat_resolution=None,
downbeats=None, preset='default', cmap='Blues', xtick='auto',
ytick='octave', xticklabel=True, yticklabel='auto',
tick_loc=None, tick_direction='in', label='both',
grid='both', grid_linestyle=':', grid_linewidth=.5):
"""
Plot a pianoroll given as a numpy array.
Parameters
----------
ax : matplotlib.axes.Axes object
A :class:`matplotlib.axes.Axes` object where the pianoroll will be
plotted on.
pianoroll : np.ndarray
A pianoroll to be plotted. The values should be in [0, 1] when data
type is float, and in [0, 127] when data type is integer.
- For a 2D array, shape=(num_time_step, num_pitch).
- For a 3D array, shape=(num_time_step, num_pitch, num_channel),
where channels can be either RGB or RGBA.
is_drum : bool
A boolean number that indicates whether it is a percussion track.
Defaults to False.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
An array that indicates whether the time step contains a downbeat
(i.e., the first time step of a bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given and set to 'step', otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates where to put the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
"""
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib package is required for plotting "
"supports.")
if pianoroll.ndim not in (2, 3):
raise ValueError("`pianoroll` must be a 2D or 3D numpy array")
if pianoroll.shape[1] != 128:
raise ValueError("The length of the second axis of `pianoroll` "
"must be 128.")
if xtick not in ('auto', 'beat', 'step', 'off'):
raise ValueError("`xtick` must be one of {'auto', 'beat', 'step', "
"'none'}.")
if xtick == 'beat' and beat_resolution is None:
raise ValueError("`beat_resolution` must be specified when `xtick` "
"is 'beat'.")
if ytick not in ('octave', 'pitch', 'off'):
raise ValueError("`ytick` must be one of {octave', 'pitch', 'off'}.")
if not isinstance(xticklabel, bool):
raise TypeError("`xticklabel` must be bool.")
if yticklabel not in ('auto', 'name', 'number', 'off'):
raise ValueError("`yticklabel` must be one of {'auto', 'name', "
"'number', 'off'}.")
if tick_direction not in ('in', 'out', 'inout'):
raise ValueError("`tick_direction` must be one of {'in', 'out',"
"'inout'}.")
if label not in ('x', 'y', 'both', 'off'):
raise ValueError("`label` must be one of {'x', 'y', 'both', 'off'}.")
if grid not in ('x', 'y', 'both', 'off'):
raise ValueError("`grid` must be one of {'x', 'y', 'both', 'off'}.")
# plotting
if pianoroll.ndim > 2:
to_plot = pianoroll.transpose(1, 0, 2)
else:
to_plot = pianoroll.T
if (np.issubdtype(pianoroll.dtype, np.bool_)
or np.issubdtype(pianoroll.dtype, np.floating)):
ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=0, vmax=1,
origin='lower', interpolation='none')
elif np.issubdtype(pianoroll.dtype, np.integer):
ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=0, vmax=127,
origin='lower', interpolation='none')
else:
raise TypeError("Unsupported data type for `pianoroll`.")
# tick setting
if tick_loc is None:
tick_loc = ('bottom', 'left')
if xtick == 'auto':
xtick = 'beat' if beat_resolution is not None else 'step'
if yticklabel == 'auto':
yticklabel = 'name' if ytick == 'octave' else 'number'
if preset == 'plain':
ax.axis('off')
elif preset == 'frame':
ax.tick_params(direction=tick_direction, bottom=False, top=False,
left=False, right=False, labelbottom=False,
labeltop=False, labelleft=False, labelright=False)
else:
ax.tick_params(direction=tick_direction, bottom=('bottom' in tick_loc),
top=('top' in tick_loc), left=('left' in tick_loc),
right=('right' in tick_loc),
labelbottom=bool(xticklabel),
labelleft=(yticklabel != 'off'),
labeltop=False, labelright=False)
# x-axis
if xtick == 'beat' and preset != 'frame':
num_beat = pianoroll.shape[0]//beat_resolution
xticks_major = beat_resolution * np.arange(0, num_beat)
xticks_minor = beat_resolution * (0.5 + np.arange(0, num_beat))
xtick_labels = np.arange(1, 1 + num_beat)
ax.set_xticks(xticks_major)
ax.set_xticklabels('')
ax.set_xticks(xticks_minor, minor=True)
ax.set_xticklabels(xtick_labels, minor=True)
ax.tick_params(axis='x', which='minor', width=0)
# y-axis
if ytick == 'octave':
ax.set_yticks(np.arange(0, 128, 12))
if yticklabel == 'name':
ax.set_yticklabels(['C{}'.format(i - 2) for i in range(11)])
elif ytick == 'pitch':
ax.set_yticks(np.arange(0, 128))
if yticklabel == 'name':
if is_drum:
ax.set_yticklabels([pretty_midi.note_number_to_drum_name(i)
for i in range(128)])
else:
ax.set_yticklabels([pretty_midi.note_number_to_name(i)
for i in range(128)])
# axis labels
if label == 'x' or label == 'both':
if xtick == 'step' or not xticklabel:
ax.set_xlabel('time (step)')
else:
ax.set_xlabel('time (beat)')
if label == 'y' or label == 'both':
if is_drum:
ax.set_ylabel('key name')
else:
ax.set_ylabel('pitch')
# grid
if grid != 'off':
ax.grid(axis=grid, color='k', linestyle=grid_linestyle,
linewidth=grid_linewidth)
# downbeat border
if downbeats is not None and preset != 'plain':
for step in downbeats:
ax.axvline(x=step, color='k', linewidth=1) | python | def plot_pianoroll(ax, pianoroll, is_drum=False, beat_resolution=None,
downbeats=None, preset='default', cmap='Blues', xtick='auto',
ytick='octave', xticklabel=True, yticklabel='auto',
tick_loc=None, tick_direction='in', label='both',
grid='both', grid_linestyle=':', grid_linewidth=.5):
"""
Plot a pianoroll given as a numpy array.
Parameters
----------
ax : matplotlib.axes.Axes object
A :class:`matplotlib.axes.Axes` object where the pianoroll will be
plotted on.
pianoroll : np.ndarray
A pianoroll to be plotted. The values should be in [0, 1] when data
type is float, and in [0, 127] when data type is integer.
- For a 2D array, shape=(num_time_step, num_pitch).
- For a 3D array, shape=(num_time_step, num_pitch, num_channel),
where channels can be either RGB or RGBA.
is_drum : bool
A boolean number that indicates whether it is a percussion track.
Defaults to False.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
An array that indicates whether the time step contains a downbeat
(i.e., the first time step of a bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given and set to 'step', otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates where to put the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
"""
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib package is required for plotting "
"supports.")
if pianoroll.ndim not in (2, 3):
raise ValueError("`pianoroll` must be a 2D or 3D numpy array")
if pianoroll.shape[1] != 128:
raise ValueError("The length of the second axis of `pianoroll` "
"must be 128.")
if xtick not in ('auto', 'beat', 'step', 'off'):
raise ValueError("`xtick` must be one of {'auto', 'beat', 'step', "
"'none'}.")
if xtick == 'beat' and beat_resolution is None:
raise ValueError("`beat_resolution` must be specified when `xtick` "
"is 'beat'.")
if ytick not in ('octave', 'pitch', 'off'):
raise ValueError("`ytick` must be one of {octave', 'pitch', 'off'}.")
if not isinstance(xticklabel, bool):
raise TypeError("`xticklabel` must be bool.")
if yticklabel not in ('auto', 'name', 'number', 'off'):
raise ValueError("`yticklabel` must be one of {'auto', 'name', "
"'number', 'off'}.")
if tick_direction not in ('in', 'out', 'inout'):
raise ValueError("`tick_direction` must be one of {'in', 'out',"
"'inout'}.")
if label not in ('x', 'y', 'both', 'off'):
raise ValueError("`label` must be one of {'x', 'y', 'both', 'off'}.")
if grid not in ('x', 'y', 'both', 'off'):
raise ValueError("`grid` must be one of {'x', 'y', 'both', 'off'}.")
# plotting
if pianoroll.ndim > 2:
to_plot = pianoroll.transpose(1, 0, 2)
else:
to_plot = pianoroll.T
if (np.issubdtype(pianoroll.dtype, np.bool_)
or np.issubdtype(pianoroll.dtype, np.floating)):
ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=0, vmax=1,
origin='lower', interpolation='none')
elif np.issubdtype(pianoroll.dtype, np.integer):
ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=0, vmax=127,
origin='lower', interpolation='none')
else:
raise TypeError("Unsupported data type for `pianoroll`.")
# tick setting
if tick_loc is None:
tick_loc = ('bottom', 'left')
if xtick == 'auto':
xtick = 'beat' if beat_resolution is not None else 'step'
if yticklabel == 'auto':
yticklabel = 'name' if ytick == 'octave' else 'number'
if preset == 'plain':
ax.axis('off')
elif preset == 'frame':
ax.tick_params(direction=tick_direction, bottom=False, top=False,
left=False, right=False, labelbottom=False,
labeltop=False, labelleft=False, labelright=False)
else:
ax.tick_params(direction=tick_direction, bottom=('bottom' in tick_loc),
top=('top' in tick_loc), left=('left' in tick_loc),
right=('right' in tick_loc),
labelbottom=bool(xticklabel),
labelleft=(yticklabel != 'off'),
labeltop=False, labelright=False)
# x-axis
if xtick == 'beat' and preset != 'frame':
num_beat = pianoroll.shape[0]//beat_resolution
xticks_major = beat_resolution * np.arange(0, num_beat)
xticks_minor = beat_resolution * (0.5 + np.arange(0, num_beat))
xtick_labels = np.arange(1, 1 + num_beat)
ax.set_xticks(xticks_major)
ax.set_xticklabels('')
ax.set_xticks(xticks_minor, minor=True)
ax.set_xticklabels(xtick_labels, minor=True)
ax.tick_params(axis='x', which='minor', width=0)
# y-axis
if ytick == 'octave':
ax.set_yticks(np.arange(0, 128, 12))
if yticklabel == 'name':
ax.set_yticklabels(['C{}'.format(i - 2) for i in range(11)])
elif ytick == 'pitch':
ax.set_yticks(np.arange(0, 128))
if yticklabel == 'name':
if is_drum:
ax.set_yticklabels([pretty_midi.note_number_to_drum_name(i)
for i in range(128)])
else:
ax.set_yticklabels([pretty_midi.note_number_to_name(i)
for i in range(128)])
# axis labels
if label == 'x' or label == 'both':
if xtick == 'step' or not xticklabel:
ax.set_xlabel('time (step)')
else:
ax.set_xlabel('time (beat)')
if label == 'y' or label == 'both':
if is_drum:
ax.set_ylabel('key name')
else:
ax.set_ylabel('pitch')
# grid
if grid != 'off':
ax.grid(axis=grid, color='k', linestyle=grid_linestyle,
linewidth=grid_linewidth)
# downbeat border
if downbeats is not None and preset != 'plain':
for step in downbeats:
ax.axvline(x=step, color='k', linewidth=1) | Plot a pianoroll given as a numpy array.
Parameters
----------
ax : matplotlib.axes.Axes object
A :class:`matplotlib.axes.Axes` object where the pianoroll will be
plotted on.
pianoroll : np.ndarray
A pianoroll to be plotted. The values should be in [0, 1] when data
type is float, and in [0, 127] when data type is integer.
- For a 2D array, shape=(num_time_step, num_pitch).
- For a 3D array, shape=(num_time_step, num_pitch, num_channel),
where channels can be either RGB or RGBA.
is_drum : bool
A boolean number that indicates whether it is a percussion track.
Defaults to False.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
An array that indicates whether the time step contains a downbeat
(i.e., the first time step of a bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given and set to 'step', otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates where to put the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/plot.py#L23-L214 |
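A minimal sketch drawing a single held note (importing from pypianoroll.plot, matching the file path above):
import matplotlib.pyplot as plt
import numpy as np
from pypianoroll.plot import plot_pianoroll
pianoroll = np.zeros((96, 128), bool)
pianoroll[:, 60] = True   # hold middle C for all 96 time steps
fig, ax = plt.subplots()
plot_pianoroll(ax, pianoroll, beat_resolution=24, xtick='beat')
plt.show()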
salu133445/pypianoroll | pypianoroll/plot.py | plot_track | def plot_track(track, filename=None, beat_resolution=None, downbeats=None,
preset='default', cmap='Blues', xtick='auto', ytick='octave',
xticklabel=True, yticklabel='auto', tick_loc=None,
tick_direction='in', label='both', grid='both',
grid_linestyle=':', grid_linewidth=.5):
"""
Plot the pianoroll or save a plot of the pianoroll.
Parameters
----------
track : pypianoroll.Track
The Track object whose pianoroll will be plotted.
filename : str or None
The filename to which the plot is saved. If None, save nothing.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
A list of indices of the time steps that contain downbeats (i.e., the
first time step of each bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
Returns
-------
fig : `matplotlib.figure.Figure` object
A :class:`matplotlib.figure.Figure` object.
ax : `matplotlib.axes.Axes` object
A :class:`matplotlib.axes.Axes` object.
"""
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib package is required for plotting "
"supports.")
fig, ax = plt.subplots()
plot_pianoroll(ax, track.pianoroll, track.is_drum, beat_resolution,
downbeats, preset=preset, cmap=cmap, xtick=xtick,
ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
if filename is not None:
plt.savefig(filename)
return fig, ax | python | def plot_track(track, filename=None, beat_resolution=None, downbeats=None,
preset='default', cmap='Blues', xtick='auto', ytick='octave',
xticklabel=True, yticklabel='auto', tick_loc=None,
tick_direction='in', label='both', grid='both',
grid_linestyle=':', grid_linewidth=.5):
"""
Plot the pianoroll or save a plot of the pianoroll.
Parameters
----------
track : pypianoroll.Track
The :class:`pypianoroll.Track` instance to be plotted.
filename : str
The filename to which the plot is saved. If None, save nothing.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
A list of indices of the time steps that contain downbeats (i.e., the
first time step of each bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
Returns
-------
fig : `matplotlib.figure.Figure` object
A :class:`matplotlib.figure.Figure` object.
ax : `matplotlib.axes.Axes` object
A :class:`matplotlib.axes.Axes` object.
"""
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib package is required for plotting "
"supports.")
fig, ax = plt.subplots()
plot_pianoroll(ax, track.pianoroll, track.is_drum, beat_resolution,
downbeats, preset=preset, cmap=cmap, xtick=xtick,
ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
if filename is not None:
plt.savefig(filename)
return fig, ax | Plot the pianoroll or save a plot of the pianoroll.
Parameters
----------
track : pypianoroll.Track
The :class:`pypianoroll.Track` instance to be plotted.
filename : str
The filename to which the plot is saved. If None, save nothing.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
A list of indices of the time steps that contain downbeats (i.e., the
first time step of each bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
Returns
-------
fig : `matplotlib.figure.Figure` object
A :class:`matplotlib.figure.Figure` object.
ax : `matplotlib.axes.Axes` object
A :class:`matplotlib.axes.Axes` object. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/plot.py#L216-L303 |
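A usage sketch for plot_track; the Track constructor arguments follow the append_track record below, and the import path is an assumption:

import numpy as np
from pypianoroll import Track, plot_track  # assumed import path

pianoroll = np.zeros((96, 128), np.uint8)
pianoroll[:48, 60] = 100  # a held middle C at velocity 100
track = Track(pianoroll=pianoroll, program=0, name='piano')
fig, ax = plot_track(track, filename='track.png', beat_resolution=24)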
salu133445/pypianoroll | pypianoroll/plot.py | plot_multitrack | def plot_multitrack(multitrack, filename=None, mode='separate',
track_label='name', preset='default', cmaps=None,
xtick='auto', ytick='octave', xticklabel=True,
yticklabel='auto', tick_loc=None, tick_direction='in',
label='both', grid='both', grid_linestyle=':',
grid_linewidth=.5):
"""
Plot the pianorolls or save a plot of them.
Parameters
----------
multitrack : pypianoroll.Multitrack
The :class:`pypianoroll.Multitrack` instance to be plotted.
filename : str
The filename to which the plot is saved. If None, save nothing.
mode : {'separate', 'stacked', 'hybrid'}
A string that indicates the plotting mode to use. Defaults to
'separate'.
- In 'separate' mode, all the tracks are plotted separately.
- In 'stacked' mode, a color is assigned based on `cmaps` to the
pianoroll of each track and the pianorolls are stacked and
plotted as a colored image with RGB channels.
- In 'hybrid' mode, the drum tracks are merged into a 'Drums' track,
while the other tracks are merged into an 'Others' track, and the
two merged tracks are then plotted separately.
track_label : {'name', 'program', 'family', 'off'}
A string that indicates what to use as labels for the tracks. When
`mode` is 'hybrid', all options other than 'off' will label the two
merged tracks with 'Drums' and 'Others'.
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmaps : tuple or list
The `matplotlib.colors.Colormap` instances or colormap codes to use.
- When `mode` is 'separate', each element will be passed to each
call of :func:`matplotlib.pyplot.imshow`. Defaults to ('Blues',
'Oranges', 'Greens', 'Reds', 'Purples', 'Greys').
- When `mode` is 'stacked', a color is assigned based on `cmaps` to
the pianoroll of each track. Defaults to ('hsv',).
- When `mode` is 'hybrid', the first (second) element is used in the
'Drums' ('Others') track. Defaults to ('Blues', 'Greens').
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
Returns
-------
fig : `matplotlib.figure.Figure` object
A :class:`matplotlib.figure.Figure` object.
axs : list
List of :class:`matplotlib.axes.Axes` objects.
"""
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib package is required for plotting "
"supports.")
def get_track_label(track_label, track=None):
"""Convenient function to get track labels"""
if track_label == 'name':
return track.name
elif track_label == 'program':
return pretty_midi.program_to_instrument_name(track.program)
elif track_label == 'family':
return pretty_midi.program_to_instrument_class(track.program)
elif track is None:
return track_label
def add_tracklabel(ax, track_label, track=None):
"""Convenient function for adding track labels"""
if not ax.get_ylabel():
return
ax.set_ylabel(get_track_label(track_label, track) + '\n\n'
+ ax.get_ylabel())
multitrack.check_validity()
if not multitrack.tracks:
raise ValueError("There is no track to plot.")
if mode not in ('separate', 'stacked', 'hybrid'):
raise ValueError("`mode` must be one of {'separate', 'stacked', "
"'hybrid'}.")
if track_label not in ('name', 'program', 'family', 'off'):
raise ValueError("`track_label` must be one of {'name', 'program', "
"'family'}.")
if cmaps is None:
if mode == 'separate':
cmaps = ('Blues', 'Oranges', 'Greens', 'Reds', 'Purples', 'Greys')
elif mode == 'stacked':
cmaps = ('hsv',)
else:
cmaps = ('Blues', 'Greens')
num_track = len(multitrack.tracks)
downbeats = multitrack.get_downbeat_steps()
if mode == 'separate':
if num_track > 1:
fig, axs = plt.subplots(num_track, sharex=True)
else:
fig, ax = plt.subplots()
axs = [ax]
for idx, track in enumerate(multitrack.tracks):
now_xticklabel = xticklabel if idx == num_track - 1 else False  # only label the bottom subplot
plot_pianoroll(axs[idx], track.pianoroll, track.is_drum,  # use each track's drum flag
multitrack.beat_resolution, downbeats, preset=preset,
cmap=cmaps[idx%len(cmaps)], xtick=xtick, ytick=ytick,
xticklabel=now_xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid,
grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
if track_label != 'off':
add_tracklabel(axs[idx], track_label, track)
if num_track > 1:
fig.subplots_adjust(hspace=0)
if filename is not None:
plt.savefig(filename)
return (fig, axs)
elif mode == 'stacked':
is_all_drum = all(track.is_drum for track in multitrack.tracks)
fig, ax = plt.subplots()
stacked = multitrack.get_stacked_pianorolls()
colormap = matplotlib.cm.get_cmap(cmaps[0])
cmatrix = colormap(np.arange(0, 1, 1 / num_track))[:, :3]
recolored = np.matmul(stacked.reshape(-1, num_track), cmatrix)
stacked = recolored.reshape(stacked.shape[:2] + (3, ))
plot_pianoroll(ax, stacked, is_all_drum, multitrack.beat_resolution,
downbeats, preset=preset, xtick=xtick, ytick=ytick,
xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
if track_label != 'off':
patches = [Patch(color=cmatrix[idx],
label=get_track_label(track_label, track))
for idx, track in enumerate(multitrack.tracks)]
plt.legend(handles=patches)
if filename is not None:
plt.savefig(filename)
return (fig, [ax])
elif mode == 'hybrid':
drums = [i for i, track in enumerate(multitrack.tracks)
if track.is_drum]
others = [i for i in range(len(multitrack.tracks)) if i not in drums]
merged_drums = multitrack.get_merged_pianoroll(drums)
merged_others = multitrack.get_merged_pianoroll(others)
fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
plot_pianoroll(ax1, merged_drums, True, multitrack.beat_resolution,
downbeats, preset=preset, cmap=cmaps[0], xtick=xtick,
ytick=ytick, xticklabel=xticklabel,
yticklabel=yticklabel, tick_loc=tick_loc,
tick_direction=tick_direction, label=label, grid=grid,
grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
plot_pianoroll(ax2, merged_others, False, multitrack.beat_resolution,
downbeats, preset=preset, cmap=cmaps[1], xtick=xtick, ytick=ytick,
xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
fig.subplots_adjust(hspace=0)
if track_label != 'off':
add_tracklabel(ax1, 'Drums')
add_tracklabel(ax2, 'Others')
if filename is not None:
plt.savefig(filename)
return (fig, [ax1, ax2]) | python | def plot_multitrack(multitrack, filename=None, mode='separate',
track_label='name', preset='default', cmaps=None,
xtick='auto', ytick='octave', xticklabel=True,
yticklabel='auto', tick_loc=None, tick_direction='in',
label='both', grid='both', grid_linestyle=':',
grid_linewidth=.5):
"""
Plot the pianorolls or save a plot of them.
Parameters
----------
multitrack : pypianoroll.Multitrack
The :class:`pypianoroll.Multitrack` instance to be plotted.
filename : str
The filename to which the plot is saved. If None, save nothing.
mode : {'separate', 'stacked', 'hybrid'}
A string that indicates the plotting mode to use. Defaults to
'separate'.
- In 'separate' mode, all the tracks are plotted separately.
- In 'stacked' mode, a color is assigned based on `cmaps` to the
pianoroll of each track and the pianorolls are stacked and
plotted as a colored image with RGB channels.
- In 'hybrid' mode, the drum tracks are merged into a 'Drums' track,
while the other tracks are merged into an 'Others' track, and the
two merged tracks are then plotted separately.
track_label : {'name', 'program', 'family', 'off'}
A string that indicates what to use as labels for the tracks. When
`mode` is 'hybrid', all options other than 'off' will label the two
merged tracks with 'Drums' and 'Others'.
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmaps : tuple or list
The `matplotlib.colors.Colormap` instances or colormap codes to use.
- When `mode` is 'separate', each element will be passed to each
call of :func:`matplotlib.pyplot.imshow`. Defaults to ('Blues',
'Oranges', 'Greens', 'Reds', 'Purples', 'Greys').
- When `mode` is 'stacked', a color is assigned based on `cmaps` to
the pianoroll of each track. Defaults to ('hsv',).
- When `mode` is 'hybrid', the first (second) element is used in the
'Drums' ('Others') track. Defaults to ('Blues', 'Greens').
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
Returns
-------
fig : `matplotlib.figure.Figure` object
A :class:`matplotlib.figure.Figure` object.
axs : list
List of :class:`matplotlib.axes.Axes` objects.
"""
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib package is required for plotting "
"supports.")
def get_track_label(track_label, track=None):
"""Convenient function to get track labels"""
if track_label == 'name':
return track.name
elif track_label == 'program':
return pretty_midi.program_to_instrument_name(track.program)
elif track_label == 'family':
return pretty_midi.program_to_instrument_class(track.program)
elif track is None:
return track_label
def add_tracklabel(ax, track_label, track=None):
"""Convenient function for adding track labels"""
if not ax.get_ylabel():
return
ax.set_ylabel(get_track_label(track_label, track) + '\n\n'
+ ax.get_ylabel())
multitrack.check_validity()
if not multitrack.tracks:
raise ValueError("There is no track to plot.")
if mode not in ('separate', 'stacked', 'hybrid'):
raise ValueError("`mode` must be one of {'separate', 'stacked', "
"'hybrid'}.")
if track_label not in ('name', 'program', 'family', 'off'):
raise ValueError("`track_label` must be one of {'name', 'program', "
"'family'}.")
if cmaps is None:
if mode == 'separate':
cmaps = ('Blues', 'Oranges', 'Greens', 'Reds', 'Purples', 'Greys')
elif mode == 'stacked':
cmaps = ('hsv',)
else:
cmaps = ('Blues', 'Greens')
num_track = len(multitrack.tracks)
downbeats = multitrack.get_downbeat_steps()
if mode == 'separate':
if num_track > 1:
fig, axs = plt.subplots(num_track, sharex=True)
else:
fig, ax = plt.subplots()
axs = [ax]
for idx, track in enumerate(multitrack.tracks):
now_xticklabel = xticklabel if idx == num_track - 1 else False  # only label the bottom subplot
plot_pianoroll(axs[idx], track.pianoroll, track.is_drum,  # use each track's drum flag
multitrack.beat_resolution, downbeats, preset=preset,
cmap=cmaps[idx%len(cmaps)], xtick=xtick, ytick=ytick,
xticklabel=now_xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid,
grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
if track_label != 'off':
add_tracklabel(axs[idx], track_label, track)
if num_track > 1:
fig.subplots_adjust(hspace=0)
if filename is not None:
plt.savefig(filename)
return (fig, axs)
elif mode == 'stacked':
is_all_drum = all(track.is_drum for track in multitrack.tracks)
fig, ax = plt.subplots()
stacked = multitrack.get_stacked_pianorolls()
colormap = matplotlib.cm.get_cmap(cmaps[0])
cmatrix = colormap(np.arange(0, 1, 1 / num_track))[:, :3]
recolored = np.matmul(stacked.reshape(-1, num_track), cmatrix)
stacked = recolored.reshape(stacked.shape[:2] + (3, ))
plot_pianoroll(ax, stacked, is_all_drum, multitrack.beat_resolution,
downbeats, preset=preset, xtick=xtick, ytick=ytick,
xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
if track_label != 'off':
patches = [Patch(color=cmatrix[idx],
label=get_track_label(track_label, track))
for idx, track in enumerate(multitrack.tracks)]
plt.legend(handles=patches)
if filename is not None:
plt.savefig(filename)
return (fig, [ax])
elif mode == 'hybrid':
drums = [i for i, track in enumerate(multitrack.tracks)
if track.is_drum]
others = [i for i in range(len(multitrack.tracks)) if i not in drums]
merged_drums = multitrack.get_merged_pianoroll(drums)
merged_others = multitrack.get_merged_pianoroll(others)
fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
plot_pianoroll(ax1, merged_drums, True, multitrack.beat_resolution,
downbeats, preset=preset, cmap=cmaps[0], xtick=xtick,
ytick=ytick, xticklabel=xticklabel,
yticklabel=yticklabel, tick_loc=tick_loc,
tick_direction=tick_direction, label=label, grid=grid,
grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
plot_pianoroll(ax2, merged_others, False, multitrack.beat_resolution,
downbeats, preset=preset, cmap=cmaps[1], xtick=xtick, ytick=ytick,
xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
fig.subplots_adjust(hspace=0)
if track_label != 'off':
add_tracklabel(ax1, 'Drums')
add_tracklabel(ax2, 'Others')
if filename is not None:
plt.savefig(filename)
return (fig, [ax1, ax2]) | Plot the pianorolls or save a plot of them.
Parameters
----------
multitrack : pypianoroll.Multitrack
The :class:`pypianoroll.Multitrack` instance to be plotted.
filename : str
The filename to which the plot is saved. If None, save nothing.
mode : {'separate', 'stacked', 'hybrid'}
A string that indicates the plotting mode to use. Defaults to
'separate'.
- In 'separate' mode, all the tracks are plotted separately.
- In 'stacked' mode, a color is assigned based on `cmaps` to the
pianoroll of each track and the pianorolls are stacked and
plotted as a colored image with RGB channels.
- In 'hybrid' mode, the drum tracks are merged into a 'Drums' track,
while the other tracks are merged into an 'Others' track, and the
two merged tracks are then plotted separately.
track_label : {'name', 'program', 'family', 'off'}
A string that indicates what to use as labels for the tracks. When
`mode` is 'hybrid', all options other than 'off' will label the two
merged tracks with 'Drums' and 'Others'.
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmaps : tuple or list
The `matplotlib.colors.Colormap` instances or colormap codes to use.
- When `mode` is 'separate', each element will be passed to each
call of :func:`matplotlib.pyplot.imshow`. Defaults to ('Blues',
'Oranges', 'Greens', 'Reds', 'Purples', 'Greys').
- When `mode` is 'stacked', a color is assigned based on `cmaps` to
the pianoroll of each track. Defaults to ('hsv',).
- When `mode` is 'hybrid', the first (second) element is used in the
'Drums' ('Others') track. Defaults to ('Blues', 'Greens').
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
Returns
-------
fig : `matplotlib.figure.Figure` object
A :class:`matplotlib.figure.Figure` object.
axs : list
List of :class:`matplotlib.axes.Axes` objects. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/plot.py#L305-L528 |
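A sketch of two plotting modes of plot_multitrack; the Multitrack constructor keywords and the import path are assumptions:

import numpy as np
from pypianoroll import Multitrack, Track, plot_multitrack  # assumed import path

melody = np.zeros((96, 128), bool)
melody[:48, 72] = True
drums = np.zeros((96, 128), bool)
drums[::24, 36] = True  # a kick drum hit on every beat
multitrack = Multitrack(
    tracks=[Track(melody, name='melody'),
            Track(drums, is_drum=True, name='drums')],
    beat_resolution=24)
fig, axs = plot_multitrack(multitrack, mode='separate')
fig, axs = plot_multitrack(multitrack, mode='hybrid', cmaps=('Blues', 'Greens'))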
salu133445/pypianoroll | pypianoroll/plot.py | save_animation | def save_animation(filename, pianoroll, window, hop=1, fps=None, is_drum=False,
beat_resolution=None, downbeats=None, preset='default',
cmap='Blues', xtick='auto', ytick='octave', xticklabel=True,
yticklabel='auto', tick_loc=None, tick_direction='in',
label='both', grid='both', grid_linestyle=':',
grid_linewidth=.5, **kwargs):
"""
Save a pianoroll to an animation in video or GIF format.
Parameters
----------
filename : str
The filename to which the animation is saved.
pianoroll : np.ndarray
A pianoroll to be plotted. The values should be in [0, 1] when data
type is float, and in [0, 127] when data type is integer.
- For a 2D array, shape=(num_time_step, num_pitch).
- For a 3D array, shape=(num_time_step, num_pitch, num_channel),
where channels can be either RGB or RGBA.
window : int
The window size to be applied to `pianoroll` for the animation.
hop : int
The hop size to be applied to `pianoroll` for the animation.
fps : int
The number of frames per second in the resulting video or GIF file. Required.
is_drum : bool
A boolean that indicates whether it is a percussion track.
Defaults to False.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
A list of indices of the time steps that contain downbeats (i.e., the
first time step of each bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
"""
if not HAS_MOVIEPY:
raise ImportError("moviepy package is required for animation supports.")
def make_frame(t):
"""Return an image of the frame for time t."""
fig = plt.gcf()
ax = plt.gca()
f_idx = int(t * fps)
start = hop * f_idx
end = start + window
to_plot = transposed[:, start:end]
extent = (start, end - 1, 0, 127)
ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=vmin, vmax=vmax,
origin='lower', interpolation='none', extent=extent)
if xtick == 'beat':
next_major_idx = beat_resolution - start % beat_resolution
if start % beat_resolution < beat_resolution//2:
next_minor_idx = beat_resolution//2 - start % beat_resolution
else:
next_minor_idx = (beat_resolution//2 - start % beat_resolution
+ beat_resolution)
xticks_major = np.arange(next_major_idx, window, beat_resolution)
xticks_minor = np.arange(next_minor_idx, window, beat_resolution)
if end % beat_resolution < beat_resolution//2:
last_minor_idx = beat_resolution//2 - end % beat_resolution
else:
last_minor_idx = (beat_resolution//2 - end % beat_resolution
+ beat_resolution)
xtick_labels = np.arange((start + next_minor_idx)//beat_resolution,
(end + last_minor_idx)//beat_resolution)
ax.set_xticks(xticks_major)
ax.set_xticklabels('')
ax.set_xticks(xticks_minor, minor=True)
ax.set_xticklabels(xtick_labels, minor=True)
ax.tick_params(axis='x', which='minor', width=0)
return mplfig_to_npimage(fig)
if xtick == 'auto':
xtick = 'beat' if beat_resolution is not None else 'step'
fig, ax = plt.subplots()
plot_pianoroll(ax, pianoroll[:window], is_drum, beat_resolution, downbeats,
preset=preset, cmap=cmap, xtick=xtick, ytick=ytick,
xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
num_frame = int((pianoroll.shape[0] - window) / hop)
duration = int(num_frame / fps)
if (np.issubdtype(pianoroll.dtype, np.bool_)
or np.issubdtype(pianoroll.dtype, np.floating)):
vmax = 1
elif np.issubdtype(pianoroll.dtype, np.integer):
vmax = 127
else:
raise TypeError("Unsupported data type for `pianoroll`.")
vmin = 0
transposed = pianoroll.T
animation = VideoClip(make_frame, duration=duration)
if filename.endswith('.gif'):
animation.write_gif(filename, fps, **kwargs)
else:
animation.write_videofile(filename, fps, **kwargs)
plt.close() | python | def save_animation(filename, pianoroll, window, hop=1, fps=None, is_drum=False,
beat_resolution=None, downbeats=None, preset='default',
cmap='Blues', xtick='auto', ytick='octave', xticklabel=True,
yticklabel='auto', tick_loc=None, tick_direction='in',
label='both', grid='both', grid_linestyle=':',
grid_linewidth=.5, **kwargs):
"""
Save a pianoroll to an animation in video or GIF format.
Parameters
----------
filename : str
The filename to which the animation is saved.
pianoroll : np.ndarray
A pianoroll to be plotted. The values should be in [0, 1] when data
type is float, and in [0, 127] when data type is integer.
- For a 2D array, shape=(num_time_step, num_pitch).
- For a 3D array, shape=(num_time_step, num_pitch, num_channel),
where channels can be either RGB or RGBA.
window : int
The window size to be applied to `pianoroll` for the animation.
hop : int
The hop size to be applied to `pianoroll` for the animation.
fps : int
The number of frames per second in the resulting video or GIF file. Required.
is_drum : bool
A boolean that indicates whether it is a percussion track.
Defaults to False.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
A list of indices of the time steps that contain downbeats (i.e., the
first time step of each bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
"""
if not HAS_MOVIEPY:
raise ImportError("moviepy package is required for animation supports.")
def make_frame(t):
"""Return an image of the frame for time t."""
fig = plt.gcf()
ax = plt.gca()
f_idx = int(t * fps)
start = hop * f_idx
end = start + window
to_plot = transposed[:, start:end]
extent = (start, end - 1, 0, 127)
ax.imshow(to_plot, cmap=cmap, aspect='auto', vmin=vmin, vmax=vmax,
origin='lower', interpolation='none', extent=extent)
if xtick == 'beat':
next_major_idx = beat_resolution - start % beat_resolution
if start % beat_resolution < beat_resolution//2:
next_minor_idx = beat_resolution//2 - start % beat_resolution
else:
next_minor_idx = (beat_resolution//2 - start % beat_resolution
+ beat_resolution)
xticks_major = np.arange(next_major_idx, window, beat_resolution)
xticks_minor = np.arange(next_minor_idx, window, beat_resolution)
if end % beat_resolution < beat_resolution//2:
last_minor_idx = beat_resolution//2 - end % beat_resolution
else:
last_minor_idx = (beat_resolution//2 - end % beat_resolution
+ beat_resolution)
xtick_labels = np.arange((start + next_minor_idx)//beat_resolution,
(end + last_minor_idx)//beat_resolution)
ax.set_xticks(xticks_major)
ax.set_xticklabels('')
ax.set_xticks(xticks_minor, minor=True)
ax.set_xticklabels(xtick_labels, minor=True)
ax.tick_params(axis='x', which='minor', width=0)
return mplfig_to_npimage(fig)
if xtick == 'auto':
xtick = 'beat' if beat_resolution is not None else 'step'
fig, ax = plt.subplots()
plot_pianoroll(ax, pianoroll[:window], is_drum, beat_resolution, downbeats,
preset=preset, cmap=cmap, xtick=xtick, ytick=ytick,
xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
num_frame = int((pianoroll.shape[0] - window) / hop)
duration = int(num_frame / fps)
if (np.issubdtype(pianoroll.dtype, np.bool_)
or np.issubdtype(pianoroll.dtype, np.floating)):
vmax = 1
elif np.issubdtype(pianoroll.dtype, np.integer):
vmax = 127
else:
raise TypeError("Unsupported data type for `pianoroll`.")
vmin = 0
transposed = pianoroll.T
animation = VideoClip(make_frame, duration=duration)
if filename.endswith('.gif'):
animation.write_gif(filename, fps, **kwargs)
else:
animation.write_videofile(filename, fps, **kwargs)
plt.close() | Save a pianoroll to an animation in video or GIF format.
Parameters
----------
filename : str
The filename to which the animation is saved.
pianoroll : np.ndarray
A pianoroll to be plotted. The values should be in [0, 1] when data
type is float, and in [0, 127] when data type is integer.
- For a 2D array, shape=(num_time_step, num_pitch).
- For a 3D array, shape=(num_time_step, num_pitch, num_channel),
where channels can be either RGB or RGBA.
window : int
The window size to be applied to `pianoroll` for the animation.
hop : int
The hop size to be applied to `pianoroll` for the animation.
fps : int
The number of frames per second in the resulting video or GIF file. Required.
is_drum : bool
A boolean that indicates whether it is a percussion track.
Defaults to False.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
A list of indices of the time steps that contain downbeats (i.e., the
first time step of each bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given, and to 'step' otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Available elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates the direction of the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/plot.py#L530-L681 |
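A sketch for save_animation; note that `fps` must be supplied because the function body divides by it. The scale pattern is illustrative only:

import numpy as np
from pypianoroll import save_animation  # assumed import path

pianoroll = np.zeros((960, 128), bool)
steps = np.arange(960)
pianoroll[steps, 48 + (steps // 24) % 24] = True  # a slow chromatic scale
save_animation('scale.gif', pianoroll, window=96, hop=24, fps=4,
               beat_resolution=24)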
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.append_track | def append_track(self, track=None, pianoroll=None, program=0, is_drum=False,
name='unknown'):
"""
Append a multitrack.Track instance to the track list or create a new
multitrack.Track object and append it to the track list.
Parameters
----------
track : pypianoroll.Track
A :class:`pypianoroll.Track` instance to be appended to the track
list.
pianoroll : np.ndarray, shape=(n_time_steps, 128)
A pianoroll matrix. The first and second dimensions represent time
and pitch, respectively. Available datatypes are bool, int and
float. Only effective when `track` is None.
program : int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
Only effective when `track` is None.
is_drum : bool
A boolean that indicates whether it is a percussion track.
Defaults to False. Only effective when `track` is None.
name : str
The name of the track. Defaults to 'unknown'. Only effective when
`track` is None.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if track is not None:
if not isinstance(track, Track):
raise TypeError("`track` must be a pypianoroll.Track instance.")
track.check_validity()
else:
track = Track(pianoroll, program, is_drum, name)
self.tracks.append(track) | python | def append_track(self, track=None, pianoroll=None, program=0, is_drum=False,
name='unknown'):
"""
Append a multitrack.Track instance to the track list or create a new
multitrack.Track object and append it to the track list.
Parameters
----------
track : pypianoroll.Track
A :class:`pypianoroll.Track` instance to be appended to the track
list.
pianoroll : np.ndarray, shape=(n_time_steps, 128)
A pianoroll matrix. The first and second dimensions represent time
and pitch, respectively. Available datatypes are bool, int and
float. Only effective when `track` is None.
program : int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
Only effective when `track` is None.
is_drum : bool
A boolean that indicates whether it is a percussion track.
Defaults to False. Only effective when `track` is None.
name : str
The name of the track. Defaults to 'unknown'. Only effective when
`track` is None.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if track is not None:
if not isinstance(track, Track):
raise TypeError("`track` must be a pypianoroll.Track instance.")
track.check_validity()
else:
track = Track(pianoroll, program, is_drum, name)
self.tracks.append(track) | Append a multitrack.Track instance to the track list or create a new
multitrack.Track object and append it to the track list.
Parameters
----------
track : pypianoroll.Track
A :class:`pypianoroll.Track` instance to be appended to the track
list.
pianoroll : np.ndarray, shape=(n_time_steps, 128)
A pianoroll matrix. The first and second dimensions represent time
and pitch, respectively. Available datatypes are bool, int and
float. Only effective when `track` is None.
program : int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
Only effective when `track` is None.
is_drum : bool
A boolean that indicates whether it is a percussion track.
Defaults to False. Only effective when `track` is None.
name : str
The name of the track. Defaults to 'unknown'. Only effective when
`track` is None.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L143-L180 |
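Both calling conventions of append_track in one sketch (the Multitrack constructor keywords are assumptions):

import numpy as np
from pypianoroll import Multitrack, Track

multitrack = Multitrack(tracks=[], beat_resolution=24)
# Append an existing Track instance
multitrack.append_track(track=Track(np.zeros((96, 128), bool), name='pad'))
# Or let append_track build the Track from raw arguments
multitrack.append_track(pianoroll=np.zeros((96, 128), bool),
                        program=33, name='bass')  # 33: Electric Bass (finger)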
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.check_validity | def check_validity(self):
"""
Raise an error if any invalid attribute is found.
Raises
------
TypeError
If an attribute has an invalid type.
ValueError
If an attribute has an invalid value (of the correct type).
"""
# tracks
for track in self.tracks:
if not isinstance(track, Track):
raise TypeError("`tracks` must be a list of "
"`pypianoroll.Track` instances.")
track.check_validity()
# tempo
if not isinstance(self.tempo, np.ndarray):
raise TypeError("`tempo` must be int or a numpy array.")
elif not np.issubdtype(self.tempo.dtype, np.number):
raise TypeError("Data type of `tempo` must be a subdtype of "
"np.number.")
elif self.tempo.ndim != 1:
raise ValueError("`tempo` must be a 1D numpy array.")
if np.any(self.tempo <= 0.0):
raise ValueError("`tempo` should contain only positive numbers.")
# downbeat
if self.downbeat is not None:
if not isinstance(self.downbeat, np.ndarray):
raise TypeError("`downbeat` must be a numpy array.")
if not np.issubdtype(self.downbeat.dtype, np.bool_):
raise TypeError("Data type of `downbeat` must be bool.")
if self.downbeat.ndim != 1:
raise ValueError("`downbeat` must be a 1D numpy array.")
# beat_resolution
if not isinstance(self.beat_resolution, int):
raise TypeError("`beat_resolution` must be int.")
if self.beat_resolution < 1:
raise ValueError("`beat_resolution` must be a positive integer.")
# name
if not isinstance(self.name, string_types):
raise TypeError("`name` must be a string.") | python | def check_validity(self):
"""
Raise an error if any invalid attribute is found.
Raises
------
TypeError
If an attribute has an invalid type.
ValueError
If an attribute has an invalid value (of the correct type).
"""
# tracks
for track in self.tracks:
if not isinstance(track, Track):
raise TypeError("`tracks` must be a list of "
"`pypianoroll.Track` instances.")
track.check_validity()
# tempo
if not isinstance(self.tempo, np.ndarray):
raise TypeError("`tempo` must be int or a numpy array.")
elif not np.issubdtype(self.tempo.dtype, np.number):
raise TypeError("Data type of `tempo` must be a subdtype of "
"np.number.")
elif self.tempo.ndim != 1:
raise ValueError("`tempo` must be a 1D numpy array.")
if np.any(self.tempo <= 0.0):
raise ValueError("`tempo` should contain only positive numbers.")
# downbeat
if self.downbeat is not None:
if not isinstance(self.downbeat, np.ndarray):
raise TypeError("`downbeat` must be a numpy array.")
if not np.issubdtype(self.downbeat.dtype, np.bool_):
raise TypeError("Data type of `downbeat` must be bool.")
if self.downbeat.ndim != 1:
raise ValueError("`downbeat` must be a 1D numpy array.")
# beat_resolution
if not isinstance(self.beat_resolution, int):
raise TypeError("`beat_resolution` must be int.")
if self.beat_resolution < 1:
raise ValueError("`beat_resolution` must be a positive integer.")
# name
if not isinstance(self.name, string_types):
raise TypeError("`name` must be a string.") | Raise an error if any invalid attribute found.
Raises
------
TypeError
If an attribute has an invalid type.
ValueError
If an attribute has an invalid value (of the correct type). | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L210-L253 |
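A sketch showing check_validity catching a bad attribute (constructor keywords assumed):

import numpy as np
from pypianoroll import Multitrack, Track

multitrack = Multitrack(tracks=[Track(np.zeros((96, 128), bool))],
                        beat_resolution=24)
multitrack.check_validity()  # passes silently when everything is valid
multitrack.beat_resolution = 0
try:
    multitrack.check_validity()
except ValueError as err:
    print(err)  # `beat_resolution` must be a positive integer.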
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.clip | def clip(self, lower=0, upper=127):
"""
Clip the pianorolls of all tracks by the given lower and upper bounds.
Parameters
----------
lower : int or float
The lower bound to clip the pianorolls. Defaults to 0.
upper : int or float
The upper bound to clip the pianorolls. Defaults to 127.
"""
for track in self.tracks:
track.clip(lower, upper) | python | def clip(self, lower=0, upper=127):
"""
Clip the pianorolls of all tracks by the given lower and upper bounds.
Parameters
----------
lower : int or float
The lower bound to clip the pianorolls. Defaults to 0.
upper : int or float
The upper bound to clip the pianorolls. Defaults to 127.
"""
for track in self.tracks:
track.clip(lower, upper) | Clip the pianorolls of all tracks by the given lower and upper bounds.
Parameters
----------
lower : int or float
The lower bound to clip the pianorolls. Defaults to 0.
upper : int or float
The upper bound to clip the pianorolls. Defaults to 127. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L255-L268 |
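A clip sketch; velocities above the MIDI maximum are clamped in place (constructor keywords assumed):

import numpy as np
from pypianoroll import Multitrack, Track

loud = np.full((96, 128), 200, np.int16)  # 200 exceeds the MIDI maximum of 127
multitrack = Multitrack(tracks=[Track(loud)], beat_resolution=24)
multitrack.clip()  # defaults: lower=0, upper=127
print(multitrack.tracks[0].pianoroll.max())  # expected: 127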
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.get_active_length | def get_active_length(self):
"""
Return the maximum active length (i.e., without trailing silence) among
the pianorolls of all tracks. The unit is time step.
Returns
-------
active_length : int
The maximum active length (i.e., without trailing silence) among the
pianorolls of all tracks. The unit is time step.
"""
active_length = 0
for track in self.tracks:
now_length = track.get_active_length()
if active_length < now_length:
active_length = now_length
return active_length | python | def get_active_length(self):
"""
Return the maximum active length (i.e., without trailing silence) among
the pianorolls of all tracks. The unit is time step.
Returns
-------
active_length : int
The maximum active length (i.e., without trailing silence) among the
pianorolls of all tracks. The unit is time step.
"""
active_length = 0
for track in self.tracks:
now_length = track.get_active_length()
if active_length < now_length:
active_length = now_length
return active_length | Return the maximum active length (i.e., without trailing silence) among
the pianorolls of all tracks. The unit is time step.
Returns
-------
active_length : int
The maximum active length (i.e., without trailing silence) among the
pianorolls of all tracks. The unit is time step. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L282-L299 |
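A sketch for get_active_length; trailing silence does not count:

import numpy as np
from pypianoroll import Multitrack, Track

pianoroll = np.zeros((192, 128), bool)
pianoroll[:96, 60] = True  # active for 96 steps, then 96 steps of silence
multitrack = Multitrack(tracks=[Track(pianoroll)], beat_resolution=24)
print(multitrack.get_active_length())  # expected: 96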
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.get_active_pitch_range | def get_active_pitch_range(self):
"""
Return the active pitch range of the pianorolls of all tracks as a tuple
(lowest, highest).
Returns
-------
lowest : int
The lowest active pitch among the pianorolls of all tracks.
highest : int
The highest active pitch among the pianorolls of all tracks.
"""
lowest, highest = self.tracks[0].get_active_pitch_range()
if len(self.tracks) > 1:
for track in self.tracks[1:]:
low, high = track.get_active_pitch_range()
if low < lowest:
lowest = low
if high > highest:
highest = high
return lowest, highest | python | def get_active_pitch_range(self):
"""
Return the active pitch range of the pianorolls of all tracks as a tuple
(lowest, highest).
Returns
-------
lowest : int
The lowest active pitch among the pianorolls of all tracks.
highest : int
The highest active pitch among the pianorolls of all tracks.
"""
lowest, highest = self.tracks[0].get_active_pitch_range()
if len(self.tracks) > 1:
for track in self.tracks[1:]:
low, high = track.get_active_pitch_range()
if low < lowest:
lowest = low
if high > highest:
highest = high
return lowest, highest | Return the active pitch range of the pianorolls of all tracks as a tuple
(lowest, highest).
Returns
-------
lowest : int
The lowest active pitch among the pianorolls of all tracks.
highest : int
The highest active pitch among the pianorolls of all tracks. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L301-L322 |
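A sketch for get_active_pitch_range:

import numpy as np
from pypianoroll import Multitrack, Track

pianoroll = np.zeros((96, 128), bool)
pianoroll[:, 60] = True  # C4
pianoroll[:, 72] = True  # C5
multitrack = Multitrack(tracks=[Track(pianoroll)], beat_resolution=24)
print(multitrack.get_active_pitch_range())  # expected: (60, 72)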
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.get_downbeat_steps | def get_downbeat_steps(self):
"""
Return the indices of time steps that contain downbeats.
Returns
-------
downbeat_steps : list
The indices of time steps that contain downbeats.
"""
if self.downbeat is None:
return []
downbeat_steps = np.nonzero(self.downbeat)[0].tolist()
return downbeat_steps | python | def get_downbeat_steps(self):
"""
Return the indices of time steps that contain downbeats.
Returns
-------
downbeat_steps : list
The indices of time steps that contain downbeats.
"""
if self.downbeat is None:
return []
downbeat_steps = np.nonzero(self.downbeat)[0].tolist()
return downbeat_steps | Return the indices of time steps that contain downbeats.
Returns
-------
downbeat_steps : list
The indices of time steps that contain downbeats. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L324-L337 |
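A sketch for get_downbeat_steps, assuming the Multitrack constructor accepts a boolean `downbeat` array:

import numpy as np
from pypianoroll import Multitrack, Track

downbeat = np.zeros(192, bool)
downbeat[::96] = True  # a downbeat every four beats at beat_resolution=24
multitrack = Multitrack(tracks=[Track(np.zeros((192, 128), bool))],
                        downbeat=downbeat, beat_resolution=24)
print(multitrack.get_downbeat_steps())  # expected: [0, 96]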
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.get_empty_tracks | def get_empty_tracks(self):
"""
Return the indices of tracks with empty pianorolls.
Returns
-------
empty_track_indices : list
The indices of tracks with empty pianorolls.
"""
empty_track_indices = [idx for idx, track in enumerate(self.tracks)
if not np.any(track.pianoroll)]
return empty_track_indices | python | def get_empty_tracks(self):
"""
Return the indices of tracks with empty pianorolls.
Returns
-------
empty_track_indices : list
The indices of tracks with empty pianorolls.
"""
empty_track_indices = [idx for idx, track in enumerate(self.tracks)
if not np.any(track.pianoroll)]
return empty_track_indices | Return the indices of tracks with empty pianorolls.
Returns
-------
empty_track_indices : list
The indices of tracks with empty pianorolls. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L339-L351 |
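A sketch for get_empty_tracks:

import numpy as np
from pypianoroll import Multitrack, Track

active = np.zeros((96, 128), bool)
active[:, 60] = True
multitrack = Multitrack(
    tracks=[Track(active, name='melody'),
            Track(np.zeros((96, 128), bool), name='empty')],
    beat_resolution=24)
print(multitrack.get_empty_tracks())  # expected: [1]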
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.get_max_length | def get_max_length(self):
"""
Return the maximum length of the pianorolls along the time axis (in
time step).
Returns
-------
max_length : int
The maximum length of the pianorolls along the time axis (in time
step).
"""
max_length = 0
for track in self.tracks:
if max_length < track.pianoroll.shape[0]:
max_length = track.pianoroll.shape[0]
return max_length | python | def get_max_length(self):
"""
Return the maximum length of the pianorolls along the time axis (in
time step).
Returns
-------
max_length : int
The maximum length of the pianorolls along the time axis (in time
step).
"""
max_length = 0
for track in self.tracks:
if max_length < track.pianoroll.shape[0]:
max_length = track.pianoroll.shape[0]
return max_length | Return the maximum length of the pianorolls along the time axis (in
time step).
Returns
-------
max_length : int
The maximum length of the pianorolls along the time axis (in time
step). | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L353-L369 |
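A sketch for get_max_length with tracks of different lengths:

import numpy as np
from pypianoroll import Multitrack, Track

multitrack = Multitrack(
    tracks=[Track(np.zeros((96, 128), bool)),
            Track(np.zeros((240, 128), bool))],
    beat_resolution=24)
print(multitrack.get_max_length())  # expected: 240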
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.get_merged_pianoroll | def get_merged_pianoroll(self, mode='sum'):
"""
Return the merged pianoroll.
Parameters
----------
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
            track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of all the
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among all the
pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the pianorolls has nonzero value at that pixel;
False if all pianorolls are inactive (zero-valued) at that pixel.
Returns
-------
merged : np.ndarray, shape=(n_time_steps, 128)
The merged pianoroll.
"""
stacked = self.get_stacked_pianoroll()
if mode == 'any':
merged = np.any(stacked, axis=2)
elif mode == 'sum':
merged = np.sum(stacked, axis=2)
elif mode == 'max':
merged = np.max(stacked, axis=2)
else:
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
return merged | python | def get_merged_pianoroll(self, mode='sum'):
"""
Return the merged pianoroll.
Parameters
----------
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
            track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of all the
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among all the
pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the pianorolls has nonzero value at that pixel;
False if all pianorolls are inactive (zero-valued) at that pixel.
Returns
-------
merged : np.ndarray, shape=(n_time_steps, 128)
The merged pianoroll.
"""
stacked = self.get_stacked_pianoroll()
if mode == 'any':
merged = np.any(stacked, axis=2)
elif mode == 'sum':
merged = np.sum(stacked, axis=2)
elif mode == 'max':
merged = np.max(stacked, axis=2)
else:
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
return merged | Return the merged pianoroll.
Parameters
----------
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
            track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of all the
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among all the
pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the pianorolls has nonzero value at that pixel;
False if all pianorolls are inactive (zero-valued) at that pixel.
Returns
-------
merged : np.ndarray, shape=(n_time_steps, 128)
The merged pianoroll. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L371-L407 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.get_stacked_pianoroll | def get_stacked_pianoroll(self):
"""
Return a stacked multitrack pianoroll. The shape of the return array is
(n_time_steps, 128, n_tracks).
Returns
-------
stacked : np.ndarray, shape=(n_time_steps, 128, n_tracks)
The stacked pianoroll.
"""
multitrack = deepcopy(self)
multitrack.pad_to_same()
stacked = np.stack([track.pianoroll for track in multitrack.tracks], -1)
return stacked | python | def get_stacked_pianoroll(self):
"""
Return a stacked multitrack pianoroll. The shape of the return array is
(n_time_steps, 128, n_tracks).
Returns
-------
stacked : np.ndarray, shape=(n_time_steps, 128, n_tracks)
The stacked pianoroll.
"""
multitrack = deepcopy(self)
multitrack.pad_to_same()
stacked = np.stack([track.pianoroll for track in multitrack.tracks], -1)
return stacked | Return a stacked multitrack pianoroll. The shape of the return array is
(n_time_steps, 128, n_tracks).
Returns
-------
stacked : np.ndarray, shape=(n_time_steps, 128, n_tracks)
The stacked pianoroll. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L414-L428 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.load | def load(self, filename):
"""
        Load an npz file. Supports only files previously saved by
:meth:`pypianoroll.Multitrack.save`.
Notes
-----
Attribute values will all be overwritten.
Parameters
----------
filename : str
The name of the npz file to be loaded.
"""
def reconstruct_sparse(target_dict, name):
"""Return a reconstructed instance of `scipy.sparse.csc_matrix`."""
return csc_matrix((target_dict[name+'_csc_data'],
target_dict[name+'_csc_indices'],
target_dict[name+'_csc_indptr']),
shape=target_dict[name+'_csc_shape']).toarray()
with np.load(filename) as loaded:
if 'info.json' not in loaded:
raise ValueError("Cannot find 'info.json' in the npz file.")
info_dict = json.loads(loaded['info.json'].decode('utf-8'))
self.name = info_dict['name']
self.beat_resolution = info_dict['beat_resolution']
self.tempo = loaded['tempo']
if 'downbeat' in loaded.files:
self.downbeat = loaded['downbeat']
else:
self.downbeat = None
idx = 0
self.tracks = []
while str(idx) in info_dict:
pianoroll = reconstruct_sparse(
loaded, 'pianoroll_{}'.format(idx))
track = Track(pianoroll, info_dict[str(idx)]['program'],
info_dict[str(idx)]['is_drum'],
info_dict[str(idx)]['name'])
self.tracks.append(track)
idx += 1
self.check_validity() | python | def load(self, filename):
"""
        Load an npz file. Supports only files previously saved by
:meth:`pypianoroll.Multitrack.save`.
Notes
-----
Attribute values will all be overwritten.
Parameters
----------
filename : str
The name of the npz file to be loaded.
"""
def reconstruct_sparse(target_dict, name):
"""Return a reconstructed instance of `scipy.sparse.csc_matrix`."""
return csc_matrix((target_dict[name+'_csc_data'],
target_dict[name+'_csc_indices'],
target_dict[name+'_csc_indptr']),
shape=target_dict[name+'_csc_shape']).toarray()
with np.load(filename) as loaded:
if 'info.json' not in loaded:
raise ValueError("Cannot find 'info.json' in the npz file.")
info_dict = json.loads(loaded['info.json'].decode('utf-8'))
self.name = info_dict['name']
self.beat_resolution = info_dict['beat_resolution']
self.tempo = loaded['tempo']
if 'downbeat' in loaded.files:
self.downbeat = loaded['downbeat']
else:
self.downbeat = None
idx = 0
self.tracks = []
while str(idx) in info_dict:
pianoroll = reconstruct_sparse(
loaded, 'pianoroll_{}'.format(idx))
track = Track(pianoroll, info_dict[str(idx)]['program'],
info_dict[str(idx)]['is_drum'],
info_dict[str(idx)]['name'])
self.tracks.append(track)
idx += 1
        self.check_validity() | Load an npz file. Supports only files previously saved by
:meth:`pypianoroll.Multitrack.save`.
Notes
-----
Attribute values will all be overwritten.
Parameters
----------
filename : str
The name of the npz file to be loaded. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L438-L484 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.merge_tracks | def merge_tracks(self, track_indices=None, mode='sum', program=0,
is_drum=False, name='merged', remove_merged=False):
"""
Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
            track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
        program : int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
            A boolean that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if mode not in ('max', 'sum', 'any'):
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
merged = self[track_indices].get_merged_pianoroll(mode)
merged_track = Track(merged, program, is_drum, name)
self.append_track(merged_track)
if remove_merged:
self.remove_tracks(track_indices) | python | def merge_tracks(self, track_indices=None, mode='sum', program=0,
is_drum=False, name='merged', remove_merged=False):
"""
Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
            track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
        program : int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
            A boolean that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set
"""
if mode not in ('max', 'sum', 'any'):
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
merged = self[track_indices].get_merged_pianoroll(mode)
merged_track = Track(merged, program, is_drum, name)
self.append_track(merged_track)
if remove_merged:
self.remove_tracks(track_indices) | Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
            track axis. Defaults to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
        program : int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
            A boolean that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L486-L538 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.pad_to_same | def pad_to_same(self):
"""Pad shorter pianorolls with zeros at the end along the time axis to
make the resulting pianoroll lengths the same as the maximum pianoroll
length among all the tracks."""
max_length = self.get_max_length()
for track in self.tracks:
if track.pianoroll.shape[0] < max_length:
track.pad(max_length - track.pianoroll.shape[0]) | python | def pad_to_same(self):
"""Pad shorter pianorolls with zeros at the end along the time axis to
make the resulting pianoroll lengths the same as the maximum pianoroll
length among all the tracks."""
max_length = self.get_max_length()
for track in self.tracks:
if track.pianoroll.shape[0] < max_length:
track.pad(max_length - track.pianoroll.shape[0]) | Pad shorter pianorolls with zeros at the end along the time axis to
make the resulting pianoroll lengths the same as the maximum pianoroll
length among all the tracks. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L581-L588 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.parse_midi | def parse_midi(self, filename, **kwargs):
"""
Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation.
"""
pm = pretty_midi.PrettyMIDI(filename)
self.parse_pretty_midi(pm, **kwargs) | python | def parse_midi(self, filename, **kwargs):
"""
Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation.
"""
pm = pretty_midi.PrettyMIDI(filename)
self.parse_pretty_midi(pm, **kwargs) | Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L590-L604 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.parse_pretty_midi | def parse_pretty_midi(self, pm, mode='max', algorithm='normal',
binarized=False, skip_empty_tracks=True,
collect_onsets_only=False, threshold=0,
first_beat_time=None):
"""
Parse a :class:`pretty_midi.PrettyMIDI` object. The data type of the
        resulting pianorolls is automatically determined (int if `mode` is
'sum', np.uint8 if `mode` is 'max' and `binarized` is False, bool if
`mode` is 'max' and `binarized` is True).
Parameters
----------
pm : `pretty_midi.PrettyMIDI` object
A :class:`pretty_midi.PrettyMIDI` object to be parsed.
mode : {'max', 'sum'}
A string that indicates the merging strategy to apply to duplicate
            notes. Defaults to 'max'.
algorithm : {'normal', 'strict', 'custom'}
A string that indicates the method used to get the location of the
first beat. Notes before it will be dropped unless an incomplete
beat before it is found (see Notes for more information). Defaults
to 'normal'.
- The 'normal' algorithm estimates the location of the first beat by
:meth:`pretty_midi.PrettyMIDI.estimate_beat_start`.
- The 'strict' algorithm sets the first beat at the event time of
          the first time signature change. Raises a ValueError if no time
signature change event is found.
- The 'custom' algorithm takes argument `first_beat_time` as the
location of the first beat.
binarized : bool
True to binarize the parsed pianorolls before merging duplicate
notes. False to use the original parsed pianorolls. Defaults to
False.
skip_empty_tracks : bool
True to remove tracks with empty pianorolls and compress the pitch
range of the parsed pianorolls. False to retain the empty tracks
            and use the original parsed pianorolls. Defaults to True.
collect_onsets_only : bool
True to collect only the onset of the notes (i.e. note on events) in
all tracks, where the note off and duration information are dropped.
False to parse regular pianorolls.
threshold : int or float
A threshold used to binarize the parsed pianorolls. Only effective
when `binarized` is True. Defaults to zero.
first_beat_time : float
The location (in sec) of the first beat. Required and only effective
when using 'custom' algorithm.
Notes
-----
If an incomplete beat before the first beat is found, an additional beat
will be added before the (estimated) beat starting time. However, notes
before the (estimated) beat starting time for more than one beat are
dropped.
"""
if mode not in ('max', 'sum'):
raise ValueError("`mode` must be one of {'max', 'sum'}.")
if algorithm not in ('strict', 'normal', 'custom'):
raise ValueError("`algorithm` must be one of {'normal', 'strict', "
" 'custom'}.")
if algorithm == 'custom':
if not isinstance(first_beat_time, (int, float)):
raise TypeError("`first_beat_time` must be int or float when "
"using 'custom' algorithm.")
if first_beat_time < 0.0:
                raise ValueError("`first_beat_time` must be a nonnegative number "
"when using 'custom' algorithm.")
# Set first_beat_time for 'normal' and 'strict' modes
if algorithm == 'normal':
if pm.time_signature_changes:
pm.time_signature_changes.sort(key=lambda x: x.time)
first_beat_time = pm.time_signature_changes[0].time
else:
first_beat_time = pm.estimate_beat_start()
elif algorithm == 'strict':
if not pm.time_signature_changes:
raise ValueError("No time signature change event found. Unable "
"to set beat start time using 'strict' "
"algorithm.")
pm.time_signature_changes.sort(key=lambda x: x.time)
first_beat_time = pm.time_signature_changes[0].time
# get tempo change event times and contents
tc_times, tempi = pm.get_tempo_changes()
arg_sorted = np.argsort(tc_times)
tc_times = tc_times[arg_sorted]
tempi = tempi[arg_sorted]
beat_times = pm.get_beats(first_beat_time)
if not len(beat_times):
raise ValueError("Cannot get beat timings to quantize pianoroll.")
beat_times.sort()
n_beats = len(beat_times)
n_time_steps = self.beat_resolution * n_beats
# Parse downbeat array
if not pm.time_signature_changes:
self.downbeat = None
else:
self.downbeat = np.zeros((n_time_steps,), bool)
self.downbeat[0] = True
start = 0
end = start
for idx, tsc in enumerate(pm.time_signature_changes[:-1]):
end += np.searchsorted(beat_times[end:],
pm.time_signature_changes[idx+1].time)
start_idx = start * self.beat_resolution
end_idx = end * self.beat_resolution
stride = tsc.numerator * self.beat_resolution
self.downbeat[start_idx:end_idx:stride] = True
start = end
# Build tempo array
one_more_beat = 2 * beat_times[-1] - beat_times[-2]
beat_times_one_more = np.append(beat_times, one_more_beat)
bpm = 60. / np.diff(beat_times_one_more)
        # One tempo value per time step: repeat each beat's bpm
        # `beat_resolution` times (the original np.tile(bpm, (1, 24)) repeated
        # the whole array wholesale and hardcoded a resolution of 24).
        self.tempo = np.repeat(bpm, self.beat_resolution)
# Parse pianoroll
self.tracks = []
for instrument in pm.instruments:
if binarized:
pianoroll = np.zeros((n_time_steps, 128), bool)
elif mode == 'max':
pianoroll = np.zeros((n_time_steps, 128), np.uint8)
else:
pianoroll = np.zeros((n_time_steps, 128), int)
pitches = np.array([note.pitch for note in instrument.notes
if note.end > first_beat_time])
note_on_times = np.array([note.start for note in instrument.notes
if note.end > first_beat_time])
beat_indices = np.searchsorted(beat_times, note_on_times) - 1
remained = note_on_times - beat_times[beat_indices]
ratios = remained / (beat_times_one_more[beat_indices + 1]
- beat_times[beat_indices])
rounded = np.round((beat_indices + ratios) * self.beat_resolution)
note_ons = rounded.astype(int)
if collect_onsets_only:
pianoroll[note_ons, pitches] = True
elif instrument.is_drum:
if binarized:
pianoroll[note_ons, pitches] = True
else:
velocities = [note.velocity for note in instrument.notes
if note.end > first_beat_time]
pianoroll[note_ons, pitches] = velocities
else:
note_off_times = np.array([note.end for note in instrument.notes
if note.end > first_beat_time])
beat_indices = np.searchsorted(beat_times, note_off_times) - 1
remained = note_off_times - beat_times[beat_indices]
ratios = remained / (beat_times_one_more[beat_indices + 1]
- beat_times[beat_indices])
note_offs = ((beat_indices + ratios)
* self.beat_resolution).astype(int)
for idx, start in enumerate(note_ons):
end = note_offs[idx]
velocity = instrument.notes[idx].velocity
if velocity < 1:
continue
if binarized and velocity <= threshold:
continue
if start > 0 and start < n_time_steps:
if pianoroll[start - 1, pitches[idx]]:
pianoroll[start - 1, pitches[idx]] = 0
if end < n_time_steps - 1:
if pianoroll[end, pitches[idx]]:
end -= 1
if binarized:
if mode == 'sum':
pianoroll[start:end, pitches[idx]] += 1
elif mode == 'max':
pianoroll[start:end, pitches[idx]] = True
elif mode == 'sum':
pianoroll[start:end, pitches[idx]] += velocity
elif mode == 'max':
maximum = np.maximum(
pianoroll[start:end, pitches[idx]], velocity)
pianoroll[start:end, pitches[idx]] = maximum
if skip_empty_tracks and not np.any(pianoroll):
continue
track = Track(pianoroll, int(instrument.program),
instrument.is_drum, instrument.name)
self.tracks.append(track)
self.check_validity() | python | def parse_pretty_midi(self, pm, mode='max', algorithm='normal',
binarized=False, skip_empty_tracks=True,
collect_onsets_only=False, threshold=0,
first_beat_time=None):
"""
Parse a :class:`pretty_midi.PrettyMIDI` object. The data type of the
        resulting pianorolls is automatically determined (int if `mode` is
'sum', np.uint8 if `mode` is 'max' and `binarized` is False, bool if
`mode` is 'max' and `binarized` is True).
Parameters
----------
pm : `pretty_midi.PrettyMIDI` object
A :class:`pretty_midi.PrettyMIDI` object to be parsed.
mode : {'max', 'sum'}
A string that indicates the merging strategy to apply to duplicate
            notes. Defaults to 'max'.
algorithm : {'normal', 'strict', 'custom'}
A string that indicates the method used to get the location of the
first beat. Notes before it will be dropped unless an incomplete
beat before it is found (see Notes for more information). Defaults
to 'normal'.
- The 'normal' algorithm estimates the location of the first beat by
:meth:`pretty_midi.PrettyMIDI.estimate_beat_start`.
- The 'strict' algorithm sets the first beat at the event time of
          the first time signature change. Raises a ValueError if no time
signature change event is found.
- The 'custom' algorithm takes argument `first_beat_time` as the
location of the first beat.
binarized : bool
True to binarize the parsed pianorolls before merging duplicate
notes. False to use the original parsed pianorolls. Defaults to
False.
skip_empty_tracks : bool
True to remove tracks with empty pianorolls and compress the pitch
range of the parsed pianorolls. False to retain the empty tracks
            and use the original parsed pianorolls. Defaults to True.
collect_onsets_only : bool
True to collect only the onset of the notes (i.e. note on events) in
all tracks, where the note off and duration information are dropped.
False to parse regular pianorolls.
threshold : int or float
A threshold used to binarize the parsed pianorolls. Only effective
when `binarized` is True. Defaults to zero.
first_beat_time : float
The location (in sec) of the first beat. Required and only effective
when using 'custom' algorithm.
Notes
-----
If an incomplete beat before the first beat is found, an additional beat
will be added before the (estimated) beat starting time. However, notes
before the (estimated) beat starting time for more than one beat are
dropped.
"""
if mode not in ('max', 'sum'):
raise ValueError("`mode` must be one of {'max', 'sum'}.")
if algorithm not in ('strict', 'normal', 'custom'):
raise ValueError("`algorithm` must be one of {'normal', 'strict', "
" 'custom'}.")
if algorithm == 'custom':
if not isinstance(first_beat_time, (int, float)):
raise TypeError("`first_beat_time` must be int or float when "
"using 'custom' algorithm.")
if first_beat_time < 0.0:
                raise ValueError("`first_beat_time` must be a nonnegative number "
"when using 'custom' algorithm.")
# Set first_beat_time for 'normal' and 'strict' modes
if algorithm == 'normal':
if pm.time_signature_changes:
pm.time_signature_changes.sort(key=lambda x: x.time)
first_beat_time = pm.time_signature_changes[0].time
else:
first_beat_time = pm.estimate_beat_start()
elif algorithm == 'strict':
if not pm.time_signature_changes:
raise ValueError("No time signature change event found. Unable "
"to set beat start time using 'strict' "
"algorithm.")
pm.time_signature_changes.sort(key=lambda x: x.time)
first_beat_time = pm.time_signature_changes[0].time
# get tempo change event times and contents
tc_times, tempi = pm.get_tempo_changes()
arg_sorted = np.argsort(tc_times)
tc_times = tc_times[arg_sorted]
tempi = tempi[arg_sorted]
beat_times = pm.get_beats(first_beat_time)
if not len(beat_times):
raise ValueError("Cannot get beat timings to quantize pianoroll.")
beat_times.sort()
n_beats = len(beat_times)
n_time_steps = self.beat_resolution * n_beats
# Parse downbeat array
if not pm.time_signature_changes:
self.downbeat = None
else:
self.downbeat = np.zeros((n_time_steps,), bool)
self.downbeat[0] = True
start = 0
end = start
for idx, tsc in enumerate(pm.time_signature_changes[:-1]):
end += np.searchsorted(beat_times[end:],
pm.time_signature_changes[idx+1].time)
start_idx = start * self.beat_resolution
end_idx = end * self.beat_resolution
stride = tsc.numerator * self.beat_resolution
self.downbeat[start_idx:end_idx:stride] = True
start = end
# Build tempo array
one_more_beat = 2 * beat_times[-1] - beat_times[-2]
beat_times_one_more = np.append(beat_times, one_more_beat)
bpm = 60. / np.diff(beat_times_one_more)
        # One tempo value per time step: repeat each beat's bpm
        # `beat_resolution` times (the original np.tile(bpm, (1, 24)) repeated
        # the whole array wholesale and hardcoded a resolution of 24).
        self.tempo = np.repeat(bpm, self.beat_resolution)
# Parse pianoroll
self.tracks = []
for instrument in pm.instruments:
if binarized:
pianoroll = np.zeros((n_time_steps, 128), bool)
elif mode == 'max':
pianoroll = np.zeros((n_time_steps, 128), np.uint8)
else:
pianoroll = np.zeros((n_time_steps, 128), int)
pitches = np.array([note.pitch for note in instrument.notes
if note.end > first_beat_time])
note_on_times = np.array([note.start for note in instrument.notes
if note.end > first_beat_time])
beat_indices = np.searchsorted(beat_times, note_on_times) - 1
remained = note_on_times - beat_times[beat_indices]
ratios = remained / (beat_times_one_more[beat_indices + 1]
- beat_times[beat_indices])
rounded = np.round((beat_indices + ratios) * self.beat_resolution)
note_ons = rounded.astype(int)
if collect_onsets_only:
pianoroll[note_ons, pitches] = True
elif instrument.is_drum:
if binarized:
pianoroll[note_ons, pitches] = True
else:
velocities = [note.velocity for note in instrument.notes
if note.end > first_beat_time]
pianoroll[note_ons, pitches] = velocities
else:
note_off_times = np.array([note.end for note in instrument.notes
if note.end > first_beat_time])
beat_indices = np.searchsorted(beat_times, note_off_times) - 1
remained = note_off_times - beat_times[beat_indices]
ratios = remained / (beat_times_one_more[beat_indices + 1]
- beat_times[beat_indices])
note_offs = ((beat_indices + ratios)
* self.beat_resolution).astype(int)
for idx, start in enumerate(note_ons):
end = note_offs[idx]
velocity = instrument.notes[idx].velocity
if velocity < 1:
continue
if binarized and velocity <= threshold:
continue
if start > 0 and start < n_time_steps:
if pianoroll[start - 1, pitches[idx]]:
pianoroll[start - 1, pitches[idx]] = 0
if end < n_time_steps - 1:
if pianoroll[end, pitches[idx]]:
end -= 1
if binarized:
if mode == 'sum':
pianoroll[start:end, pitches[idx]] += 1
elif mode == 'max':
pianoroll[start:end, pitches[idx]] = True
elif mode == 'sum':
pianoroll[start:end, pitches[idx]] += velocity
elif mode == 'max':
maximum = np.maximum(
pianoroll[start:end, pitches[idx]], velocity)
pianoroll[start:end, pitches[idx]] = maximum
if skip_empty_tracks and not np.any(pianoroll):
continue
track = Track(pianoroll, int(instrument.program),
instrument.is_drum, instrument.name)
self.tracks.append(track)
self.check_validity() | Parse a :class:`pretty_midi.PrettyMIDI` object. The data type of the
        resulting pianorolls is automatically determined (int if `mode` is
'sum', np.uint8 if `mode` is 'max' and `binarized` is False, bool if
`mode` is 'max' and `binarized` is True).
Parameters
----------
pm : `pretty_midi.PrettyMIDI` object
A :class:`pretty_midi.PrettyMIDI` object to be parsed.
mode : {'max', 'sum'}
A string that indicates the merging strategy to apply to duplicate
            notes. Defaults to 'max'.
algorithm : {'normal', 'strict', 'custom'}
A string that indicates the method used to get the location of the
first beat. Notes before it will be dropped unless an incomplete
beat before it is found (see Notes for more information). Defaults
to 'normal'.
- The 'normal' algorithm estimates the location of the first beat by
:meth:`pretty_midi.PrettyMIDI.estimate_beat_start`.
- The 'strict' algorithm sets the first beat at the event time of
          the first time signature change. Raises a ValueError if no time
signature change event is found.
- The 'custom' algorithm takes argument `first_beat_time` as the
location of the first beat.
binarized : bool
True to binarize the parsed pianorolls before merging duplicate
notes. False to use the original parsed pianorolls. Defaults to
False.
skip_empty_tracks : bool
True to remove tracks with empty pianorolls and compress the pitch
range of the parsed pianorolls. False to retain the empty tracks
            and use the original parsed pianorolls. Defaults to True.
collect_onsets_only : bool
True to collect only the onset of the notes (i.e. note on events) in
all tracks, where the note off and duration information are dropped.
False to parse regular pianorolls.
threshold : int or float
A threshold used to binarize the parsed pianorolls. Only effective
when `binarized` is True. Defaults to zero.
first_beat_time : float
The location (in sec) of the first beat. Required and only effective
when using 'custom' algorithm.
Notes
-----
If an incomplete beat before the first beat is found, an additional beat
will be added before the (estimated) beat starting time. However, notes
before the (estimated) beat starting time for more than one beat are
dropped. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L606-L804 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.remove_tracks | def remove_tracks(self, track_indices):
"""
Remove tracks specified by `track_indices`.
Parameters
----------
track_indices : list
The indices of the tracks to be removed.
"""
if isinstance(track_indices, int):
track_indices = [track_indices]
self.tracks = [track for idx, track in enumerate(self.tracks)
if idx not in track_indices] | python | def remove_tracks(self, track_indices):
"""
Remove tracks specified by `track_indices`.
Parameters
----------
track_indices : list
The indices of the tracks to be removed.
"""
if isinstance(track_indices, int):
track_indices = [track_indices]
self.tracks = [track for idx, track in enumerate(self.tracks)
if idx not in track_indices] | Remove tracks specified by `track_indices`.
Parameters
----------
track_indices : list
The indices of the tracks to be removed. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L815-L828 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.save | def save(self, filename, compressed=True):
"""
Save the multitrack pianoroll to a (compressed) npz file, which can be
later loaded by :meth:`pypianoroll.Multitrack.load`.
Notes
-----
To reduce the file size, the pianorolls are first converted to instances
of scipy.sparse.csc_matrix, whose component arrays are then collected
and saved to a npz file.
Parameters
----------
filename : str
            The name of the npz file to which the multitrack pianoroll is saved.
compressed : bool
True to save to a compressed npz file. False to save to an
uncompressed npz file. Defaults to True.
"""
def update_sparse(target_dict, sparse_matrix, name):
"""Turn `sparse_matrix` into a scipy.sparse.csc_matrix and update
its component arrays to the `target_dict` with key as `name`
suffixed with its component type string."""
csc = csc_matrix(sparse_matrix)
target_dict[name+'_csc_data'] = csc.data
target_dict[name+'_csc_indices'] = csc.indices
target_dict[name+'_csc_indptr'] = csc.indptr
target_dict[name+'_csc_shape'] = csc.shape
self.check_validity()
array_dict = {'tempo': self.tempo}
info_dict = {'beat_resolution': self.beat_resolution,
'name': self.name}
if self.downbeat is not None:
array_dict['downbeat'] = self.downbeat
for idx, track in enumerate(self.tracks):
update_sparse(array_dict, track.pianoroll,
'pianoroll_{}'.format(idx))
info_dict[str(idx)] = {'program': track.program,
'is_drum': track.is_drum,
'name': track.name}
if not filename.endswith('.npz'):
filename += '.npz'
if compressed:
np.savez_compressed(filename, **array_dict)
else:
np.savez(filename, **array_dict)
compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED
with zipfile.ZipFile(filename, 'a') as zip_file:
zip_file.writestr('info.json', json.dumps(info_dict), compression) | python | def save(self, filename, compressed=True):
"""
Save the multitrack pianoroll to a (compressed) npz file, which can be
later loaded by :meth:`pypianoroll.Multitrack.load`.
Notes
-----
To reduce the file size, the pianorolls are first converted to instances
of scipy.sparse.csc_matrix, whose component arrays are then collected
and saved to a npz file.
Parameters
----------
filename : str
            The name of the npz file to which the multitrack pianoroll is saved.
compressed : bool
True to save to a compressed npz file. False to save to an
uncompressed npz file. Defaults to True.
"""
def update_sparse(target_dict, sparse_matrix, name):
"""Turn `sparse_matrix` into a scipy.sparse.csc_matrix and update
its component arrays to the `target_dict` with key as `name`
suffixed with its component type string."""
csc = csc_matrix(sparse_matrix)
target_dict[name+'_csc_data'] = csc.data
target_dict[name+'_csc_indices'] = csc.indices
target_dict[name+'_csc_indptr'] = csc.indptr
target_dict[name+'_csc_shape'] = csc.shape
self.check_validity()
array_dict = {'tempo': self.tempo}
info_dict = {'beat_resolution': self.beat_resolution,
'name': self.name}
if self.downbeat is not None:
array_dict['downbeat'] = self.downbeat
for idx, track in enumerate(self.tracks):
update_sparse(array_dict, track.pianoroll,
'pianoroll_{}'.format(idx))
info_dict[str(idx)] = {'program': track.program,
'is_drum': track.is_drum,
'name': track.name}
if not filename.endswith('.npz'):
filename += '.npz'
if compressed:
np.savez_compressed(filename, **array_dict)
else:
np.savez(filename, **array_dict)
compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED
with zipfile.ZipFile(filename, 'a') as zip_file:
zip_file.writestr('info.json', json.dumps(info_dict), compression) | Save the multitrack pianoroll to a (compressed) npz file, which can be
later loaded by :meth:`pypianoroll.Multitrack.load`.
Notes
-----
To reduce the file size, the pianorolls are first converted to instances
of scipy.sparse.csc_matrix, whose component arrays are then collected
and saved to a npz file.
Parameters
----------
filename : str
            The name of the npz file to which the multitrack pianoroll is saved.
compressed : bool
True to save to a compressed npz file. False to save to an
uncompressed npz file. Defaults to True. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L830-L884 |
salu133445/pypianoroll | pypianoroll/multitrack.py | Multitrack.to_pretty_midi | def to_pretty_midi(self, constant_tempo=None, constant_velocity=100):
"""
Convert to a :class:`pretty_midi.PrettyMIDI` instance.
Notes
-----
        - Only constant tempo is supported for now.
        - The velocities of the converted pianorolls are clipped to [0, 127],
          i.e. values below 0 and values beyond 127 are replaced by 0 and 127,
          respectively.
- Adjacent nonzero values of the same pitch will be considered a single
note with their mean as its velocity.
Parameters
----------
constant_tempo : int
            The constant tempo value of the output object. Defaults to the
first element of `tempo`.
constant_velocity : int
The constant velocity to be assigned to binarized tracks. Defaults
to 100.
Returns
-------
pm : `pretty_midi.PrettyMIDI` object
The converted :class:`pretty_midi.PrettyMIDI` instance.
"""
self.check_validity()
pm = pretty_midi.PrettyMIDI(initial_tempo=self.tempo[0])
# TODO: Add downbeat support -> time signature change events
# TODO: Add tempo support -> tempo change events
if constant_tempo is None:
constant_tempo = self.tempo[0]
time_step_size = 60. / constant_tempo / self.beat_resolution
for track in self.tracks:
instrument = pretty_midi.Instrument(
program=track.program, is_drum=track.is_drum, name=track.name)
copied = track.copy()
if copied.is_binarized():
copied.assign_constant(constant_velocity)
copied.clip()
clipped = copied.pianoroll.astype(np.uint8)
binarized = (clipped > 0)
padded = np.pad(binarized, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded.astype(np.int8), axis=0)
positives = np.nonzero((diff > 0).T)
pitches = positives[0]
note_ons = positives[1]
note_on_times = time_step_size * note_ons
note_offs = np.nonzero((diff < 0).T)[1]
note_off_times = time_step_size * note_offs
for idx, pitch in enumerate(pitches):
velocity = np.mean(clipped[note_ons[idx]:note_offs[idx], pitch])
note = pretty_midi.Note(
velocity=int(velocity), pitch=pitch,
start=note_on_times[idx], end=note_off_times[idx])
instrument.notes.append(note)
instrument.notes.sort(key=lambda x: x.start)
pm.instruments.append(instrument)
return pm | python | def to_pretty_midi(self, constant_tempo=None, constant_velocity=100):
"""
Convert to a :class:`pretty_midi.PrettyMIDI` instance.
Notes
-----
        - Only constant tempo is supported for now.
        - The velocities of the converted pianorolls are clipped to [0, 127],
          i.e. values below 0 and values beyond 127 are replaced by 0 and 127,
          respectively.
- Adjacent nonzero values of the same pitch will be considered a single
note with their mean as its velocity.
Parameters
----------
constant_tempo : int
            The constant tempo value of the output object. Defaults to the
first element of `tempo`.
constant_velocity : int
The constant velocity to be assigned to binarized tracks. Defaults
to 100.
Returns
-------
pm : `pretty_midi.PrettyMIDI` object
The converted :class:`pretty_midi.PrettyMIDI` instance.
"""
self.check_validity()
pm = pretty_midi.PrettyMIDI(initial_tempo=self.tempo[0])
# TODO: Add downbeat support -> time signature change events
# TODO: Add tempo support -> tempo change events
if constant_tempo is None:
constant_tempo = self.tempo[0]
time_step_size = 60. / constant_tempo / self.beat_resolution
for track in self.tracks:
instrument = pretty_midi.Instrument(
program=track.program, is_drum=track.is_drum, name=track.name)
copied = track.copy()
if copied.is_binarized():
copied.assign_constant(constant_velocity)
copied.clip()
clipped = copied.pianoroll.astype(np.uint8)
binarized = (clipped > 0)
padded = np.pad(binarized, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded.astype(np.int8), axis=0)
positives = np.nonzero((diff > 0).T)
pitches = positives[0]
note_ons = positives[1]
note_on_times = time_step_size * note_ons
note_offs = np.nonzero((diff < 0).T)[1]
note_off_times = time_step_size * note_offs
for idx, pitch in enumerate(pitches):
velocity = np.mean(clipped[note_ons[idx]:note_offs[idx], pitch])
note = pretty_midi.Note(
velocity=int(velocity), pitch=pitch,
start=note_on_times[idx], end=note_off_times[idx])
instrument.notes.append(note)
instrument.notes.sort(key=lambda x: x.start)
pm.instruments.append(instrument)
return pm | Convert to a :class:`pretty_midi.PrettyMIDI` instance.
Notes
-----
        - Only constant tempo is supported for now.
        - The velocities of the converted pianorolls are clipped to [0, 127],
          i.e. values below 0 and values beyond 127 are replaced by 0 and 127,
          respectively.
- Adjacent nonzero values of the same pitch will be considered a single
note with their mean as its velocity.
Parameters
----------
constant_tempo : int
            The constant tempo value of the output object. Defaults to the
first element of `tempo`.
constant_velocity : int
The constant velocity to be assigned to binarized tracks. Defaults
to 100.
Returns
-------
pm : `pretty_midi.PrettyMIDI` object
The converted :class:`pretty_midi.PrettyMIDI` instance. | https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/multitrack.py#L886-L952 |