import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib.gridspec import GridSpec
from .. import profile
from ..errors import ModelUseError
__all__ = [
'line_plot',
'raster_plot',
'animate_2D',
'animate_1D',
]
def line_plot(ts,
val_matrix,
plot_ids=None,
ax=None,
xlim=None,
ylim=None,
xlabel='Time (ms)',
ylabel='value',
legend=None,
title=None,
show=False):
"""Show the specified value in the given object (Neurons or Synapses.)
Parameters
----------
ts : np.ndarray
The time steps.
val_matrix : np.ndarray
        The value matrix which records the history trajectory.
It can be easily accessed by specifying the ``monitors``
of NeuGroup/SynConn by:
``neu/syn = NeuGroup/SynConn(..., monitors=[k1, k2])``
    plot_ids : None, int, tuple, list
The index of the value to plot.
ax : None, Axes
The figure to plot.
xlim : list, tuple
The xlim.
ylim : list, tuple
The ylim.
xlabel : str
The xlabel.
ylabel : str
The ylabel.
legend : str
The prefix of legend for plot.
show : bool
        Whether to show the figure.
"""
# get plot_ids
if plot_ids is None:
plot_ids = [0]
elif isinstance(plot_ids, int):
plot_ids = [plot_ids]
try:
assert isinstance(plot_ids, (list, tuple))
except AssertionError:
raise ModelUseError('"plot_ids" specifies the value index to plot, '
'it must be a list/tuple.')
# get ax
if ax is None:
ax = plt
# plot
val_matrix = val_matrix.reshape((val_matrix.shape[0], -1))
if legend:
for idx in plot_ids:
ax.plot(ts, val_matrix[:, idx], label=f'{legend}-{idx}')
else:
for idx in plot_ids:
ax.plot(ts, val_matrix[:, idx])
# legend
if legend:
ax.legend()
# xlim
if xlim is not None:
plt.xlim(xlim[0], xlim[1])
# ylim
if ylim is not None:
plt.ylim(ylim[0], ylim[1])
    # xlabel
if xlabel:
plt.xlabel(xlabel)
# ylabel
if ylabel:
plt.ylabel(ylabel)
# title
if title:
plt.title(title)
# show
if show:
plt.show()
def raster_plot(ts,
sp_matrix,
ax=None,
marker='.',
markersize=2,
color='k',
xlabel='Time (ms)',
ylabel='Neuron index',
xlim=None,
ylim=None,
title=None,
show=False):
"""Show the rater plot of the spikes.
Parameters
----------
ts : np.ndarray
The run times.
sp_matrix : np.ndarray
The spike matrix which records the spike information.
It can be easily accessed by specifying the ``monitors``
of NeuGroup by: ``neu = NeuGroup(..., monitors=['spike'])``
ax : Axes
The figure.
markersize : int
The size of the marker.
color : str
The color of the marker.
xlim : list, tuple
The xlim.
ylim : list, tuple
The ylim.
xlabel : str
The xlabel.
ylabel : str
The ylabel.
show : bool
Show the figure.
"""
# get index and time
elements = np.where(sp_matrix > 0.)
index = elements[1]
time = ts[elements[0]]
    # plot raster
if ax is None:
ax = plt
ax.plot(time, index, marker + color, markersize=markersize)
    # xlabel
if xlabel:
plt.xlabel(xlabel)
# ylabel
if ylabel:
plt.ylabel(ylabel)
if xlim:
plt.xlim(xlim[0], xlim[1])
if ylim:
plt.ylim(ylim[0], ylim[1])
if title:
plt.title(title)
if show:
plt.show()
def animate_2D(values,
net_size,
dt=None,
val_min=None,
val_max=None,
cmap=None,
frame_delay=1.,
frame_step=1,
title_size=10,
figsize=None,
gif_dpi=None,
video_fps=None,
save_path=None,
show=True):
"""Animate the potentials of the neuron group.
Parameters
----------
values : np.ndarray
The membrane potentials of the neuron group.
net_size : tuple
The size of the neuron group.
dt : float
The time duration of each step.
val_min : float, int
The minimum of the potential.
val_max : float, int
The maximum of the potential.
cmap : str
The colormap.
frame_delay : int, float
The delay to show each frame.
frame_step : int
The step to show the potential. If `frame_step=3`, then each
        frame shows one of every three steps.
title_size : int
The size of the title.
figsize : None, tuple
The size of the figure.
gif_dpi : int
Controls the dots per inch for the movie frames. This combined with
the figure's size in inches controls the size of the movie. If
``None``, use defaults in matplotlib.
video_fps : int
Frames per second in the movie. Defaults to ``None``, which will use
the animation's specified interval to set the frames per second.
save_path : None, str
The save path of the animation.
show : bool
        Whether to show the animation.
Returns
-------
figure : plt.figure
The created figure instance.
"""
dt = profile.get_dt() if dt is None else dt
num_step, num_neuron = values.shape
height, width = net_size
val_min = values.min() if val_min is None else val_min
val_max = values.max() if val_max is None else val_max
figsize = figsize or (6, 6)
fig = plt.figure(figsize=(figsize[0], figsize[1]), constrained_layout=True)
gs = GridSpec(1, 1, figure=fig)
fig.add_subplot(gs[0, 0])
def frame(t):
img = values[t]
fig.clf()
plt.pcolor(img, cmap=cmap, vmin=val_min, vmax=val_max)
plt.colorbar()
plt.axis('off')
fig.suptitle("Time: {:.2f} ms".format((t + 1) * dt),
fontsize=title_size, fontweight='bold')
return [fig.gca()]
values = values.reshape((num_step, height, width))
anim_result = animation.FuncAnimation(
fig, frame, frames=list(range(1, num_step, frame_step)),
init_func=None, interval=frame_delay, repeat_delay=3000)
if save_path is None:
if show:
plt.show()
else:
if save_path[-3:] == 'gif':
anim_result.save(save_path, dpi=gif_dpi, writer='imagemagick')
elif save_path[-3:] == 'mp4':
anim_result.save(save_path, writer='ffmpeg', fps=video_fps, bitrate=3000)
else:
anim_result.save(save_path + '.mp4', writer='ffmpeg', fps=video_fps, bitrate=3000)
return fig
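# Example (added for illustration, not part of the original file): animating a
# 10x10 grid of membrane potentials sampled every 0.1 ms. The `potentials`
# array is an assumed stand-in for a recorded monitor, e.g.:
#
#     potentials = np.random.rand(500, 100)   # (num_steps, num_neurons)
#     animate_2D(potentials, net_size=(10, 10), dt=0.1,
#                frame_step=2, save_path='potentials.gif', gif_dpi=100)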
def animate_1D(dynamical_vars,
static_vars=(),
dt=None,
xlim=None,
ylim=None,
xlabel=None,
ylabel=None,
frame_delay=50.,
frame_step=1,
title_size=10,
figsize=None,
gif_dpi=None,
video_fps=None,
save_path=None,
show=True):
"""Animation of one-dimensional data.
Parameters
----------
dynamical_vars : dict, np.ndarray, list of np.ndarray, list of dict
The dynamical variables which will be animated.
static_vars : dict, np.ndarray, list of np.ndarray, list of dict
The static variables.
dt : float
The numerical integration step.
xlim : tuple
The xlim.
ylim : tuple
The ylim.
xlabel : str
The xlabel.
ylabel : str
The ylabel.
frame_delay : int, float
The delay to show each frame.
frame_step : int
The step to show the potential. If `frame_step=3`, then each
        frame shows one of every three steps.
title_size : int
The size of the title.
figsize : None, tuple
The size of the figure.
gif_dpi : int
Controls the dots per inch for the movie frames. This combined with
the figure's size in inches controls the size of the movie. If
``None``, use defaults in matplotlib.
video_fps : int
Frames per second in the movie. Defaults to ``None``, which will use
the animation's specified interval to set the frames per second.
save_path : None, str
The save path of the animation.
show : bool
        Whether to show the animation.
Returns
-------
figure : plt.figure
The created figure instance.
"""
# check dt
dt = profile.get_dt() if dt is None else dt
# check figure
fig = plt.figure(figsize=(figsize or (6, 6)), constrained_layout=True)
gs = GridSpec(1, 1, figure=fig)
fig.add_subplot(gs[0, 0])
# check dynamical variables
final_dynamic_vars = []
lengths = []
has_legend = False
if isinstance(dynamical_vars, (tuple, list)):
for var in dynamical_vars:
if isinstance(var, dict):
assert 'ys' in var, 'Must provide "ys" item.'
if 'legend' not in var:
var['legend'] = None
else:
has_legend = True
if 'xs' not in var:
var['xs'] = np.arange(var['ys'].shape[1])
elif isinstance(var, np.ndarray):
var = {'ys': var,
'xs': np.arange(var.shape[1]),
'legend': None}
else:
raise ValueError(f'Unknown data type: {type(var)}')
assert np.ndim(var['ys']) == 2, "Dynamic variable must be 2D data."
lengths.append(var['ys'].shape[0])
final_dynamic_vars.append(var)
elif isinstance(dynamical_vars, np.ndarray):
assert np.ndim(dynamical_vars) == 2, "Dynamic variable must be 2D data."
lengths.append(dynamical_vars.shape[0])
final_dynamic_vars.append({'ys': dynamical_vars,
'xs': np.arange(dynamical_vars.shape[1]),
'legend': None})
elif isinstance(dynamical_vars, dict):
assert 'ys' in dynamical_vars, 'Must provide "ys" item.'
if 'legend' not in dynamical_vars:
dynamical_vars['legend'] = None
else:
has_legend = True
if 'xs' not in dynamical_vars:
dynamical_vars['xs'] = np.arange(dynamical_vars['ys'].shape[1])
lengths.append(dynamical_vars['ys'].shape[0])
final_dynamic_vars.append(dynamical_vars)
else:
raise ValueError(f'Unknown dynamical data type: {type(dynamical_vars)}')
lengths = np.array(lengths)
assert np.all(lengths == lengths[0]), 'Dynamic variables must have equal length.'
# check static variables
final_static_vars = []
if isinstance(static_vars, (tuple, list)):
        for var in static_vars:
            if isinstance(var, dict):
                # static variables use the same 'ys'/'xs' keys as the dynamic
                # ones, since frame() and the ylim computation read var['ys']
                assert 'ys' in var, 'Must provide "ys" item.'
                if 'legend' not in var:
                    var['legend'] = None
                else:
                    has_legend = True
            elif isinstance(var, np.ndarray):
                var = {'ys': var, 'legend': None}
            else:
                raise ValueError(f'Unknown data type: {type(var)}')
            assert np.ndim(var['ys']) == 1, "Static variable must be 1D data."
            if 'xs' not in var:
                var['xs'] = np.arange(var['ys'].shape[0])
            final_static_vars.append(var)
    elif isinstance(static_vars, np.ndarray):
        final_static_vars.append({'ys': static_vars,
                                  'xs': np.arange(static_vars.shape[0]),
                                  'legend': None})
elif isinstance(static_vars, dict):
assert 'ys' in static_vars, 'Must provide "ys" item.'
if 'legend' not in static_vars:
static_vars['legend'] = None
else:
has_legend = True
if 'xs' not in static_vars:
static_vars['xs'] = np.arange(static_vars['ys'].shape[0])
final_static_vars.append(static_vars)
else:
raise ValueError(f'Unknown static data type: {type(static_vars)}')
# ylim
if ylim is None:
ylim_min = np.inf
ylim_max = -np.inf
for var in final_dynamic_vars + final_static_vars:
if var['ys'].max() > ylim_max:
ylim_max = var['ys'].max()
if var['ys'].min() < ylim_min:
ylim_min = var['ys'].min()
if ylim_min > 0:
ylim_min = ylim_min * 0.98
else:
ylim_min = ylim_min * 1.02
if ylim_max > 0:
ylim_max = ylim_max * 1.02
else:
ylim_max = ylim_max * 0.98
ylim = (ylim_min, ylim_max)
def frame(t):
fig.clf()
for dvar in final_dynamic_vars:
plt.plot(dvar['xs'], dvar['ys'][t], label=dvar['legend'])
for svar in final_static_vars:
plt.plot(svar['xs'], svar['ys'], label=svar['legend'])
if xlim is not None:
plt.xlim(xlim[0], xlim[1])
if has_legend:
plt.legend()
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
plt.ylim(ylim[0], ylim[1])
fig.suptitle(t="Time: {:.2f} ms".format((t + 1) * dt),
fontsize=title_size,
fontweight='bold')
return [fig.gca()]
anim_result = animation.FuncAnimation(fig=fig,
func=frame,
frames=range(1, lengths[0], frame_step),
init_func=None,
interval=frame_delay,
repeat_delay=3000)
# save or show
if save_path is None:
if show: plt.show()
else:
if save_path[-3:] == 'gif':
anim_result.save(save_path, dpi=gif_dpi, writer='imagemagick')
elif save_path[-3:] == 'mp4':
anim_result.save(save_path, writer='ffmpeg', fps=video_fps, bitrate=3000)
else:
anim_result.save(save_path + '.mp4', writer='ffmpeg', fps=video_fps, bitrate=3000)
        return fig


# ---- end of brainpy/visualization/plots.py (package: scikit-brain) ----
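# Usage sketch for the plotting helpers in plots.py above (added for
# illustration; not part of the original package). The synthetic `ts`,
# `values`, `spikes` and `frames` arrays are assumptions standing in for data
# recorded by monitors.
if __name__ == '__main__':
    ts = np.arange(0., 100., 0.1)                             # time points (ms)
    values = np.sin(ts[:, None] * np.array([0.1, 0.2]))       # two example traces
    spikes = (np.random.rand(ts.size, 10) < 0.02).astype(float)  # 10 neurons
    line_plot(ts, values, plot_ids=[0, 1], legend='trace', show=True)
    raster_plot(ts, spikes, show=True)
    # animate_1D accepts 2D arrays, or dicts with 'ys'/'xs'/'legend' keys
    frames = np.sin(np.linspace(0., 6.28, 50)[:, None] + np.linspace(0., 3.14, 100))
    animate_1D({'ys': frames, 'legend': 'wave'}, dt=0.1, frame_step=2, show=True)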
from __future__ import annotations
import contextlib
import logging
import os
import re
import sys
from typing import Any
__all__ = ["logger", "raw_logger", "ScikitBuildLogger", "rich_print"]
def __dir__() -> list[str]:
return __all__
raw_logger = logging.getLogger(
"scikit_build_core"
) # TODO: maybe should be scikit-build?
raw_logger.setLevel(logging.DEBUG) # TODO: configure
class FStringMessage:
"This class captures a formatted string message and only produces it on demand."
def __init__(self, fmt: str, *args: object, **kwargs: object) -> None:
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self) -> str:
return self.fmt.format(*self.args, **self.kwargs)
def __repr__(self) -> str:
return (
f"<FStringMessage {self.fmt!r} args={self.args!r} kwargs={self.kwargs!r}>"
)
if sys.version_info < (3, 8):
opts: Any = {}
else:
opts = {"stacklevel": 2}
class ScikitBuildLogger:
# pylint: disable-next=redefined-outer-name
def __init__(self, logger: logging.Logger) -> None:
self.logger = logger
def debug(self, msg: str, *args: object, **kwargs: object) -> None:
self.logger.debug(FStringMessage(msg, *args, **kwargs), **opts)
def info(self, msg: str, *args: object, **kwargs: object) -> None:
self.logger.info(FStringMessage(msg, *args, **kwargs), **opts)
def warning(self, msg: str, *args: object, **kwargs: object) -> None:
self.logger.warning(FStringMessage(msg, *args, **kwargs), **opts)
def error(self, msg: str, *args: object, **kwargs: object) -> None:
self.logger.error(FStringMessage(msg, *args, **kwargs), **opts)
def critical(self, msg: str, *args: object, **kwargs: object) -> None:
self.logger.critical(FStringMessage(msg, *args, **kwargs), **opts)
def exception(self, msg: str, *args: object, **kwargs: object) -> None:
self.logger.exception(FStringMessage(msg, *args, **kwargs), **opts)
def log(self, level: int, msg: str, *args: object, **kwargs: object) -> None:
self.logger.log(level, FStringMessage(msg, *args, **kwargs), **opts)
def setLevel(self, level: int) -> None:
self.logger.setLevel(level)
def addHandler(self, handler: logging.Handler) -> None:
self.logger.addHandler(handler)
logger = ScikitBuildLogger(raw_logger)
ANY_ESCAPE = re.compile(r"\[([\w\s/]*)\]")
_COLORS = {
"red": "\33[91m",
"green": "\33[92m",
"yellow": "\33[93m",
"blue": "\33[94m",
"magenta": "\33[95m",
"cyan": "\33[96m",
"bold": "\33[1m",
"/red": "\33[0m",
"/green": "\33[0m",
"/blue": "\33[0m",
"/yellow": "\33[0m",
"/magenta": "\33[0m",
"/cyan": "\33[0m",
"/bold": "\33[22m",
"reset": "\33[0m",
}
_NO_COLORS = {color: "" for color in _COLORS}
def colors() -> dict[str, str]:
if "NO_COLOR" in os.environ:
return _NO_COLORS
# Pip reroutes sys.stdout, so FORCE_COLOR is required there
if os.environ.get("FORCE_COLOR", ""):
return _COLORS
# Avoid ValueError: I/O operation on closed file
with contextlib.suppress(ValueError):
# Assume sys.stderr is similar to sys.stdout
isatty = sys.stdout.isatty()
if isatty and not sys.platform.startswith("win"):
return _COLORS
return _NO_COLORS
def _process_rich(msg: object) -> str:
return ANY_ESCAPE.sub(
lambda m: "".join(colors()[x] for x in m.group(1).split()),
str(msg),
)
def rich_print(*args: object, **kwargs: object) -> None:
args_2 = tuple(_process_rich(arg) for arg in args)
if args != args_2:
args_2 = (*args_2[:-1], args_2[-1] + colors()["reset"])
    print(*args_2, **kwargs, flush=True)  # type: ignore[call-overload]  # noqa: T201


# ---- end of scikit_build_core/_logging.py (package: scikit-build-core) ----
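# Usage sketch for the logging helpers in _logging.py above (added for
# illustration; the handler setup and the messages are assumptions).
if __name__ == "__main__":
    raw_logger.addHandler(logging.StreamHandler())
    # FStringMessage defers the .format() call until the record is emitted
    logger.info("Found {} generator with {} extra flags", "Ninja", 2)
    rich_print("[bold]scikit-build-core[/bold]: build [green]succeeded[/green]")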
from __future__ import annotations
import contextlib
import dataclasses
import json
import os
import shutil
import subprocess
import sys
import sysconfig
import textwrap
from collections.abc import Mapping, Sequence
from pathlib import Path
from typing import Generator
from packaging.version import Version
from . import __version__
from ._compat.typing import Self
from ._logging import logger
from ._shutil import Run
from .errors import CMakeConfigError, CMakeNotFoundError, FailedLiveProcessError
from .program_search import best_program, get_cmake_programs
__all__ = ["CMake", "CMaker"]
def __dir__() -> list[str]:
return __all__
DIR = Path(__file__).parent.resolve()
@dataclasses.dataclass(frozen=True)
class CMake:
version: Version
cmake_path: Path
@classmethod
def default_search(
cls, *, minimum_version: Version | None = None, module: bool = True
) -> Self:
candidates = get_cmake_programs(module=module)
cmake_program = best_program(candidates, minimum_version=minimum_version)
if cmake_program is None:
msg = f"Could not find CMake with version >= {minimum_version}"
raise CMakeNotFoundError(msg)
if cmake_program.version is None:
msg = "CMake version undetermined @ {program.path}"
raise CMakeNotFoundError(msg)
return cls(version=cmake_program.version, cmake_path=cmake_program.path)
def __fspath__(self) -> str:
return os.fspath(self.cmake_path)
@dataclasses.dataclass
class CMaker:
cmake: CMake
source_dir: Path
build_dir: Path
build_type: str
module_dirs: list[Path] = dataclasses.field(default_factory=list)
prefix_dirs: list[Path] = dataclasses.field(default_factory=list)
init_cache_file: Path = dataclasses.field(init=False, default=Path())
env: dict[str, str] = dataclasses.field(init=False, default_factory=os.environ.copy)
single_config: bool = not sysconfig.get_platform().startswith("win")
def __post_init__(self) -> None:
self.init_cache_file = self.build_dir / "CMakeInit.txt"
if not self.source_dir.is_dir():
msg = f"source directory {self.source_dir} does not exist"
raise CMakeConfigError(msg)
self.build_dir.mkdir(parents=True, exist_ok=True)
if not self.build_dir.is_dir():
msg = f"build directory {self.build_dir} must be a (creatable) directory"
raise CMakeConfigError(msg)
# If these were the same, the following check could wipe the source directory!
if self.build_dir.resolve() == self.source_dir.resolve():
msg = "build directory must be different from source directory"
raise CMakeConfigError(msg)
skbuild_info = self.build_dir / ".skbuild-info.json"
# If building via SDist, this could be pre-filled, so delete it if it exists
with contextlib.suppress(FileNotFoundError):
with skbuild_info.open("r", encoding="utf-8") as f:
info = json.load(f)
cached_source_dir = Path(info["source_dir"])
if cached_source_dir.resolve() != self.source_dir.resolve():
logger.warning(
"Original src {} != {}, wiping build directory",
cached_source_dir,
self.source_dir,
)
shutil.rmtree(self.build_dir)
self.build_dir.mkdir()
with skbuild_info.open("w", encoding="utf-8") as f:
json.dump(self._info_dict(), f, indent=2)
def _info_dict(self) -> dict[str, str]:
"""
Produce an information dict about the current run that can be stored in a json file.
"""
return {
"source_dir": os.fspath(self.source_dir.resolve()),
"build_dir": os.fspath(self.build_dir.resolve()),
"cmake_path": os.fspath(self.cmake),
"skbuild_path": os.fspath(DIR),
"skbuild_version": __version__,
"python_executable": sys.executable,
}
def init_cache(
self, cache_settings: Mapping[str, str | os.PathLike[str] | bool]
) -> None:
with self.init_cache_file.open("w", encoding="utf-8") as f:
for key, value in cache_settings.items():
if isinstance(value, bool):
str_value = "ON" if value else "OFF"
f.write(f'set({key} {str_value} CACHE BOOL "" FORCE)\n')
elif isinstance(value, os.PathLike):
# Convert to CMake's internal path format
str_value = str(value).replace("\\", "/")
f.write(f'set({key} [===[{str_value}]===] CACHE PATH "" FORCE)\n')
else:
f.write(f'set({key} [===[{value}]===] CACHE STRING "" FORCE)\n')
if self.module_dirs:
# Convert to CMake's internal path format, otherwise this breaks try_compile on Windows
module_dirs_str = ";".join(map(str, self.module_dirs)).replace(
"\\", "/"
)
f.write(
f'set(CMAKE_MODULE_PATH [===[{module_dirs_str}]===] CACHE PATH "" FORCE)\n'
)
if self.prefix_dirs:
prefix_dirs_str = ";".join(map(str, self.prefix_dirs)).replace(
"\\", "/"
)
f.write(
f'set(CMAKE_PREFIX_PATH [===[{prefix_dirs_str}]===] CACHE PATH "" FORCE)\n'
)
f.write('set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE "BOTH" CACHE PATH "")\n')
contents = self.init_cache_file.read_text(encoding="utf-8").strip()
logger.debug(
"{}:\n{}",
self.init_cache_file,
textwrap.indent(contents.strip(), " "),
)
def _compute_cmake_args(
self, defines: Mapping[str, str | os.PathLike[str] | bool]
) -> Generator[str, None, None]:
yield f"-S{self.source_dir}"
yield f"-B{self.build_dir}"
if self.init_cache_file.is_file():
yield f"-C{self.init_cache_file}"
if self.single_config and self.build_type:
yield f"-DCMAKE_BUILD_TYPE:STRING={self.build_type}"
for key, value in defines.items():
if isinstance(value, bool):
str_value = "ON" if value else "OFF"
yield f"-D{key}:BOOL={str_value}"
elif isinstance(value, os.PathLike):
str_value = str(value).replace("\\", "/")
yield f"-D{key}:PATH={str_value}"
else:
yield f"-D{key}={value}"
def configure(
self,
*,
defines: Mapping[str, str | os.PathLike[str] | bool] | None = None,
cmake_args: Sequence[str] = (),
) -> None:
if "CMAKE_GENERATOR" in self.env:
gen = self.env["CMAKE_GENERATOR"]
self.single_config = gen == "Ninja" or "Makefiles" in gen
_cmake_args = self._compute_cmake_args(defines or {})
try:
Run(env=self.env).live(self.cmake, *_cmake_args, *cmake_args)
except subprocess.CalledProcessError:
msg = "CMake configuration failed"
raise FailedLiveProcessError(msg) from None
def _compute_build_args(
self,
*,
verbose: bool,
) -> Generator[str, None, None]:
if verbose:
yield "-v"
if self.build_type and not self.single_config:
yield "--config"
yield self.build_type
def build(
self,
build_args: Sequence[str] = (),
*,
targets: Sequence[str] = (),
verbose: bool = False,
) -> None:
local_args = self._compute_build_args(verbose=verbose)
if not targets:
self._build(*local_args, *build_args)
return
for target in targets:
self._build(*local_args, "--target", target, *build_args)
def _build(self, *args: str) -> None:
try:
Run(env=self.env).live(self.cmake, "--build", self.build_dir, *args)
except subprocess.CalledProcessError:
msg = "CMake build failed"
raise FailedLiveProcessError(msg) from None
def install(
self, prefix: Path, *, strip: bool = False, components: Sequence[str] = ()
) -> None:
opts = ["--prefix", str(prefix)]
if not self.single_config and self.build_type:
opts += ["--config", self.build_type]
if strip:
opts.append("--strip")
if not components:
self._install(opts)
return
for comp in components:
opts_with_comp = [*opts, "--component", comp]
logger.info("Installing component {}", comp)
self._install(opts_with_comp)
def _install(self, opts: Sequence[str]) -> None:
try:
Run(env=self.env).live(
self.cmake,
"--install",
self.build_dir,
*opts,
)
except subprocess.CalledProcessError:
msg = "CMake install failed"
            raise FailedLiveProcessError(msg) from None


# ---- end of scikit_build_core/cmake.py (package: scikit-build-core) ----
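# Usage sketch for the CMake wrappers in cmake.py above (added for
# illustration; the paths, version floor, and cache settings are assumptions,
# not scikit-build-core defaults).
if __name__ == "__main__":
    cmake = CMake.default_search(minimum_version=Version("3.15"))
    cmaker = CMaker(
        cmake,
        source_dir=Path("."),
        build_dir=Path("build") / "example",
        build_type="Release",
    )
    # written to CMakeInit.txt and passed via -C at configure time
    cmaker.init_cache({"EXAMPLE_OPTION": True, "EXAMPLE_DIR": Path("dist")})
    cmaker.configure(defines={"CMAKE_POSITION_INDEPENDENT_CODE": True})
    cmaker.build(verbose=True)
    cmaker.install(Path("dist"))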
from __future__ import annotations
import contextlib
import shutil
import subprocess
from collections.abc import Generator, Iterable
from pathlib import Path
from typing import NamedTuple
from packaging.version import InvalidVersion, Version
from ._logging import logger
from ._shutil import Run
__all__ = ["get_cmake_programs", "get_ninja_programs", "best_program", "Program"]
def __dir__() -> list[str]:
return __all__
class Program(NamedTuple):
path: Path
version: Version | None
def _get_cmake_path(*, module: bool = True) -> Generator[Path, None, None]:
"""
Get the path to CMake.
"""
if module:
with contextlib.suppress(ImportError):
# If a "cmake" directory exists, this will also ImportError
from cmake import CMAKE_BIN_DIR
yield Path(CMAKE_BIN_DIR) / "cmake"
candidates = ("cmake3", "cmake")
for candidate in candidates:
cmake_path = shutil.which(candidate)
if cmake_path is not None:
yield Path(cmake_path)
def _get_ninja_path(*, module: bool = True) -> Generator[Path, None, None]:
"""
Get the path to ninja.
"""
if module:
with contextlib.suppress(ImportError):
from ninja import BIN_DIR
yield Path(BIN_DIR) / "ninja"
# Matches https://gitlab.kitware.com/cmake/cmake/-/blob/master/Modules/CMakeNinjaFindMake.cmake
candidates = ("ninja-build", "ninja", "samu")
for candidate in candidates:
ninja_path = shutil.which(candidate)
if ninja_path is not None:
yield Path(ninja_path)
def get_cmake_programs(*, module: bool = True) -> Generator[Program, None, None]:
"""
Get the path and version for CMake. If the version cannot be determined,
    yields (path, None). Otherwise, yields (path, version). Best matches are
yielded first.
"""
for cmake_path in _get_cmake_path(module=module):
try:
result = Run().capture(cmake_path, "--version")
except subprocess.CalledProcessError:
yield Program(cmake_path, None)
continue
try:
version = Version(result.stdout.splitlines()[0].split()[-1])
except (IndexError, InvalidVersion):
logger.warning(f"Could not determine CMake version, got {result.stdout!r}")
yield Program(cmake_path, None)
continue
logger.info("CMake version: {}", version)
yield Program(cmake_path, version)
def get_ninja_programs(*, module: bool = True) -> Generator[Program, None, None]:
"""
Get the path and version for Ninja. If the version cannot be determined,
yields (path, None). Otherwise, yields (path, version). Best matches are
yielded first.
"""
for ninja_path in _get_ninja_path(module=module):
try:
result = Run().capture(ninja_path, "--version")
except subprocess.CalledProcessError:
yield Program(ninja_path, None)
continue
try:
version = Version(".".join(result.stdout.strip().split(".")[:3]))
except ValueError:
yield Program(ninja_path, None)
continue
logger.info("Ninja version: {}", version)
yield Program(ninja_path, version)
def get_make_programs() -> Generator[Path, None, None]:
"""
Get the path to make.
"""
candidates = ("gmake", "make")
for candidate in candidates:
make_path = shutil.which(candidate)
if make_path is not None:
yield Path(make_path)
def best_program(
programs: Iterable[Program], *, minimum_version: Version | None
) -> Program | None:
"""
Select the first program entry that is of a supported version, or None if not found.
"""
for program in programs:
if minimum_version is None:
return program
if program.version is not None and program.version >= minimum_version:
return program
    return None


# ---- end of scikit_build_core/program_search.py (package: scikit-build-core) ----
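# Usage sketch for the search helpers in program_search.py above (added for
# illustration; the minimum version is an assumption).
if __name__ == "__main__":
    found = best_program(get_cmake_programs(), minimum_version=Version("3.15"))
    if found is None:
        print("No suitable CMake found")
    else:
        print(f"Using CMake {found.version} at {found.path}")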
from __future__ import annotations
import subprocess
import textwrap
__all__ = [
"CMakeAccessError",
"CMakeConfigError",
"CMakeNotFoundError",
"CMakeVersionError",
"NinjaVersionError",
"FailedLiveProcessError",
"FailedProcessError",
"NinjaNotFoundError",
"NotFoundError",
"ScikitBuildError",
]
def __dir__() -> list[str]:
return __all__
class ScikitBuildError(Exception):
"""
Base class for all ScikitBuildError errors.
"""
class NotFoundError(ScikitBuildError):
"""
Raised when a program is not found.
"""
class CMakeNotFoundError(NotFoundError):
"""
Raised when cmake is not found.
"""
class NinjaNotFoundError(NotFoundError):
"""
Raised when ninja is not found.
"""
class FailedProcessError(Exception):
"""
    Exception raised when a call fails.
"""
def __init__(
self, exception: subprocess.CalledProcessError, description: str
) -> None:
super().__init__()
self.exception = exception
self._description = description
def __str__(self) -> str:
cmd = " ".join(self.exception.cmd)
description = f"{self._description}\n Command {cmd!r} failed with return code {self.exception.returncode}"
for stream_name in ("stdout", "stderr"):
stream = getattr(self.exception, stream_name)
if stream:
description += f"\n {stream_name}:\n"
description += textwrap.indent(stream.decode(), " ")
return description
class FailedLiveProcessError(Exception):
"""
Exception for when output was not being redirected.
"""
class CMakeAccessError(FailedProcessError):
"""
Error raised when CMake access fails.
"""
class CMakeVersionError(ScikitBuildError):
"""
Error raised when CMake version is not supported.
"""
class NinjaVersionError(ScikitBuildError):
"""
    Error raised when the Ninja version is not supported.
"""
class CMakeConfigError(ScikitBuildError):
"""
Something is misconfigured.
""" | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/errors.py | errors.py | from __future__ import annotations
from __future__ import annotations
import contextlib
import dataclasses
import os
import stat
import subprocess
import sys
from collections.abc import Generator, Iterable
from typing import ClassVar
from ._logging import logger
__all__ = ["Run"]
def __dir__() -> list[str]:
return __all__
@dataclasses.dataclass
class Run:
env: dict[str, str] | None = None
cwd: os.PathLike[str] | None = None
# Stores last printout, for cleaner debug logging
_prev_env: ClassVar[dict[str, str]] = {}
def live(self, *args: str | os.PathLike[str]) -> None:
"""
Runs code and prints the results live.
"""
self._run(args, capture=False)
def capture(
self, *args: str | os.PathLike[str]
) -> subprocess.CompletedProcess[str]:
"""
Runs a command and captures the result.
"""
return self._run(args, capture=True)
def _run(
self,
args: Iterable[str | os.PathLike[str]],
capture: bool,
) -> subprocess.CompletedProcess[str]:
options = [
os.fspath(arg) if isinstance(arg, os.PathLike) else arg for arg in args
]
if self.env:
if not self._prev_env:
type(self)._prev_env = self.env.copy()
msg = "\n ".join(f"{k}={v!r}" for k, v in sorted(self.env.items()))
logger.debug("RUNENV:\n {}", msg)
else:
msg = "\n ".join(
f"{self._key_diff(k)} {k}={self.env.get(k, '<unset>')!r}"
for k in sorted(self.env.keys() | self._prev_env.keys())
if self._prev_env.get(k, None) != self.env.get(k, None)
)
logger.debug("RUNENV - changes since last run only:\n {}", msg)
type(self)._prev_env = self.env.copy()
logger.debug("RUN: {}", " ".join(options))
return subprocess.run(
options,
text=True,
check=True,
capture_output=capture,
env=self.env,
cwd=self.cwd,
)
def _key_diff(self, k: str) -> str:
assert self.env
if k in self.env and k not in self._prev_env:
return "+"
if k in self._prev_env and k not in self.env:
return "-"
return " "
def _fix_all_permissions(directory: str) -> None:
"""
Makes sure the write permission is set. Only run this on Windows.
"""
with os.scandir(directory) as it:
for entry in it:
if entry.is_dir():
_fix_all_permissions(entry.path)
continue
mode = stat.S_IMODE(entry.stat().st_mode)
if not mode & stat.S_IWRITE:
os.chmod(entry.path, mode | stat.S_IWRITE) # noqa: PTH101
@contextlib.contextmanager
def fix_win_37_all_permissions(tmpdir: str) -> Generator[None, None, None]:
try:
yield
finally:
if sys.version_info < (3, 8) and sys.platform.startswith("win"):
            _fix_all_permissions(tmpdir)


# ---- end of scikit_build_core/_shutil.py (package: scikit-build-core) ----
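# Usage sketch for the Run helper in _shutil.py above (added for illustration;
# the command and the extra environment variable are assumptions).
if __name__ == "__main__":
    runner = Run(env={**os.environ, "EXAMPLE_FLAG": "1"})
    result = runner.capture("cmake", "--version")   # captured, returned as text
    print(result.stdout.splitlines()[0])
    runner.live("cmake", "-E", "echo", "hello from Run.live")   # streamed live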
from __future__ import annotations
import importlib.abc
import importlib.machinery
import importlib.util
import os
import subprocess
import sys
DIR = os.path.abspath(os.path.dirname(__file__))
MARKER = "SKBUILD_EDITABLE_SKIP"
VERBOSE = "SKBUILD_EDITABLE_VERBOSE"
__all__ = ["install"]
def __dir__() -> list[str]:
return __all__
class ScikitBuildRedirectingFinder(importlib.abc.MetaPathFinder):
def __init__(
self,
known_source_files: dict[str, str],
known_wheel_files: dict[str, str],
path: str | None,
rebuild: bool,
verbose: bool,
build_options: list[str],
install_options: list[str],
):
self.known_source_files = known_source_files
self.known_wheel_files = known_wheel_files
self.path = path
self.rebuild_flag = rebuild
self.verbose = verbose
self.build_options = build_options
self.install_options = install_options
def find_spec(
self,
fullname: str,
path: object = None,
target: object = None,
) -> importlib.machinery.ModuleSpec | None:
if fullname in self.known_wheel_files:
redir = self.known_wheel_files[fullname]
if self.rebuild_flag:
self.rebuild()
return importlib.util.spec_from_file_location(
fullname, os.path.join(DIR, redir)
)
if fullname in self.known_source_files:
redir = self.known_source_files[fullname]
return importlib.util.spec_from_file_location(fullname, redir)
return None
def rebuild(self) -> None:
# Don't rebuild if not set to a local path
if not self.path:
return
env = os.environ.copy()
# Protect against recursion
if self.path in env.get(MARKER, "").split(os.pathsep):
return
env[MARKER] = os.pathsep.join((env.get(MARKER, ""), self.path))
verbose = self.verbose or bool(env.get(VERBOSE, ""))
if env.get(VERBOSE, "") == "0":
verbose = False
if verbose:
print(f"Running cmake --build & --install in {self.path}") # noqa: T201
result = subprocess.run(
["cmake", "--build", ".", *self.build_options],
cwd=self.path,
stdout=sys.stderr if verbose else subprocess.PIPE,
env=env,
check=False,
text=True,
)
if result.returncode and verbose:
print( # noqa: T201
f"ERROR: {result.stdout}",
file=sys.stderr,
)
result.check_returncode()
result = subprocess.run(
["cmake", "--install", ".", "--prefix", DIR, *self.install_options],
cwd=self.path,
stdout=sys.stderr if verbose else subprocess.PIPE,
env=env,
check=False,
text=True,
)
if result.returncode and verbose:
print( # noqa: T201
f"ERROR: {result.stdout}",
file=sys.stderr,
)
result.check_returncode()
def install(
known_source_files: dict[str, str],
known_wheel_files: dict[str, str],
path: str | None,
rebuild: bool = False,
verbose: bool = False,
build_options: list[str] | None = None,
install_options: list[str] | None = None,
) -> None:
"""
Install a meta path finder that redirects imports to the source files, and
optionally rebuilds if path is given.
:param known_source_files: A mapping of module names to source files
:param known_wheel_files: A mapping of module names to wheel files
    :param path: The path to the build directory, or None
    :param rebuild: Whether to rebuild via CMake when a compiled module is imported
    :param verbose: Whether to print the cmake commands (also controlled by the
        SKBUILD_EDITABLE_VERBOSE environment variable)
    :param build_options: Extra arguments passed to ``cmake --build``
    :param install_options: Extra arguments passed to ``cmake --install``
"""
sys.meta_path.insert(
0,
ScikitBuildRedirectingFinder(
known_source_files,
known_wheel_files,
path,
rebuild,
verbose,
build_options or [],
install_options or [],
),
    )

# --- end of file: scikit_build_core/resources/_editable_redirect.py (package: scikit-build-core) ---
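
# A minimal usage sketch, not part of the module above. In a real editable
# install, scikit-build-core ships a generated copy of this module inside the
# wheel and calls install() with real mappings; the module names, file paths,
# and build directory below are hypothetical placeholders only.
from scikit_build_core.resources._editable_redirect import install

install(
    known_source_files={"mypkg": "/src/mypkg/__init__.py"},     # pure-Python modules -> source tree
    known_wheel_files={"mypkg._core": "mypkg/_core.abi3.so"},   # compiled modules, relative to this dir
    path="/src/build/editable",  # CMake build directory; None disables rebuilds
    rebuild=True,
    verbose=True,
    build_options=[],
    install_options=[],
)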
from __future__ import annotations
import setuptools.build_meta
from setuptools.build_meta import (
build_sdist,
build_wheel,
prepare_metadata_for_build_wheel,
)
from ..builder.get_requires import GetRequires
if hasattr(setuptools.build_meta, "build_editable"):
from setuptools.build_meta import build_editable
if hasattr(setuptools.build_meta, "prepare_metadata_for_build_editable"):
from setuptools.build_meta import (
prepare_metadata_for_build_editable,
)
__all__ = [
"build_editable",
"build_sdist",
"build_wheel",
"get_requires_for_build_editable",
"get_requires_for_build_sdist",
"get_requires_for_build_wheel",
"prepare_metadata_for_build_editable",
"prepare_metadata_for_build_wheel",
]
def __dir__() -> list[str]:
return __all__
def get_requires_for_build_sdist(
config_settings: dict[str, str | list[str]] | None = None
) -> list[str]:
setuptools_reqs = setuptools.build_meta.get_requires_for_build_sdist(
config_settings
)
requires = GetRequires(config_settings)
# These are only injected if cmake is required for the SDist step
cmake_requires = (
[*requires.cmake(), *requires.ninja()] if requires.settings.sdist.cmake else []
)
return [*setuptools_reqs, *cmake_requires]
def get_requires_for_build_wheel(
config_settings: dict[str, str | list[str]] | None = None
) -> list[str]:
requires = GetRequires(config_settings)
setuptools_reqs = setuptools.build_meta.get_requires_for_build_wheel(
config_settings
)
return [*setuptools_reqs, *requires.cmake(), *requires.ninja()]
if hasattr(setuptools.build_meta, "get_requires_for_build_editable"):
def get_requires_for_build_editable(
config_settings: dict[str, str | list[str]] | None = None
) -> list[str]:
requires = GetRequires(config_settings)
setuptools_reqs = setuptools.build_meta.get_requires_for_build_editable(
config_settings
)
        return [*setuptools_reqs, *requires.cmake(), *requires.ninja()]

# --- end of file: scikit_build_core/setuptools/build_meta.py (package: scikit-build-core) ---
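
# A minimal usage sketch, not part of the module above. A PEP 517 frontend
# (e.g. pip) imports this module as the build backend and calls the hooks
# directly. Assumes setuptools is installed and that this is run from a
# project root containing a pyproject.toml.
from scikit_build_core.setuptools.build_meta import get_requires_for_build_wheel

_extra = get_requires_for_build_wheel(config_settings=None)
print(_extra)  # setuptools' requirements plus cmake/ninja pins when no system tools are found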
from __future__ import annotations
import dataclasses
import functools
import importlib.util
import os
import sysconfig
from collections.abc import Generator, Mapping
from packaging.tags import sys_tags
from .._compat import tomllib
from .._compat.typing import Literal
from .._logging import logger
from ..program_search import (
best_program,
get_cmake_programs,
get_make_programs,
get_ninja_programs,
)
from ..resources import resources
from ..settings._load_provider import load_provider
from ..settings.skbuild_model import ScikitBuildSettings
from ..settings.skbuild_read_settings import SettingsReader
__all__ = ["GetRequires"]
def __dir__() -> list[str]:
return __all__
@functools.lru_cache(maxsize=2)
def known_wheels(name: Literal["ninja", "cmake"]) -> frozenset[str]:
with resources.joinpath("known_wheels.toml").open("rb") as f:
return frozenset(tomllib.load(f)["tool"]["scikit-build"][name]["known-wheels"])
@functools.lru_cache(maxsize=2)
def is_known_platform(platforms: frozenset[str]) -> bool:
return any(tag.platform in platforms for tag in sys_tags())
@dataclasses.dataclass
class GetRequires:
config_settings: Mapping[str, list[str] | str] | None = None
def __post_init__(self) -> None:
self._settings = SettingsReader.from_file(
"pyproject.toml", self.config_settings
).settings
@property
def settings(self) -> ScikitBuildSettings:
return self._settings
def cmake(self) -> Generator[str, None, None]:
cmake_min = self.settings.cmake.minimum_version
# If the module is already installed (via caching the build
# environment, for example), we will use that
if importlib.util.find_spec("cmake") is not None:
yield f"cmake>={cmake_min}"
return
cmake = best_program(
get_cmake_programs(module=False), minimum_version=cmake_min
)
if cmake is None:
yield f"cmake>={cmake_min}"
return
logger.debug("Found system CMake: {} - not requiring PyPI package", cmake)
def ninja(self) -> Generator[str, None, None]:
# On Windows MSVC, Ninja is not default
if sysconfig.get_platform().startswith("win") and "Ninja" not in os.environ.get(
"CMAKE_GENERATOR", ""
):
return
        # If a generator other than Ninja is explicitly requested, don't add ninja
if "Ninja" not in os.environ.get("CMAKE_GENERATOR", "Ninja"):
return
# If CMAKE_MAKE_PROGRAM is set, don't add anything, someone already knows what they want
if os.environ.get("CMAKE_MAKE_PROGRAM", ""):
return
ninja_min = self.settings.ninja.minimum_version
# If the module is already installed (via caching the build
# environment, for example), we will use that
if importlib.util.find_spec("ninja") is not None:
yield f"ninja>={ninja_min}"
return
ninja = best_program(
get_ninja_programs(module=False), minimum_version=ninja_min
)
if ninja is not None:
logger.debug("Found system Ninja: {} - not requiring PyPI package", ninja)
return
if (
self.settings.ninja.make_fallback
and not is_known_platform(known_wheels("ninja"))
and list(get_make_programs())
):
logger.debug(
"Found system Make & not on known platform - not requiring PyPI package for Ninja"
)
return
yield f"ninja>={ninja_min}"
def dynamic_metadata(self) -> Generator[str, None, None]:
for dynamic_metadata in self.settings.metadata.values():
if "provider" in dynamic_metadata:
config = dynamic_metadata.copy()
provider = config.pop("provider")
provider_path = config.pop("provider-path", None)
module = load_provider(provider, provider_path)
yield from getattr(
module, "get_requires_for_dynamic_metadata", lambda _: []
                )(config)

# --- end of file: scikit_build_core/builder/get_requires.py (package: scikit-build-core) ---
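
# A minimal usage sketch, not part of the module above. GetRequires reads
# pyproject.toml from the current directory, so this assumes it is run from a
# project root; the printed pins depend on the machine and installed tools.
from scikit_build_core.builder.get_requires import GetRequires

_requires = GetRequires(config_settings=None)
_packages = [
    *_requires.cmake(),             # e.g. ["cmake>=3.15"] when no suitable system CMake is found
    *_requires.ninja(),             # e.g. ["ninja>=1.5"], or nothing on MSVC generators
    *_requires.dynamic_metadata(),  # extra requirements from dynamic metadata providers
]
print(_packages)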
from __future__ import annotations
import configparser
import os
import sys
import sysconfig
from collections.abc import Mapping
from pathlib import Path
from .._logging import logger
__all__ = ["get_python_include_dir", "get_python_library", "get_cmake_platform"]
TARGET_TO_PLAT = {
"x86": "win32",
"x64": "win-amd64",
"arm": "win-arm32",
"arm64": "win-arm64",
}
PLAT_TO_CMAKE = {
"win32": "Win32",
"win-amd64": "x64",
"win-arm32": "ARM",
"win-arm64": "ARM64",
}
def __dir__() -> list[str]:
return __all__
def get_python_library(env: Mapping[str, str], *, abi3: bool = False) -> Path | None:
# When cross-compiling, check DIST_EXTRA_CONFIG first
config_file = env.get("DIST_EXTRA_CONFIG", None)
if config_file and Path(config_file).is_file():
cp = configparser.ConfigParser()
cp.read(config_file)
result = cp.get("build_ext", "library_dirs", fallback="")
if result:
logger.info("Reading DIST_EXTRA_CONFIG:build_ext.library_dirs={}", result)
minor = "" if abi3 else sys.version_info[1]
return Path(result) / f"python3{minor}.lib"
libdirstr = sysconfig.get_config_var("LIBDIR")
ldlibrarystr = sysconfig.get_config_var("LDLIBRARY")
libdir: Path | None = libdirstr and Path(libdirstr)
ldlibrary: Path | None = ldlibrarystr and Path(ldlibrarystr)
multiarch: str | None = sysconfig.get_config_var("MULTIARCH")
masd: str | None = sysconfig.get_config_var("multiarchsubdir")
if libdir and ldlibrary:
try:
libdir_is_dir = libdir.is_dir()
except PermissionError:
return None
if libdir_is_dir:
if multiarch and masd:
if masd.startswith(os.sep):
masd = masd[len(os.sep) :]
libdir_masd = libdir / masd
if libdir_masd.is_dir():
libdir = libdir_masd
libpath = libdir / ldlibrary
if Path(os.path.expandvars(libpath)).is_file():
return libpath
logger.warning("libdir/ldlibrary: {} is not a real file!", libpath)
else:
logger.warning("libdir: {} is not a directory", libdir)
framework_prefix = sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX")
if framework_prefix and Path(framework_prefix).is_dir() and ldlibrary:
libpath = Path(framework_prefix) / ldlibrary
if libpath.is_file():
return libpath
logger.warning(
"Can't find a Python library, got libdir={}, ldlibrary={}, multiarch={}, masd={}",
libdir,
ldlibrary,
multiarch,
masd,
)
return None
def get_python_include_dir() -> Path:
return Path(sysconfig.get_path("include"))
def get_host_platform() -> str:
"""
Return a string that identifies the current platform. This mimics
setuptools get_host_platform (without 3.8 aix compat).
"""
if sys.version_info < (3, 8) and os.name == "nt":
if "(arm)" in sys.version.lower():
return "win-arm32"
if "(arm64)" in sys.version.lower():
return "win-arm64"
return sysconfig.get_platform()
def get_platform(env: Mapping[str, str] | None = None) -> str:
"""
Return the Python platform name for a platform, respecting VSCMD_ARG_TGT_ARCH.
"""
if env is None:
env = os.environ
if sysconfig.get_platform().startswith("win"):
if "VSCMD_ARG_TGT_ARCH" in env:
logger.debug(
"Selecting {} or {} due to VSCMD_ARG_TARGET_ARCH",
TARGET_TO_PLAT.get(env["VSCMD_ARG_TGT_ARCH"]),
get_host_platform(),
)
return TARGET_TO_PLAT.get(env["VSCMD_ARG_TGT_ARCH"]) or get_host_platform()
if "arm64" in env.get("SETUPTOOLS_EXT_SUFFIX", "").lower():
logger.debug("Windows ARM targeted via SETUPTOOLS_EXT_SUFFIX")
return "win-arm64"
return get_host_platform()
def get_cmake_platform(env: Mapping[str, str] | None) -> str:
"""
Return the CMake platform name for a platform, respecting VSCMD_ARG_TGT_ARCH.
"""
plat = get_platform(env)
return PLAT_TO_CMAKE.get(plat, plat)
def get_soabi(env: Mapping[str, str], *, abi3: bool = False) -> str:
if abi3:
return "" if sysconfig.get_platform().startswith("win") else "abi3"
# Cross-compile support
setuptools_ext_suffix = env.get("SETUPTOOLS_EXT_SUFFIX", "")
if setuptools_ext_suffix:
return setuptools_ext_suffix.rsplit(".", 1)[0].lstrip(".")
if sys.version_info < (3, 8, 7):
# See https://github.com/python/cpython/issues/84006
import distutils.sysconfig # pylint: disable=deprecated-module
ext_suffix = distutils.sysconfig.get_config_var("EXT_SUFFIX")
else:
ext_suffix = sysconfig.get_config_var("EXT_SUFFIX")
assert isinstance(ext_suffix, str)
    return ext_suffix.rsplit(".", 1)[0].lstrip(".")

# --- end of file: scikit_build_core/builder/sysconfig.py (package: scikit-build-core) ---
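
# A minimal usage sketch, not part of the module above. It queries the helpers
# for the current interpreter; output is machine dependent and the values in
# the comments are only indicative examples.
import os

from scikit_build_core.builder.sysconfig import (
    get_cmake_platform,
    get_python_include_dir,
    get_python_library,
    get_soabi,
)

print(get_python_include_dir())           # e.g. /usr/include/python3.11
print(get_python_library(os.environ))     # a .lib path on Windows, often None elsewhere
print(get_cmake_platform(os.environ))     # "x64"/"ARM64" on Windows; other platforms pass through
print(get_soabi(os.environ, abi3=False))  # e.g. "cpython-311-x86_64-linux-gnu"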
from __future__ import annotations
import re
import subprocess
import sys
import sysconfig
from collections.abc import Mapping, MutableMapping
from .._logging import logger
from ..cmake import CMake
from ..errors import NinjaNotFoundError
from ..program_search import best_program, get_make_programs, get_ninja_programs
from ..settings.skbuild_model import NinjaSettings
from .sysconfig import get_cmake_platform
__all__ = ["set_environment_for_gen"]
def __dir__() -> list[str]:
return __all__
def parse_help_default(txt: str) -> str | None:
"""
Parses the default generator from the output of cmake --help.
"""
lines: list[str] = re.findall(
r"^\*\s*(.*?)(?:\s*\[arch\])?\s*= Generate", txt, re.MULTILINE
)
if len(lines) != 1:
return None
return lines[0]
def get_default(cmake: CMake) -> str | None:
"""
Returns the default generator for the current platform. None if it cannot be
determined.
"""
result = subprocess.run(
[str(cmake.cmake_path), "--help"],
check=False,
capture_output=True,
encoding="utf-8",
)
if result.returncode != 0:
return None
return parse_help_default(result.stdout)
def set_environment_for_gen(
cmake: CMake, env: MutableMapping[str, str], ninja_settings: NinjaSettings
) -> Mapping[str, str]:
"""
This function modifies the environment as needed to safely set a generator.
A reasonable default generator is set if the environment does not already
have one set; if ninja is present, ninja will be used over make on Unix.
"""
default = get_default(cmake) or ""
if default:
logger.debug("Default generator: {}", default)
if sysconfig.get_platform().startswith("win") and "Visual Studio" in env.get(
"CMAKE_GENERATOR", default
):
# This must also be set when *_PLATFORM is set.
env.setdefault("CMAKE_GENERATOR", default)
env.setdefault("CMAKE_GENERATOR_PLATFORM", get_cmake_platform(env))
return {}
if sys.platform.startswith("win") and not sysconfig.get_platform().startswith(
"win"
):
# Non-MSVC Windows platforms require Ninja
default = "Ninja"
# Try Ninja if it is available, even if make is CMake default
if default == "Unix Makefiles":
default = "Ninja"
if env.get("CMAKE_GENERATOR", default or "Ninja") == "Ninja":
ninja = best_program(
get_ninja_programs(), minimum_version=ninja_settings.minimum_version
)
if ninja is not None:
env.setdefault("CMAKE_GENERATOR", "Ninja")
logger.debug("CMAKE_GENERATOR: Using ninja: {}", ninja.path)
return {"CMAKE_MAKE_PROGRAM": str(ninja.path)}
msg = "Ninja is required to build"
if not ninja_settings.make_fallback:
raise NinjaNotFoundError(msg)
msg = "Ninja or make is required to build"
make_programs = list(get_make_programs())
if not make_programs:
raise NinjaNotFoundError(msg)
env.setdefault("CMAKE_GENERATOR", "Unix Makefiles")
logger.debug("CMAKE_GENERATOR: Using make: {}", make_programs[0])
return {"CMAKE_MAKE_PROGRAM": str(make_programs[0])}
    return {}

# --- end of file: scikit_build_core/builder/generator.py (package: scikit-build-core) ---
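
# A minimal usage sketch, not part of the module above. It shows roughly how
# the builder drives set_environment_for_gen: the environment mapping is
# mutated in place and any extra defines (CMAKE_MAKE_PROGRAM) are returned.
# CMake.default_search() and a default-constructed NinjaSettings() are
# assumptions here; check the cmake/settings modules for exact signatures.
import os

from scikit_build_core.builder.generator import set_environment_for_gen
from scikit_build_core.cmake import CMake
from scikit_build_core.settings.skbuild_model import NinjaSettings

_cmake = CMake.default_search()
_env = os.environ.copy()
_extra_defines = set_environment_for_gen(_cmake, _env, NinjaSettings())
print(_env.get("CMAKE_GENERATOR"), dict(_extra_defines))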
from __future__ import annotations
import dataclasses
import re
import sys
import sysconfig
from collections.abc import Iterable, Mapping, Sequence
from pathlib import Path
from packaging.version import Version
from .. import __version__
from .._compat.importlib import metadata, resources
from .._logging import logger
from ..cmake import CMaker
from ..resources import find_python
from ..settings.skbuild_model import ScikitBuildSettings
from .generator import set_environment_for_gen
from .sysconfig import (
get_platform,
get_python_include_dir,
get_python_library,
get_soabi,
)
__all__ = ["Builder", "get_archs", "archs_to_tags"]
DIR = Path(__file__).parent.resolve()
def __dir__() -> list[str]:
return __all__
# TODO: cross-compile support for other platforms
def get_archs(env: Mapping[str, str], cmake_args: Sequence[str] = ()) -> list[str]:
"""
    Takes platform settings from the environment and returns a list of target architectures.
Example (macOS):
ARCHFLAGS="-arch x86_64" -> ["x86_64"]
ARCHFLAGS="-arch x86_64 -arch arm64" -> ["x86_64", "arm64"]
Returns an empty list otherwise or if ARCHFLAGS is not set.
"""
if sys.platform.startswith("darwin"):
for cmake_arg in cmake_args:
if "CMAKE_SYSTEM_PROCESSOR" in cmake_arg:
return [cmake_arg.split("=")[1]]
return re.findall(r"-arch (\S+)", env.get("ARCHFLAGS", ""))
if sys.platform.startswith("win") and get_platform(env) == "win-arm64":
return ["win_arm64"]
return []
def archs_to_tags(archs: list[str]) -> list[str]:
"""
Convert a list of architectures to a list of tags (e.g. "universal2").
"""
if sys.platform.startswith("darwin") and set(archs) == {"arm64", "x86_64"}:
return ["universal2"]
return archs
@dataclasses.dataclass
class Builder:
settings: ScikitBuildSettings
config: CMaker
def get_cmake_args(self) -> list[str]:
"""
Get CMake args from the settings and environment.
"""
# Adding CMake arguments set as environment variable
# (needed e.g. to build for ARM OSX on conda-forge)
env_cmake_args = filter(None, self.config.env.get("CMAKE_ARGS", "").split(" "))
return [*self.settings.cmake.args, *env_cmake_args]
def configure(
self,
*,
defines: Mapping[str, str | bool],
cache_entries: Mapping[str, str | Path] | None = None,
name: str | None = None,
version: Version | None = None,
limited_abi: bool | None = None,
configure_args: Iterable[str] = (),
) -> None:
cmake_defines = {
k: ("TRUE" if v else "FALSE") if isinstance(v, bool) else v
for k, v in defines.items()
}
# Add any extra CMake modules
eps = metadata.entry_points(group="cmake.module")
self.config.module_dirs.extend(resources.files(ep.load()) for ep in eps)
# Add any extra CMake prefixes
eps = metadata.entry_points(group="cmake.prefix")
self.config.prefix_dirs.extend(resources.files(ep.load()) for ep in eps)
# Add site-packages to the prefix path for CMake
site_packages = Path(sysconfig.get_path("purelib"))
self.config.prefix_dirs.append(site_packages)
logger.debug("SITE_PACKAGES: {}", site_packages)
if site_packages != DIR.parent.parent:
self.config.prefix_dirs.append(DIR.parent.parent)
logger.debug("Extra SITE_PACKAGES: {}", site_packages)
# Add the FindPython backport if needed
if self.config.cmake.version < self.settings.backport.find_python:
fp_dir = Path(find_python.__file__).parent.resolve()
self.config.module_dirs.append(fp_dir)
logger.debug("FindPython backport activated at {}", fp_dir)
local_def = set_environment_for_gen(
self.config.cmake, self.config.env, self.settings.ninja
)
cmake_defines.update(local_def)
cache_config: dict[str, str | Path | bool] = {
"SKBUILD": "2",
"SKBUILD_CORE_VERSION": __version__,
}
if name is not None:
canonical_name = name.replace("-", "_").replace(".", "_")
cache_config["SKBUILD_PROJECT_NAME"] = canonical_name
if version is not None:
cache_config["SKBUILD_PROJECT_VERSION"] = str(version)
if limited_abi is None:
if self.settings.wheel.py_api.startswith("cp3"):
target_minor_version = int(self.settings.wheel.py_api[3:])
limited_abi = target_minor_version <= sys.version_info.minor
else:
limited_abi = False
python_library = get_python_library(self.config.env, abi3=False)
python_sabi_library = (
get_python_library(self.config.env, abi3=True) if limited_abi else None
)
python_include_dir = get_python_include_dir()
# Classic Find Python
cache_config["PYTHON_EXECUTABLE"] = sys.executable
cache_config["PYTHON_INCLUDE_DIR"] = python_include_dir
if python_library:
cache_config["PYTHON_LIBRARY"] = python_library
# Modern Find Python
for prefix in ("Python", "Python3"):
cache_config[f"{prefix}_EXECUTABLE"] = sys.executable
cache_config[f"{prefix}_ROOT_DIR"] = sys.prefix
cache_config[f"{prefix}_INCLUDE_DIR"] = python_include_dir
cache_config[f"{prefix}_FIND_REGISTRY"] = "NEVER"
# FindPython may break if this is set - only useful on Windows
if python_library and sysconfig.get_platform().startswith("win"):
cache_config[f"{prefix}_LIBRARY"] = python_library
if python_sabi_library and sysconfig.get_platform().startswith("win"):
cache_config[f"{prefix}_SABI_LIBRARY"] = python_sabi_library
cache_config["SKBUILD_SOABI"] = get_soabi(self.config.env, abi3=limited_abi)
# Allow CMakeLists to detect this is supposed to be a limited ABI build
cache_config["SKBUILD_SABI_COMPONENT"] = (
"Development.SABIModule" if limited_abi else ""
)
if cache_entries:
cache_config.update(cache_entries)
self.config.init_cache(cache_config)
if sys.platform.startswith("darwin"):
# Cross-compile support for macOS - respect ARCHFLAGS if set
archs = get_archs(self.config.env)
if archs:
cmake_defines["CMAKE_OSX_ARCHITECTURES"] = ";".join(archs)
# Add the pre-defined or passed CMake defines
cmake_defines.update(
{
k: ("TRUE" if v else "FALSE") if isinstance(v, bool) else v
for k, v in self.settings.cmake.define.items()
}
)
self.config.configure(
defines=cmake_defines,
cmake_args=[*self.get_cmake_args(), *configure_args],
)
def build(self, build_args: list[str]) -> None:
self.config.build(
build_args=build_args,
targets=self.settings.cmake.targets,
verbose=self.settings.cmake.verbose,
)
def install(self, install_dir: Path) -> None:
components = self.settings.install.components
strip = self.settings.install.strip
assert strip is not None
        self.config.install(install_dir, strip=strip, components=components)

# --- end of file: scikit_build_core/builder/builder.py (package: scikit-build-core) ---
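
# A minimal sketch, not part of the module above: a condensed outline of the
# configure -> build -> install flow that the wheel backend runs through
# Builder. Constructing the ScikitBuildSettings and CMaker that make up a
# Builder is out of scope here, so `builder` is taken as given; the project
# name, version, and install prefix are hypothetical placeholders.
from pathlib import Path

from packaging.version import Version


def _build_project(builder: "Builder") -> None:
    builder.configure(
        defines={},                # extra -D cache defines for this build
        name="example_project",    # hypothetical project name
        version=Version("0.1.0"),  # hypothetical project version
        limited_abi=False,
    )
    builder.build(build_args=[])
    builder.install(Path("dist/.wheel-staging"))  # hypothetical install prefix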
from __future__ import annotations
import dataclasses
import itertools
import sys
from collections.abc import Iterable, Sequence
import packaging.tags
from .._compat.typing import Self
from .._logging import logger
from .macos import get_macosx_deployment_target
__all__ = ["WheelTag"]
def __dir__() -> list[str]:
return __all__
@dataclasses.dataclass(frozen=True)
class WheelTag:
pyvers: list[str]
abis: list[str]
archs: list[str]
# TODO: plats only used on macOS & Windows
@classmethod
def compute_best(
cls,
archs: Sequence[str],
py_api: str = "",
expand_macos: bool = False,
) -> Self:
best_tag = next(packaging.tags.sys_tags())
interp, abi, *plats = (best_tag.interpreter, best_tag.abi, best_tag.platform)
pyvers = [interp]
if sys.platform.startswith("win") and archs:
plats = [x.replace("-", "_") for x in archs]
elif sys.platform.startswith("darwin"):
pairs: Iterable[tuple[str | None, bool]]
if expand_macos and archs == ["universal2"]:
pairs = zip(
["universal2", "universal2", "x86_64", "arm64"],
[False, True, False, True],
)
elif not archs:
# It's okay to set arm to False, since this would be a native build,
# and that will already be 11+ for ARM anyway.
pairs = zip([None], [False])
else:
pairs = zip(archs, [a == "arm64" for a in archs])
plats = [
next(
packaging.tags.mac_platforms(
get_macosx_deployment_target(arm), arch
)
)
for arch, arm in pairs
]
# Remove duplicates (e.g. universal2 if macOS > 11.0 and expanded)
plats = list(dict.fromkeys(plats))
if py_api:
pyvers_new = py_api.split(".")
if all(x.startswith("cp3") and x[3:].isdecimal() for x in pyvers_new):
if len(pyvers_new) != 1:
msg = "Unexpected py-api, must be a single cp version (e.g. cp39), not {py_api}"
raise AssertionError(msg)
minor = int(pyvers_new[0][3:])
if (
sys.implementation.name == "cpython"
and minor <= sys.version_info.minor
):
pyvers = pyvers_new
abi = "abi3"
else:
msg = "Ignoring py-api, not a CPython interpreter ({}) or version (3.{}) is too high"
logger.debug(msg, sys.implementation.name, minor)
elif all(x.startswith("py") and x[2:].isdecimal() for x in pyvers_new):
pyvers = pyvers_new
abi = "none"
else:
msg = f"Unexpected py-api, must be abi3 (e.g. cp39) or Pythonless (e.g. py2.py3), not {py_api}"
raise AssertionError(msg)
return cls(pyvers=pyvers, abis=[abi], archs=plats)
@property
def pyver(self) -> str:
return ".".join(self.pyvers)
@property
def abi(self) -> str:
return ".".join(self.abis)
@property
def arch(self) -> str:
return ".".join(self.archs)
def __str__(self) -> str:
return f"{self.pyver}-{self.abi}-{self.arch}"
def tags_dict(self) -> dict[str, list[str]]:
return {
"pyver": self.pyvers,
"abi": self.abis,
"arch": self.archs,
}
def as_tags_set(self) -> frozenset[packaging.tags.Tag]:
vals = itertools.product(self.pyvers, self.abis, self.archs)
return frozenset(packaging.tags.Tag(*v) for v in vals)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--archs",
nargs="*",
default=[],
help="Specify one or more archs (macOS only currently)",
)
parser.add_argument(
"--abi",
default="",
help="Specify py-api, like 'cp37' or 'py3'",
)
args = parser.parse_args()
tag = WheelTag.compute_best(args.archs, args.abi)
    print(tag)  # noqa: T201

# --- end of file: scikit_build_core/builder/wheel_tag.py (package: scikit-build-core) ---
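
# A minimal usage sketch, not part of the module above. Tags depend on the
# running interpreter and platform; the values in the comments are examples only.
from scikit_build_core.builder.wheel_tag import WheelTag

_tag = WheelTag.compute_best([], py_api="")
print(_tag)  # e.g. cp311-cp311-linux_x86_64
_abi3_tag = WheelTag.compute_best([], py_api="cp38")  # limited-API tag when supported
print(_abi3_tag, _abi3_tag.tags_dict())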
import builtins
import dataclasses
import json
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List, Type, TypeVar, Union # noqa: TID251
from .._compat.builtins import ExceptionGroup
from .._compat.typing import get_args, get_origin
from .model.cache import Cache
from .model.cmakefiles import CMakeFiles
from .model.codemodel import CodeModel, Target
from .model.directory import Directory
from .model.index import Index
__all__ = ["load_reply_dir"]
def __dir__() -> List[str]:
return __all__
T = TypeVar("T")
InputDict = Dict[str, Any]
class Converter:
def __init__(self, base_dir: Path) -> None:
self.base_dir = base_dir
def load(self) -> Index:
"""
Load the newest index.json file and return the Index object.
"""
index_file = sorted(self.base_dir.glob("index-*"))[-1]
with index_file.open(encoding="utf-8") as f:
data = json.load(f)
return self.make_class(data, Index)
def _load_from_json(self, name: Path, target: Type[T]) -> T:
with self.base_dir.joinpath(name).open(encoding="utf-8") as f:
data = json.load(f)
return self.make_class(data, target)
def make_class(self, data: InputDict, target: Type[T]) -> T:
"""
Convert a dict to a dataclass. Automatically load a few nested jsonFile classes.
"""
if (
target in (CodeModel, Target, Cache, CMakeFiles, Directory)
and "jsonFile" in data
and data["jsonFile"] is not None
):
return self._load_from_json(Path(data["jsonFile"]), target)
input_dict = {}
exceptions: List[Exception] = []
# We don't have DataclassInstance exposed in typing yet
for field in dataclasses.fields(target): # type: ignore[arg-type]
json_field = field.name.replace("_v", "-v").replace(
"cmakefiles", "cmakeFiles"
)
if json_field in data:
try:
input_dict[field.name] = self._convert_any(
data[json_field], field.type
)
except TypeError as err:
msg = f"Failed to convert field {field.name!r} of type {field.type}"
if sys.version_info < (3, 11):
err.__notes__ = [*getattr(err, "__notes__", []), msg] # type: ignore[attr-defined]
else:
err.add_note(msg) # pylint: disable=no-member
exceptions.append(err)
except ExceptionGroup as err:
exceptions.append(err)
if exceptions:
msg = f"Failed converting {target}"
raise ExceptionGroup(msg, exceptions)
return target(**input_dict)
def _convert_any(self, item: Any, target: Type[T]) -> T:
if dataclasses.is_dataclass(target):
# We don't have DataclassInstance exposed in typing yet
return self.make_class(item, target) # type: ignore[return-value]
origin = get_origin(target)
if origin is not None:
if origin == list:
return [self._convert_any(i, get_args(target)[0]) for i in item] # type: ignore[return-value]
if origin == Union:
return self._convert_any(item, get_args(target)[0]) # type: ignore[no-any-return]
return target(item) # type: ignore[call-arg]
def load_reply_dir(path: Path) -> Index:
return Converter(path).load()
if __name__ == "__main__":
import argparse
rich_print: Callable[[object], None]
try:
from rich import print as rich_print
except ModuleNotFoundError:
rich_print = builtins.print
parser = argparse.ArgumentParser()
parser.add_argument("reply_dir", type=Path, help="Path to the reply directory")
args = parser.parse_args()
reply = Path(args.reply_dir)
    rich_print(load_reply_dir(reply))

# --- end of file: scikit_build_core/file_api/reply.py (package: scikit-build-core) ---
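
# A minimal usage sketch, not part of the module above. It assumes a CMake
# configure run has already populated the standard file API reply directory
# inside a hypothetical "build" directory; otherwise loading will fail.
from pathlib import Path

from scikit_build_core.file_api.reply import load_reply_dir

_reply_dir = Path("build") / ".cmake" / "api" / "v1" / "reply"
_index = load_reply_dir(_reply_dir)
print(_index)  # nested dataclasses covering cache-v2, cmakeFiles-v1, codemodel-v2, toolchains-v1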
import dataclasses
import json
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List, Type, TypeVar, Union # noqa: TID251
from .._compat.builtins import ExceptionGroup
from .._compat.typing import get_args, get_origin
from .model.cache import Cache
from .model.cmakefiles import CMakeFiles
from .model.codemodel import CodeModel, Target
from .model.directory import Directory
from .model.index import Index
__all__ = ["load_reply_dir"]
def __dir__() -> List[str]:
return __all__
T = TypeVar("T")
InputDict = Dict[str, Any]
class Converter:
def __init__(self, base_dir: Path) -> None:
self.base_dir = base_dir
def load(self) -> Index:
"""
Load the newest index.json file and return the Index object.
"""
index_file = sorted(self.base_dir.glob("index-*"))[-1]
with index_file.open(encoding="utf-8") as f:
data = json.load(f)
return self.make_class(data, Index)
def _load_from_json(self, name: Path, target: Type[T]) -> T:
with self.base_dir.joinpath(name).open(encoding="utf-8") as f:
data = json.load(f)
return self.make_class(data, target)
def make_class(self, data: InputDict, target: Type[T]) -> T:
"""
Convert a dict to a dataclass. Automatically load a few nested jsonFile classes.
"""
if (
target in (CodeModel, Target, Cache, CMakeFiles, Directory)
and "jsonFile" in data
and data["jsonFile"] is not None
):
return self._load_from_json(Path(data["jsonFile"]), target)
input_dict = {}
exceptions: List[Exception] = []
# We don't have DataclassInstance exposed in typing yet
for field in dataclasses.fields(target): # type: ignore[arg-type]
json_field = field.name.replace("_v", "-v").replace(
"cmakefiles", "cmakeFiles"
)
if json_field in data:
try:
input_dict[field.name] = self._convert_any(
data[json_field], field.type
)
except TypeError as err:
msg = f"Failed to convert field {field.name!r} of type {field.type}"
if sys.version_info < (3, 11):
err.__notes__ = [*getattr(err, "__notes__", []), msg] # type: ignore[attr-defined]
else:
err.add_note(msg) # pylint: disable=no-member
exceptions.append(err)
except ExceptionGroup as err:
exceptions.append(err)
if exceptions:
msg = f"Failed converting {target}"
raise ExceptionGroup(msg, exceptions)
return target(**input_dict)
def _convert_any(self, item: Any, target: Type[T]) -> T:
if dataclasses.is_dataclass(target):
# We don't have DataclassInstance exposed in typing yet
return self.make_class(item, target) # type: ignore[return-value]
origin = get_origin(target)
if origin is not None:
if origin == list:
return [self._convert_any(i, get_args(target)[0]) for i in item] # type: ignore[return-value]
if origin == Union:
return self._convert_any(item, get_args(target)[0]) # type: ignore[no-any-return]
return target(item) # type: ignore[call-arg]
def load_reply_dir(path: Path) -> Index:
return Converter(path).load()
if __name__ == "__main__":
import argparse
rich_print: Callable[[object], None]
try:
from rich import print as rich_print
except ModuleNotFoundError:
rich_print = builtins.print
parser = argparse.ArgumentParser()
parser.add_argument("reply_dir", type=Path, help="Path to the reply directory")
args = parser.parse_args()
reply = Path(args.reply_dir)
rich_print(load_reply_dir(reply)) | 0.503662 | 0.141459 |
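A minimal usage sketch for the Converter-based loader above (not part of the dump): the build path is hypothetical, and it assumes CMake has already been run with a file-api query so that a reply directory containing an index-*.json file exists.

from pathlib import Path

from scikit_build_core.file_api.reply import load_reply_dir

# Hypothetical build tree; CMake writes the file-api reply under .cmake/api/v1/reply
# once a query (e.g. codemodel-v2) has been placed and cmake has been re-run.
build_dir = Path("build")
reply_dir = build_dir / ".cmake" / "api" / "v1" / "reply"

index = load_reply_dir(reply_dir)  # picks the newest index-*.json and resolves nested jsonFile entries
print(index)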
import builtins
import json
from pathlib import Path
from typing import Any, Callable, Dict, Type, TypeVar # noqa: TID251
import cattr
import cattr.preconf.json
from .model.cache import Cache
from .model.cmakefiles import CMakeFiles
from .model.codemodel import CodeModel, Target
from .model.index import Index, Reply
T = TypeVar("T")
__all__ = ["make_converter", "load_reply_dir"]
def to_path(path: str, _: Type[Path]) -> Path:
return Path(path)
def make_converter(base_dir: Path) -> cattr.preconf.json.JsonConverter:
converter = cattr.preconf.json.make_converter()
converter.register_structure_hook(Path, to_path)
st_hook = cattr.gen.make_dict_structure_fn(
Reply,
converter,
codemodel_v2=cattr.gen.override(rename="codemodel-v2"),
cache_v2=cattr.gen.override(rename="cache-v2"),
cmakefiles_v1=cattr.gen.override(rename="cmakeFiles-v1"),
toolchains_v1=cattr.gen.override(rename="toolchains-v1"),
)
converter.register_structure_hook(Reply, st_hook)
def from_json_file(with_path: Dict[str, Any], t: Type[T]) -> T:
if with_path["jsonFile"] is None:
return converter.structure_attrs_fromdict({}, t)
path = base_dir / Path(with_path["jsonFile"])
raw = json.loads(path.read_text(encoding="utf-8"))
return converter.structure_attrs_fromdict(raw, t)
converter.register_structure_hook(CodeModel, from_json_file)
converter.register_structure_hook(Target, from_json_file)
converter.register_structure_hook(Cache, from_json_file)
converter.register_structure_hook(CMakeFiles, from_json_file)
return converter
def load_reply_dir(reply_dir: Path) -> Index:
converter = make_converter(reply_dir)
indexes = sorted(reply_dir.glob("index-*"))
if not indexes:
msg = f"index file not found in {reply_dir}"
raise IndexError(msg)
index_file = indexes[-1]
return converter.loads(index_file.read_text(), Index)
if __name__ == "__main__":
import argparse
rich_print: Callable[[object], None]
try:
from rich import print as rich_print
except ModuleNotFoundError:
rich_print = builtins.print
parser = argparse.ArgumentParser()
parser.add_argument("reply_dir", type=Path, help="Path to the reply directory")
args = parser.parse_args()
reply = Path(args.reply_dir)
rich_print(load_reply_dir(reply)) | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/file_api/_cattrs_converter.py | _cattrs_converter.py | 0.619817 | 0.094427 |
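A sketch of the cattrs-based equivalent, again with a hypothetical reply path. Unlike reply.load_reply_dir it raises IndexError with a message when no index file is present, and the converter it builds can be reused to structure already-parsed values.

from pathlib import Path

from scikit_build_core.file_api._cattrs_converter import load_reply_dir, make_converter

reply_dir = Path("build/.cmake/api/v1/reply")  # hypothetical

try:
    index = load_reply_dir(reply_dir)
except IndexError as exc:
    print(f"no reply yet: {exc}")

# The converter can also be used directly; the Path hook registered above
# turns plain strings into pathlib.Path objects.
converter = make_converter(reply_dir)
assert converter.structure("CMakeLists.txt", Path) == Path("CMakeLists.txt")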
import dataclasses
from pathlib import Path
from typing import List, Optional
from .common import APIVersion, Paths
__all__ = [
"Archive",
"Artifact",
"CodeModel",
"CommandFragment",
"Configuration",
"Dependency",
"Destination",
"Directory",
"Install",
"Link",
"Prefix",
"Project",
"Source",
"StringCMakeVersion",
"Sysroot",
"Target",
]
def __dir__() -> List[str]:
return __all__
@dataclasses.dataclass(frozen=True)
class StringCMakeVersion:
string: str
@dataclasses.dataclass(frozen=True)
class Directory:
source: Path
build: Path
projectIndex: int
jsonFile: Optional[Path] = None
parentIndex: Optional[int] = None
childIndexes: List[int] = dataclasses.field(default_factory=list)
targetIndexes: List[int] = dataclasses.field(default_factory=list)
minimumCMakeVersion: Optional[StringCMakeVersion] = None
hasInstallRule: bool = False
# Directory is currently not resolved automatically.
@dataclasses.dataclass(frozen=True)
class Project:
name: str
directoryIndexes: List[int]
parentIndex: Optional[int] = None
childIndexes: List[int] = dataclasses.field(default_factory=list)
targetIndexes: List[int] = dataclasses.field(default_factory=list)
@dataclasses.dataclass(frozen=True)
class Artifact:
path: Path
@dataclasses.dataclass(frozen=True)
class Prefix:
path: Path
@dataclasses.dataclass(frozen=True)
class Destination:
path: Path
backtrace: Optional[int] = None
@dataclasses.dataclass(frozen=True)
class Install:
prefix: Prefix
destinations: List[Destination]
@dataclasses.dataclass(frozen=True)
class CommandFragment:
fragment: str
role: str
@dataclasses.dataclass(frozen=True)
class Sysroot:
path: Path
@dataclasses.dataclass(frozen=True)
class Link:
language: str
commandFragments: List[CommandFragment]
lto: Optional[bool] = None
sysroot: Optional[Sysroot] = None
@dataclasses.dataclass(frozen=True)
class Archive:
commandFragments: List[CommandFragment] = dataclasses.field(default_factory=list)
lto: Optional[bool] = None
@dataclasses.dataclass(frozen=True)
class Dependency:
id: str
backtrace: Optional[int] = None
@dataclasses.dataclass(frozen=True)
class Source:
path: Path
compileGroupIndex: Optional[int] = None
sourceGroupIndex: Optional[int] = None
isGenerated: Optional[bool] = None
backtrace: Optional[int] = None
@dataclasses.dataclass(frozen=True)
class Target:
name: str
id: str
type: str
paths: Paths
sources: List[Source] = dataclasses.field(default_factory=list)
nameOnDisk: Optional[Path] = None
artifacts: List[Artifact] = dataclasses.field(default_factory=list)
isGeneratorProvided: Optional[bool] = None
install: Optional[Install] = None
link: Optional[Link] = None
archive: Optional[Archive] = None
dependencies: List[Dependency] = dataclasses.field(default_factory=list)
@dataclasses.dataclass(frozen=True)
class Configuration:
name: str
projects: List[Project]
targets: List[Target]
directories: List[Directory]
@dataclasses.dataclass(frozen=True)
class CodeModel:
kind: str
version: APIVersion
paths: Paths
configurations: List[Configuration] | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/file_api/model/codemodel.py | codemodel.py | 0.839405 | 0.300131 |
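The model classes are plain frozen dataclasses, so they can be instantiated by hand. A small sketch with made-up values (in practice reply.py or the cattrs converter fills them from the CMake reply JSON):

from pathlib import Path

from scikit_build_core.file_api.model.codemodel import (
    Destination,
    Directory,
    Install,
    Prefix,
    Project,
)

project = Project(name="demo", directoryIndexes=[0])
top_dir = Directory(source=Path("."), build=Path("."), projectIndex=0, hasInstallRule=True)
install = Install(
    prefix=Prefix(path=Path("/usr/local")),
    destinations=[Destination(path=Path("lib/python3/site-packages"))],
)
print(project.name, top_dir.hasInstallRule, install.destinations[0].path)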
from __future__ import annotations
import ast
import dataclasses
import inspect
import sys
import textwrap
from collections.abc import Generator
from pathlib import Path
from packaging.version import Version
from .._compat.typing import get_args, get_origin
__all__ = ["pull_docs"]
def __dir__() -> list[str]:
return __all__
def _get_value(value: ast.expr) -> str:
if sys.version_info < (3, 8):
assert isinstance(value, ast.Str)
return value.s
assert isinstance(value, ast.Constant)
return value.value
def pull_docs(dc: type[object]) -> dict[str, str]:
"""
Pulls documentation from a dataclass.
"""
t = ast.parse(inspect.getsource(dc))
(obody,) = t.body
assert isinstance(obody, ast.ClassDef)
body = obody.body
return {
assign.target.id: textwrap.dedent(_get_value(expr.value)).strip().replace("\n", " ") # type: ignore[union-attr]
for assign, expr in zip(body[:-1], body[1:])
if isinstance(assign, ast.AnnAssign) and isinstance(expr, ast.Expr)
}
@dataclasses.dataclass
class DCDoc:
name: str
default: str
docs: str
def __str__(self) -> str:
docs = "\n".join(f"# {s}" for s in textwrap.wrap(self.docs, width=78))
return f"{docs}\n{self.name} = {self.default}\n"
def mk_docs(dc: type[object], prefix: str = "") -> Generator[DCDoc, None, None]:
"""
Makes documentation for a dataclass.
"""
assert dataclasses.is_dataclass(dc)
docs = pull_docs(dc)
for field in dataclasses.fields(dc):
if dataclasses.is_dataclass(field.type):
yield from mk_docs(field.type, prefix=f"{prefix}{field.name}.")
continue
if get_origin(field.type) is list:
field_type = get_args(field.type)[0]
if dataclasses.is_dataclass(field_type):
yield from mk_docs(field_type, prefix=f"{prefix}{field.name}[].")
continue
if field.default is not dataclasses.MISSING and field.default is not None:
default = repr(
str(field.default)
if isinstance(field.default, (Path, Version))
else field.default
)
elif field.default_factory is not dataclasses.MISSING:
default = repr(field.default_factory())
else:
default = '""'
yield DCDoc(
f"{prefix}{field.name}".replace("_", "-"),
default.replace("'", '"').replace("True", "true").replace("False", "false"),
docs[field.name],
) | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/settings/documentation.py | documentation.py | 0.557604 | 0.253896 |
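A sketch of how pull_docs and mk_docs are driven. The Example dataclass below is made up, but it follows the same convention as the settings model: a bare string literal documenting the field that precedes it.

import dataclasses

from scikit_build_core.settings.documentation import mk_docs, pull_docs


@dataclasses.dataclass
class Example:
    build_dir: str = ""
    """
    The directory to run the build in.
    """

    strict_config: bool = True
    """
    Fail on unknown configuration options.
    """


print(pull_docs(Example))  # {'build_dir': 'The directory to run the build in.', ...}
for doc in mk_docs(Example):
    print(doc)  # commented description plus e.g. 'build-dir = ""', keys dashed, defaults rendered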
from __future__ import annotations
import difflib
import os
import sys
from collections.abc import Generator, Mapping
from pathlib import Path
from typing import Any
from packaging.version import Version
from .. import __version__
from .._compat import tomllib
from .._logging import logger, rich_print
from ..errors import CMakeConfigError
from .skbuild_model import ScikitBuildSettings
from .sources import ConfSource, EnvSource, SourceChain, TOMLSource
__all__ = ["SettingsReader"]
def __dir__() -> list[str]:
return __all__
class SettingsReader:
def __init__(
self,
pyproject: dict[str, Any],
config_settings: Mapping[str, str | list[str]],
*,
verify_conf: bool = True,
) -> None:
self.sources = SourceChain(
EnvSource("SKBUILD"),
ConfSource(settings=config_settings, verify=verify_conf),
TOMLSource("tool", "scikit-build", settings=pyproject),
prefixes=["tool", "scikit-build"],
)
self.settings = self.sources.convert_target(ScikitBuildSettings)
if self.settings.minimum_version:
current_version = Version(__version__)
minimum_version = self.settings.minimum_version
if current_version < minimum_version:
msg = (
f"scikit-build-core version {__version__} is too old. "
f"Minimum required version is {self.settings.minimum_version}."
)
raise CMakeConfigError(msg)
if self.settings.editable.rebuild and not self.settings.build_dir:
rich_print(
"[red][bold]ERROR:[/bold] editable mode with rebuild requires build_dir"
)
raise SystemExit(7)
install_policy = (
self.settings.minimum_version is None
or self.settings.minimum_version >= Version("0.5")
)
if self.settings.install.strip is None:
self.settings.install.strip = install_policy
def unrecognized_options(self) -> Generator[str, None, None]:
return self.sources.unrecognized_options(ScikitBuildSettings)
def suggestions(self, index: int) -> dict[str, list[str]]:
all_options = list(self.sources[index].all_option_names(ScikitBuildSettings))
result: dict[str, list[str]] = {
k: [] for k in self.sources[index].unrecognized_options(ScikitBuildSettings)
}
for option in result:
possibilities = {
".".join(k.split(".")[: option.count(".") + 1]) for k in all_options
}
result[option] = difflib.get_close_matches(option, possibilities, n=3)
return result
def print_suggestions(self) -> None:
for index in (1, 2):
name = {1: "config-settings", 2: "pyproject.toml"}[index]
suggestions_dict = self.suggestions(index)
if suggestions_dict:
rich_print(f"[red][bold]ERROR:[/bold] Unrecognized options in {name}:")
for option, suggestions in suggestions_dict.items():
rich_print(f" [red]{option}", end="")
if suggestions:
sugstr = ", ".join(suggestions)
rich_print(f"[yellow] -> Did you mean: {sugstr}?", end="")
rich_print()
def validate_may_exit(self) -> None:
unrecognized = list(self.unrecognized_options())
if unrecognized:
if self.settings.strict_config:
sys.stdout.flush()
self.print_suggestions()
raise SystemExit(7)
logger.warning("Unrecognized options: {}", ", ".join(unrecognized))
for key, value in self.settings.metadata.items():
if "provider" not in value:
sys.stdout.flush()
rich_print(
f"[red][bold]ERROR:[/bold] provider= must be provided in {key!r}:"
)
raise SystemExit(7)
if not self.settings.experimental and (
"provider-path" in value
or not value["provider"].startswith("scikit_build_core.")
):
sys.stdout.flush()
rich_print(
"[red][bold]ERROR:[/bold] experimental must be enabled currently to use plugins not provided by scikit-build-core"
)
raise SystemExit(7)
for gen in self.settings.generate:
if not gen.template and not gen.template_path:
sys.stdout.flush()
rich_print(
"[red][bold]ERROR:[/bold] template= or template-path= must be provided in generate"
)
raise SystemExit(7)
@classmethod
def from_file(
cls,
pyproject_path: os.PathLike[str] | str,
config_settings: Mapping[str, str | list[str]] | None,
*,
verify_conf: bool = True,
) -> SettingsReader:
with Path(pyproject_path).open("rb") as f:
pyproject = tomllib.load(f)
return cls(pyproject, config_settings or {}, verify_conf=verify_conf) | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/settings/skbuild_read_settings.py | skbuild_read_settings.py | 0.538255 | 0.090053 |
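A sketch of driving SettingsReader directly with an in-memory pyproject table and PEP 517 config-settings. The option names used here (cmake.verbose, cmake.build-type, wheel.expand-macos-universal-tags) should match scikit-build-core's documented settings table; everything else is made up.

from scikit_build_core.settings.skbuild_read_settings import SettingsReader

pyproject = {
    "tool": {
        "scikit-build": {
            "cmake": {"verbose": True},
            "wheel": {"expand-macos-universal-tags": True},
        }
    }
}
config_settings = {"cmake.build-type": "Debug"}  # config-settings take precedence over pyproject.toml

reader = SettingsReader(pyproject, config_settings)
reader.validate_may_exit()  # raises SystemExit(7) on unrecognized or invalid options
settings = reader.settings
print(settings.cmake.verbose, settings.cmake.build_type)  # True Debug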
from __future__ import annotations
import dataclasses
import sys
from pathlib import Path
from typing import Any, Union
from packaging.version import Version
from .._compat.builtins import ExceptionGroup
from .._compat.typing import Literal, get_args, get_origin
from .documentation import pull_docs
__all__ = ["to_json_schema", "convert_type", "FailedConversion"]
def __dir__() -> list[str]:
return __all__
class FailedConversion(TypeError):
pass
def to_json_schema(dclass: type[Any], *, normalize_keys: bool) -> dict[str, Any]:
assert dataclasses.is_dataclass(dclass)
props = {}
errs = []
required = []
for field in dataclasses.fields(dclass):
if dataclasses.is_dataclass(field.type):
props[field.name] = to_json_schema(
field.type, normalize_keys=normalize_keys
)
continue
try:
props[field.name] = convert_type(field.type, normalize_keys=normalize_keys)
except FailedConversion as err:
if sys.version_info < (3, 11):
notes = "__notes__" # set so linters won't try to be clever
setattr(err, notes, [*getattr(err, notes, []), f"Field: {field.name}"])
else:
# pylint: disable-next=no-member
err.add_note(f"Field: {field.name}")
errs.append(err)
continue
if field.default is not dataclasses.MISSING and field.default is not None:
props[field.name]["default"] = (
str(field.default)
if isinstance(field.default, (Version, Path))
else field.default
)
if (
field.default_factory is dataclasses.MISSING
and field.default is dataclasses.MISSING
):
required.append(field.name)
if errs:
msg = f"Failed Conversion to JSON Schema on {dclass.__name__}"
raise ExceptionGroup(msg, errs)
docs = pull_docs(dclass)
for k, v in docs.items():
props[k]["description"] = v
if normalize_keys:
props = {k.replace("_", "-"): v for k, v in props.items()}
if required:
return {
"type": "object",
"additionalProperties": False,
"required": required,
"properties": props,
}
return {"type": "object", "additionalProperties": False, "properties": props}
def convert_type(t: Any, *, normalize_keys: bool) -> dict[str, Any]:
if dataclasses.is_dataclass(t):
return to_json_schema(t, normalize_keys=normalize_keys)
if t is str or t is Path or t is Version:
return {"type": "string"}
if t is bool:
return {"type": "boolean"}
origin = get_origin(t)
args = get_args(t)
if origin is list:
assert len(args) == 1
return {
"type": "array",
"items": convert_type(args[0], normalize_keys=normalize_keys),
}
if origin is dict:
assert len(args) == 2
assert args[0] is str
if args[1] is Any:
return {"type": "object"}
return {
"type": "object",
"patternProperties": {
".+": convert_type(args[1], normalize_keys=normalize_keys)
},
}
if origin is Union:
# Ignore optional
if len(args) == 2 and any(a is type(None) for a in args):
return convert_type(
next(iter(a for a in args if a is not type(None))),
normalize_keys=normalize_keys,
)
return {"oneOf": [convert_type(a, normalize_keys=normalize_keys) for a in args]}
if origin is Literal:
return {"enum": list(args)}
msg = f"Cannot convert type {t} to JSON Schema"
raise FailedConversion(msg) | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/settings/json_schema.py | json_schema.py | 0.52975 | 0.263973 |
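A sketch of to_json_schema on a small hand-written dataclass; the field docstrings follow the pull_docs convention so the generated schema picks up descriptions.

import dataclasses
import json
from typing import List, Optional

from scikit_build_core.settings.json_schema import to_json_schema


@dataclasses.dataclass
class Demo:
    build_dir: Optional[str] = None
    """
    The directory to run the build in.
    """

    extra_args: List[str] = dataclasses.field(default_factory=list)
    """
    Extra arguments passed on to CMake.
    """


schema = to_json_schema(Demo, normalize_keys=True)
print(json.dumps(schema, indent=2))
# Keys come out dashed ("build-dir", "extra-args"), Optional collapses to its
# inner type, and List[str] becomes {"type": "array", "items": {"type": "string"}}.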
from __future__ import annotations
import dataclasses
import os
import typing
from collections.abc import Generator, Iterator, Mapping, Sequence
from typing import Any, TypeVar, Union
from .._compat.builtins import ExceptionGroup
from .._compat.typing import Literal, Protocol, get_args, get_origin
T = TypeVar("T")
__all__ = ["Source", "SourceChain", "ConfSource", "EnvSource", "TOMLSource"]
def __dir__() -> list[str]:
return __all__
def _dig_strict(__dict: Mapping[str, Any], *names: str) -> Any:
for name in names:
__dict = __dict[name]
return __dict
def _dig_not_strict(__dict: Mapping[str, Any], *names: str) -> Any:
for name in names:
__dict = __dict.get(name, {})
return __dict
def _dig_fields(__opt: Any, *names: str) -> Any:
for name in names:
fields = dataclasses.fields(__opt)
types = [x.type for x in fields if x.name == name]
if len(types) != 1:
msg = f"Could not access {'.'.join(names)}"
raise KeyError(msg)
(__opt,) = types
return __opt
def _process_union(target: type[Any]) -> Any:
"""
Filters None out of Unions. If a Union only has one item, return that item.
"""
origin = get_origin(target)
if origin is Union:
non_none_args = [a for a in get_args(target) if a is not type(None)]
if len(non_none_args) == 1:
return non_none_args[0]
return Union[tuple(non_none_args)]
return target
def _get_target_raw_type(target: type[Any]) -> Any:
"""
Takes a type like ``Optional[str]`` and returns str, or ``Optional[Dict[str,
int]]`` and returns dict. Returns Union for a Union with more than one
non-none type. Literal is also a valid return.
"""
target = _process_union(target)
origin = get_origin(target)
return origin or target
def _get_inner_type(__target: type[Any]) -> type[Any]:
"""
Takes a type like ``List[str]`` and returns str,
or ``Dict[str, int]`` and returns int.
"""
raw_target = _get_target_raw_type(__target)
target = _process_union(__target)
if raw_target == list:
return get_args(target)[0] # type: ignore[no-any-return]
if raw_target == dict:
return get_args(target)[1] # type: ignore[no-any-return]
msg = f"Expected a list or dict, got {target!r}"
raise AssertionError(msg)
def _nested_dataclass_to_names(__target: type[Any], *inner: str) -> Iterator[list[str]]:
"""
Yields each entry, like ``("a", "b", "c")`` for ``a.b.c``.
"""
if dataclasses.is_dataclass(__target):
for field in dataclasses.fields(__target):
yield from _nested_dataclass_to_names(field.type, *inner, field.name)
else:
yield list(inner)
class Source(Protocol):
def has_item(self, *fields: str, is_dict: bool) -> bool:
"""
Check if the source contains a chain of fields. For example, ``fields =
[Field(name="a"), Field(name="b")]`` will check if the source contains the
key "a.b". ``is_dict`` should be set if it can be nested.
"""
...
def get_item(self, *fields: str, is_dict: bool) -> Any:
"""
Select an item from a chain of fields. Raises KeyError if
there is no item. ``is_dict`` should be set if it can be nested.
"""
...
@classmethod
def convert(cls, item: Any, target: type[Any]) -> object:
"""
Convert an ``item`` from the base representation of the source's source
into a ``target`` type. Raises TypeError if the conversion fails.
"""
...
def unrecognized_options(self, options: object) -> Generator[str, None, None]:
"""
Given a model, produce an iterator of all unrecognized option names.
Empty iterator if this can't be computed for the source (like for
environment variables).
"""
...
def all_option_names(self, target: type[Any]) -> Iterator[str]:
"""
Given a model, produce a list of all possible names (used for producing
suggestions).
"""
...
class EnvSource:
"""
This is a source using environment variables.
"""
def __init__(self, prefix: str, *, env: Mapping[str, str] | None = None) -> None:
self.env = env or os.environ
self.prefix = prefix
def _get_name(self, *fields: str) -> str:
names = [field.upper() for field in fields]
return "_".join([self.prefix, *names] if self.prefix else names)
def has_item(self, *fields: str, is_dict: bool) -> bool: # noqa: ARG002
name = self._get_name(*fields)
return bool(self.env.get(name, ""))
def get_item(
self, *fields: str, is_dict: bool # noqa: ARG002
) -> str | dict[str, str]:
name = self._get_name(*fields)
if name in self.env:
return self.env[name]
msg = f"{name!r} not found in environment"
raise KeyError(msg)
@classmethod
def convert(cls, item: str, target: type[Any]) -> object:
raw_target = _get_target_raw_type(target)
if dataclasses.is_dataclass(raw_target):
msg = f"Array of dataclasses are not supported in configuration settings ({raw_target})"
raise TypeError(msg)
if raw_target == list:
return [
cls.convert(i.strip(), _get_inner_type(target)) for i in item.split(";")
]
if raw_target == dict:
items = (i.strip().split("=") for i in item.split(";"))
return {k: cls.convert(v, _get_inner_type(target)) for k, v in items}
if raw_target is bool:
return item.strip().lower() not in {"0", "false", "off", "no", ""}
if raw_target is Union and str in get_args(target):
return item
if raw_target is Literal:
if item not in get_args(_process_union(target)):
msg = f"{item!r} not in {get_args(_process_union(target))!r}"
raise TypeError(msg)
return item
if callable(raw_target):
return raw_target(item)
msg = f"Can't convert target {target}"
raise TypeError(msg)
def unrecognized_options(
self, options: object # noqa: ARG002
) -> Generator[str, None, None]:
yield from ()
def all_option_names(self, target: type[Any]) -> Iterator[str]:
prefix = [self.prefix] if self.prefix else []
for names in _nested_dataclass_to_names(target):
yield "_".join(prefix + names).upper()
def _unrecognized_dict(
settings: Mapping[str, Any], options: Any, above: Sequence[str]
) -> Generator[str, None, None]:
for keystr in settings:
# We don't have DataclassInstance exposed in typing yet
matches = [
x for x in dataclasses.fields(options) if x.name.replace("_", "-") == keystr
]
if not matches:
yield ".".join((*above, keystr))
continue
(inner_option_field,) = matches
inner_option = inner_option_field.type
if dataclasses.is_dataclass(inner_option):
yield from _unrecognized_dict(
settings[keystr], inner_option, (*above, keystr)
)
class ConfSource:
"""
This is a source for the PEP 517 configuration settings.
You should initialize it with the config-settings dict from PEP 517; a key such as
"a.b" is treated as a nested dict. "verify" determines whether unrecognized options
are checked for. Only set this to False if the settings might be shared with other
options at the same level.
"""
def __init__(
self,
*prefixes: str,
settings: Mapping[str, str | list[str]],
verify: bool = True,
):
self.prefixes = prefixes
self.settings = settings
self.verify = verify
def _get_name(self, *fields: str) -> list[str]:
names = [field.replace("_", "-") for field in fields]
return [*self.prefixes, *names]
def has_item(self, *fields: str, is_dict: bool) -> bool:
names = self._get_name(*fields)
name = ".".join(names)
if is_dict:
return any(k.startswith(f"{name}.") for k in self.settings)
return name in self.settings
def get_item(self, *fields: str, is_dict: bool) -> str | list[str] | dict[str, str]:
names = self._get_name(*fields)
name = ".".join(names)
if is_dict:
d = {
k[len(name) + 1 :]: str(v)
for k, v in self.settings.items()
if k.startswith(f"{name}.")
}
if d:
return d
msg = f"Dict items {name}.* not found in settings"
raise KeyError(msg)
if name in self.settings:
return self.settings[name]
msg = f"{name!r} not found in configuration settings"
raise KeyError(msg)
@classmethod
def convert(
cls, item: str | list[str] | dict[str, str], target: type[Any]
) -> object:
raw_target = _get_target_raw_type(target)
if dataclasses.is_dataclass(raw_target):
msg = f"Array of dataclasses are not supported in configuration settings ({raw_target})"
raise TypeError(msg)
if raw_target == list:
if isinstance(item, list):
return [cls.convert(i, _get_inner_type(target)) for i in item]
if isinstance(item, dict):
msg = f"Expected {target}, got {type(item).__name__}"
raise TypeError(msg)
return [
cls.convert(i.strip(), _get_inner_type(target)) for i in item.split(";")
]
if raw_target == dict:
assert not isinstance(item, (str, list))
return {k: cls.convert(v, _get_inner_type(target)) for k, v in item.items()}
if isinstance(item, (list, dict)):
msg = f"Expected {target}, got {type(item).__name__}"
raise TypeError(msg)
if raw_target is bool:
return item.strip().lower() not in {"0", "false", "off", "no", ""}
if raw_target is Union and str in get_args(target):
return item
if raw_target is Literal:
if item not in get_args(_process_union(target)):
msg = f"{item!r} not in {get_args(_process_union(target))!r}"
raise TypeError(msg)
return item
if callable(raw_target):
return raw_target(item)
msg = f"Can't convert target {target}"
raise TypeError(msg)
def unrecognized_options(self, options: object) -> Generator[str, None, None]:
if not self.verify:
return
for keystr in self.settings:
keys = keystr.replace("-", "_").split(".")[len(self.prefixes) :]
try:
outer_option = _dig_fields(options, *keys[:-1])
except KeyError:
yield ".".join(keystr.split(".")[:-1])
continue
if dataclasses.is_dataclass(outer_option):
try:
_dig_fields(outer_option, keys[-1])
except KeyError:
yield keystr
continue
if _get_target_raw_type(outer_option) == dict:
continue
def all_option_names(self, target: type[Any]) -> Iterator[str]:
for names in _nested_dataclass_to_names(target):
dash_names = [name.replace("_", "-") for name in names]
yield ".".join((*self.prefixes, *dash_names))
class TOMLSource:
def __init__(self, *prefixes: str, settings: Mapping[str, Any]):
self.prefixes = prefixes
self.settings = _dig_not_strict(settings, *prefixes)
def _get_name(self, *fields: str) -> list[str]:
return [field.replace("_", "-") for field in fields]
def has_item(self, *fields: str, is_dict: bool) -> bool: # noqa: ARG002
names = self._get_name(*fields)
try:
_dig_strict(self.settings, *names)
return True
except KeyError:
return False
def get_item(self, *fields: str, is_dict: bool) -> Any: # noqa: ARG002
names = self._get_name(*fields)
try:
return _dig_strict(self.settings, *names)
except KeyError:
msg = f"{names!r} not found in configuration settings"
raise KeyError(msg) from None
@classmethod
def convert(cls, item: Any, target: type[Any]) -> object:
raw_target = _get_target_raw_type(target)
if dataclasses.is_dataclass(raw_target):
fields = dataclasses.fields(raw_target)
values = ((k.replace("-", "_"), v) for k, v in item.items())
return raw_target(
**{
k: cls.convert(v, *[f.type for f in fields if f.name == k])
for k, v in values
}
)
if raw_target is list:
if not isinstance(item, list):
msg = f"Expected {target}, got {type(item).__name__}"
raise TypeError(msg)
return [cls.convert(it, _get_inner_type(target)) for it in item]
if raw_target is dict:
if not isinstance(item, dict):
msg = f"Expected {target}, got {type(item).__name__}"
raise TypeError(msg)
return {k: cls.convert(v, _get_inner_type(target)) for k, v in item.items()}
if raw_target is Any:
return item
if raw_target is Union and type(item) in get_args(target):
return item
if raw_target is Literal:
if item not in get_args(_process_union(target)):
msg = f"{item!r} not in {get_args(_process_union(target))!r}"
raise TypeError(msg)
return item
if callable(raw_target):
return raw_target(item)
msg = f"Can't convert target {target}"
raise TypeError(msg)
def unrecognized_options(self, options: object) -> Generator[str, None, None]:
yield from _unrecognized_dict(self.settings, options, self.prefixes)
def all_option_names(self, target: type[Any]) -> Iterator[str]:
for names in _nested_dataclass_to_names(target):
dash_names = [name.replace("_", "-") for name in names]
yield ".".join((*self.prefixes, *dash_names))
class SourceChain:
def __init__(self, *sources: Source, prefixes: Sequence[str] = ()) -> None:
"""
Combine a collection of sources into a single object that can run
``convert_target(dataclass)``. An optional list of prefixes can be
given that will be prepended (dot separated) to error messages.
"""
self.sources = sources
self.prefixes = prefixes
def __getitem__(self, index: int) -> Source:
return self.sources[index]
def has_item(self, *fields: str, is_dict: bool) -> bool:
return any(source.has_item(*fields, is_dict=is_dict) for source in self.sources)
def get_item(self, *fields: str, is_dict: bool) -> Any:
for source in self.sources:
if source.has_item(*fields, is_dict=is_dict):
return source.get_item(*fields, is_dict=is_dict)
msg = f"{fields!r} not found in any source"
raise KeyError(msg)
def convert_target(self, target: type[T], *prefixes: str) -> T:
"""
Given a dataclass type, create an object of that dataclass filled
with the values in the sources.
"""
errors = []
prep: dict[str, Any] = {}
for field in dataclasses.fields(target): # type: ignore[arg-type]
if dataclasses.is_dataclass(field.type):
try:
prep[field.name] = self.convert_target(
field.type, *prefixes, field.name
)
except Exception as e:
name = ".".join([*self.prefixes, *prefixes, field.name])
e.__notes__ = [*getattr(e, "__notes__", []), f"Field: {name}"] # type: ignore[attr-defined]
errors.append(e)
continue
is_dict = _get_target_raw_type(field.type) == dict
for source in self.sources:
if source.has_item(*prefixes, field.name, is_dict=is_dict):
simple = source.get_item(*prefixes, field.name, is_dict=is_dict)
try:
tmp = source.convert(simple, field.type)
except Exception as e:
name = ".".join([*self.prefixes, *prefixes, field.name])
e.__notes__ = [*getattr(e, "__notes__", []), f"Field {name}"] # type: ignore[attr-defined]
errors.append(e)
prep[field.name] = None
break
if is_dict:
assert isinstance(tmp, dict), f"{field.name} must be a dict"
prep[field.name] = {**tmp, **prep.get(field.name, {})}
continue
prep[field.name] = tmp
break
if field.name in prep:
continue
if field.default is not dataclasses.MISSING:
prep[field.name] = field.default
continue
if field.default_factory is not dataclasses.MISSING:
prep[field.name] = field.default_factory()
continue
errors.append(ValueError(f"Missing value for {field.name!r}"))
if errors:
prefix_str = ".".join([*self.prefixes, *prefixes])
msg = f"Failed converting {prefix_str}"
raise ExceptionGroup(msg, errors)
return target(**prep)
def unrecognized_options(self, options: object) -> Generator[str, None, None]:
for source in self.sources:
yield from source.unrecognized_options(options)
if typing.TYPE_CHECKING:
_: Source = typing.cast(EnvSource, None)
_ = typing.cast(ConfSource, None)
_ = typing.cast(TOMLSource, None) | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/settings/sources.py | sources.py |
if typing.TYPE_CHECKING:
_: Source = typing.cast(EnvSource, None)
_ = typing.cast(ConfSource, None)
    _ = typing.cast(TOMLSource, None)
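As a rough illustration of how these source classes compose, here is a minimal sketch (not part of the module): it fills a small, invented dataclass from a TOML-style mapping, and assumes the classes are importable from scikit_build_core.settings.sources.

# Illustrative sketch; ExampleSettings and the mapping are hypothetical.
import dataclasses

from scikit_build_core.settings.sources import SourceChain, TOMLSource  # assumed import path


@dataclasses.dataclass
class ExampleSettings:
    name: str
    verbose: bool = False


toml_like = {"tool": {"example": {"name": "demo", "verbose": True}}}
chain = SourceChain(TOMLSource("tool", "example", settings=toml_like))
print(chain.convert_target(ExampleSettings))  # ExampleSettings(name='demo', verbose=True)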
from __future__ import annotations
import copy
import json
from typing import Any
from ..resources import resources
__all__ = ["get_skbuild_schema", "generate_skbuild_schema"]
def __dir__() -> list[str]:
return __all__
def generate_skbuild_schema(tool_name: str = "scikit-build") -> dict[str, Any]:
"Generate the complete schema for scikit-build settings."
assert tool_name == "scikit-build", "Only scikit-build is supported."
from .json_schema import to_json_schema
from .skbuild_model import ScikitBuildSettings
schema = {
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/scikit-build/scikit-build-core/blob/main/src/scikit_build_core/resources/scikit-build.schema.json",
"description": "Scikit-build-core's settings.",
**to_json_schema(ScikitBuildSettings, normalize_keys=True),
}
# Manipulate a bit to get better validation
# This is making the generate's template or template-path required
generate = schema["properties"]["generate"]["items"]
for prop in generate["properties"].values():
if prop.get("type", "") == "string":
prop["minLength"] = 1
generate_tmpl = copy.deepcopy(generate)
generate_path = copy.deepcopy(generate)
generate_tmpl["required"] = ["path", "template"]
del generate_tmpl["properties"]["template-path"]
del generate_tmpl["properties"]["template"]["default"]
generate_path["required"] = ["path", "template-path"]
del generate_path["properties"]["template"]
schema["properties"]["generate"]["items"] = {
"oneOf": [generate_tmpl, generate_path]
}
return schema
def get_skbuild_schema(tool_name: str = "scikit-build") -> dict[str, Any]:
"Get the stored complete schema for scikit-build settings."
assert tool_name == "scikit-build", "Only scikit-build is supported."
with resources.joinpath("scikit-build.schema.json").open(encoding="utf-8") as f:
return json.load(f) # type: ignore[no-any-return]
if __name__ == "__main__":
d = generate_skbuild_schema()
    print(json.dumps(d, indent=2))  # noqa: T201

scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/settings/skbuild_schema.py | skbuild_schema.py
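A short usage sketch of the schema helpers above (assuming scikit-build-core is installed): the generated schema is a draft-07 JSON Schema, and each generate item must carry either template or template-path.

# Illustrative sketch.
from scikit_build_core.settings.skbuild_schema import generate_skbuild_schema

schema = generate_skbuild_schema()
assert schema["$schema"] == "http://json-schema.org/draft-07/schema"
variants = schema["properties"]["generate"]["items"]["oneOf"]
assert [v["required"] for v in variants] == [["path", "template"], ["path", "template-path"]]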
from __future__ import annotations
import importlib
import sys
from collections.abc import Generator, Iterable, Mapping
from pathlib import Path
from typing import Any, Union
from .._compat.typing import Protocol
__all__ = ["load_provider", "load_dynamic_metadata"]
def __dir__() -> list[str]:
return __all__
class DynamicMetadataProtocol(Protocol):
def dynamic_metadata(
self, fields: Iterable[str], settings: dict[str, Any]
) -> dict[str, Any]:
...
class DynamicMetadataRequirementsProtocol(DynamicMetadataProtocol, Protocol):
def get_requires_for_dynamic_metadata(self, settings: dict[str, Any]) -> list[str]:
...
class DynamicMetadataWheelProtocol(DynamicMetadataProtocol, Protocol):
def dynamic_wheel(
self, field: str, settings: Mapping[str, Any] | None = None
) -> bool:
...
class DynamicMetadataRequirementsWheelProtocol(
DynamicMetadataRequirementsProtocol, DynamicMetadataWheelProtocol, Protocol
):
...
DMProtocols = Union[
DynamicMetadataProtocol,
DynamicMetadataRequirementsProtocol,
DynamicMetadataWheelProtocol,
DynamicMetadataRequirementsWheelProtocol,
]
def load_provider(
provider: str,
provider_path: str | None = None,
) -> DMProtocols:
if provider_path is None:
return importlib.import_module(provider)
if not Path(provider_path).is_dir():
msg = "provider-path must be an existing directory"
raise AssertionError(msg)
try:
sys.path.insert(0, provider_path)
return importlib.import_module(provider)
finally:
sys.path.pop(0)
def load_dynamic_metadata(
metadata: Mapping[str, Mapping[str, str]]
) -> Generator[tuple[str, DMProtocols | None, dict[str, str]], None, None]:
for field, orig_config in metadata.items():
if "provider" in orig_config:
config = dict(orig_config)
provider = config.pop("provider")
provider_path = config.pop("provider-path", None)
yield field, load_provider(provider, provider_path), config
else:
            yield field, None, dict(orig_config)

scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/settings/_load_provider.py | _load_provider.py
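To make the provider contract concrete, here is a hedged sketch: a hypothetical provider module exposing the dynamic_metadata hook, plus the pass-through behaviour of load_dynamic_metadata when an entry has no provider key (assumes scikit-build-core is installed).

# Illustrative sketch; the provider body and the settings table are hypothetical.
from __future__ import annotations

from collections.abc import Iterable
from typing import Any

from scikit_build_core.settings._load_provider import load_dynamic_metadata


def dynamic_metadata(fields: Iterable[str], settings: dict[str, Any]) -> dict[str, Any]:
    """Shape of the hook a provider module must expose; here it just pins a version."""
    return {"version": "1.2.3"}


# Entries without a "provider" key are yielded unchanged with provider=None:
for field, provider, config in load_dynamic_metadata({"version": {"note": "static"}}):
    print(field, provider, config)  # -> version None {'note': 'static'}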
import dataclasses
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from packaging.version import Version
from .._compat.typing import Literal
__all__ = [
"BackportSettings",
"CMakeSettings",
"EditableSettings",
"InstallSettings",
"LoggingSettings",
"NinjaSettings",
"SDistSettings",
"ScikitBuildSettings",
"GenerateSettings",
"WheelSettings",
]
def __dir__() -> List[str]:
return __all__
@dataclasses.dataclass
class CMakeSettings:
minimum_version: Version = Version("3.15")
"""
The minimum version of CMake to use. If CMake is not present on the system
or is older than this, it will be downloaded via PyPI if possible. An empty
string will disable this check.
"""
args: List[str] = dataclasses.field(default_factory=list)
"""
A list of args to pass to CMake when configuring the project. Setting this
in config or envvar will override toml. See also ``cmake.define``.
"""
define: Dict[str, Union[str, bool]] = dataclasses.field(default_factory=dict)
"""
A table of defines to pass to CMake when configuring the project. Additive.
"""
verbose: bool = False
"""
Verbose printout when building.
"""
build_type: str = "Release"
"""
The build type to use when building the project.
Valid options are: "Debug", "Release", "RelWithDebInfo", "MinSizeRel",
"", etc.
"""
source_dir: Path = Path()
"""
The source directory to use when building the project. Currently only
affects the native builder (not the setuptools plugin).
"""
targets: List[str] = dataclasses.field(default_factory=list)
"""
The build targets to use when building the project. Empty builds the
default target.
"""
@dataclasses.dataclass
class NinjaSettings:
minimum_version: Version = Version("1.5")
"""
The minimum version of Ninja to use. If Ninja is not present on the system
or is older than this, it will be downloaded via PyPI if possible. An empty
string will disable this check.
"""
make_fallback: bool = True
"""
    If a suitable Ninja executable is not found, fall back to Make (when
    available) instead of downloading Ninja via PyPI. Set to False to always
    require Ninja.
"""
@dataclasses.dataclass
class LoggingSettings:
level: Literal[
"NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
] = "WARNING"
"""
The logging level to display, "DEBUG", "INFO", "WARNING", and "ERROR" are
possible options.
"""
@dataclasses.dataclass
class SDistSettings:
include: List[str] = dataclasses.field(default_factory=list)
"""
Files to include in the SDist even if they are skipped by default.
Supports gitignore syntax.
"""
exclude: List[str] = dataclasses.field(default_factory=list)
"""
Files to exclude from the SDist even if they are included by default.
Supports gitignore syntax.
"""
reproducible: bool = True
"""
If set to True, try to build a reproducible distribution (Unix and Python
3.9+ recommended). ``SOURCE_DATE_EPOCH`` will be used for timestamps, or a
fixed value if not set.
"""
cmake: bool = False
"""
If set to True, CMake will be run before building the SDist.
"""
@dataclasses.dataclass
class WheelSettings:
packages: Optional[List[str]] = None
"""
A list of packages to auto-copy into the wheel. If this is not set, it will
default to the first of ``src/<package>`` or ``<package>`` if they exist.
    The prefix(es) will be stripped from the package name inside the wheel.
"""
py_api: str = ""
"""
The Python tags. The default (empty string) will use the default Python
version. You can also set this to "cp37" to enable the CPython 3.7+ Stable
ABI / Limited API (only on CPython and if the version is sufficient,
otherwise this has no effect). Or you can set it to "py3" or "py2.py3" to
ignore Python ABI compatibility. The ABI tag is inferred from this tag.
"""
expand_macos_universal_tags: bool = False
"""
Fill out extra tags that are not required. This adds "x86_64" and "arm64"
to the list of platforms when "universal2" is used, which helps older
    versions of pip (before 21.0.1) find the correct wheel.
"""
install_dir: str = ""
"""
The install directory for the wheel. This is relative to the platlib root.
You might set this to the package name. The original dir is still at
SKBUILD_PLATLIB_DIR (also SKBUILD_DATA_DIR, etc. are available).
EXPERIMENTAL: An absolute path will be one level higher than the platlib
root, giving access to "/platlib", "/data", "/headers", and "/scripts".
"""
license_files: List[str] = dataclasses.field(
default_factory=lambda: ["LICEN[CS]E*", "COPYING*", "NOTICE*", "AUTHORS*"]
)
"""
A list of license files to include in the wheel. Supports glob patterns.
"""
@dataclasses.dataclass
class BackportSettings:
find_python: Version = Version("3.26.1")
"""
    If the CMake version is less than this value, backport a copy of
    FindPython. Set to 0 or the empty string to disable this.
"""
@dataclasses.dataclass
class EditableSettings:
mode: Literal["redirect"] = "redirect"
"""
Select the editable mode to use. Currently only "redirect" is supported.
"""
verbose: bool = True
"""
Turn on verbose output for the editable mode rebuilds.
"""
rebuild: bool = False
"""
Rebuild the project when the package is imported. The build-directory must
be set.
"""
@dataclasses.dataclass
class InstallSettings:
components: List[str] = dataclasses.field(default_factory=list)
"""
The components to install. If empty, all default components are installed.
"""
strip: Optional[bool] = None
"""
    Whether to strip the binaries. Defaults to True for scikit-build-core 0.5+.
"""
@dataclasses.dataclass
class GenerateSettings:
path: Path
"""
The path (relative to platlib) for the file to generate.
"""
template: str = ""
"""
The template to use for the file. This includes string.Template style
placeholders for all the metadata. If empty, a template-path must be set.
"""
template_path: Optional[Path] = None
"""
The path to the template file. If empty, a template must be set.
"""
location: Literal["install", "build", "source"] = "install"
"""
The place to put the generated file. The "build" directory is useful for
CMake files, and the "install" directory is useful for Python files,
    usually. You can also write directly to the "source" directory; this will
    overwrite existing files, so remember to gitignore the generated file.
"""
@dataclasses.dataclass
class ScikitBuildSettings:
cmake: CMakeSettings = dataclasses.field(default_factory=CMakeSettings)
ninja: NinjaSettings = dataclasses.field(default_factory=NinjaSettings)
logging: LoggingSettings = dataclasses.field(default_factory=LoggingSettings)
sdist: SDistSettings = dataclasses.field(default_factory=SDistSettings)
wheel: WheelSettings = dataclasses.field(default_factory=WheelSettings)
backport: BackportSettings = dataclasses.field(default_factory=BackportSettings)
editable: EditableSettings = dataclasses.field(default_factory=EditableSettings)
install: InstallSettings = dataclasses.field(default_factory=InstallSettings)
generate: List[GenerateSettings] = dataclasses.field(default_factory=list)
metadata: Dict[str, Dict[str, Any]] = dataclasses.field(default_factory=dict)
"""
List dynamic metadata fields and hook locations in this table.
"""
strict_config: bool = True
"""
Strictly check all config options. If False, warnings will be
printed for unknown options. If True, an error will be raised.
"""
experimental: bool = False
"""
Enable early previews of features not finalized yet.
"""
minimum_version: Optional[Version] = None
"""
If set, this will provide a method for backward compatibility.
"""
build_dir: str = ""
"""
The build directory. Defaults to a temporary directory, but can be set.
""" | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/settings/skbuild_model.py | skbuild_model.py | import dataclasses
from __future__ import annotations
import base64
import copy
import csv
import dataclasses
import hashlib
import io
import os
import stat
import time
import zipfile
from collections.abc import Mapping, Set
from email.message import Message
from email.policy import EmailPolicy
from pathlib import Path
from zipfile import ZipInfo
import packaging.utils
from packaging.tags import Tag
from packaging.utils import BuildTag
from pyproject_metadata import StandardMetadata
from .. import __version__
from .._compat.typing import Self
EMAIL_POLICY = EmailPolicy(max_line_length=0, mangle_from_=False, utf8=True)
MIN_TIMESTAMP = 315532800 # 1980-01-01 00:00:00 UTC
def _b64encode(data: bytes) -> bytes:
return base64.urlsafe_b64encode(data).rstrip(b"=")
__all__ = ["WheelWriter", "WheelMetadata"]
def __dir__() -> list[str]:
return __all__
@dataclasses.dataclass
class WheelMetadata:
root_is_purelib: bool = False
metadata_version: str = "1.0"
generator: str = f"scikit-build-core {__version__}"
build_tag: BuildTag = ()
tags: Set[Tag] = dataclasses.field(default_factory=frozenset)
def as_bytes(self) -> bytes:
msg = Message(policy=EMAIL_POLICY)
msg["Wheel-Version"] = self.metadata_version
msg["Generator"] = self.generator
msg["Root-Is-Purelib"] = str(self.root_is_purelib).lower()
if self.build_tag:
msg["Build"] = str(self.build_tag[0]) + self.build_tag[1]
for tag in sorted(self.tags, key=lambda t: (t.interpreter, t.abi, t.platform)):
msg["Tag"] = f"{tag.interpreter}-{tag.abi}-{tag.platform}"
return msg.as_bytes()
@dataclasses.dataclass
class WheelWriter:
"""A general tool for writing wheels. Designed to look a little like ZipFile."""
metadata: StandardMetadata
folder: Path
tags: Set[Tag]
wheel_metadata = WheelMetadata(root_is_purelib=False)
buildver: str = ""
license_files: Mapping[Path, bytes] = dataclasses.field(default_factory=dict)
_zipfile: zipfile.ZipFile | None = None
@property
def name_ver(self) -> str:
name = packaging.utils.canonicalize_name(self.metadata.name).replace("-", "_")
# replace - with _ as a local version separator
version = str(self.metadata.version).replace("-", "_")
return f"{name}-{version}"
@property
def basename(self) -> str:
pyver = ".".join(sorted({t.interpreter for t in self.tags}))
abi = ".".join(sorted({t.abi for t in self.tags}))
arch = ".".join(sorted({t.platform for t in self.tags}))
optbuildver = [self.buildver] if self.buildver else []
return "-".join([self.name_ver, *optbuildver, pyver, abi, arch])
@property
def wheelpath(self) -> Path:
return self.folder / f"{self.basename}.whl"
@property
def dist_info(self) -> str:
return f"{self.name_ver}.dist-info"
@staticmethod
def timestamp(mtime: float | None = None) -> tuple[int, int, int, int, int, int]:
timestamp = int(os.environ.get("SOURCE_DATE_EPOCH", mtime or time.time()))
# The ZIP file format does not support timestamps before 1980.
timestamp = max(timestamp, MIN_TIMESTAMP)
return time.gmtime(timestamp)[0:6]
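    # For example, timestamp(100.0) with SOURCE_DATE_EPOCH unset is clamped to
    # MIN_TIMESTAMP and returns (1980, 1, 1, 0, 0, 0).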
def dist_info_contents(self) -> dict[str, bytes]:
entry_points = io.StringIO()
ep = self.metadata.entrypoints.copy()
ep["console_scripts"] = self.metadata.scripts
ep["gui_scripts"] = self.metadata.gui_scripts
for group, entries in ep.items():
if entries:
entry_points.write(f"[{group}]\n")
for name, target in entries.items():
entry_points.write(f"{name} = {target}\n")
entry_points.write("\n")
self.wheel_metadata.tags = self.tags
# Using deepcopy here because of a bug in pyproject-metadata
# https://github.com/FFY00/python-pyproject-metadata/pull/49
rfc822 = copy.deepcopy(self.metadata).as_rfc822()
for fp in self.license_files:
rfc822["License-File"] = f"{fp}"
license_entries = {
f"licenses/{fp}": data for fp, data in self.license_files.items()
}
return {
"METADATA": bytes(rfc822),
"WHEEL": self.wheel_metadata.as_bytes(),
"entry_points.txt": entry_points.getvalue().encode("utf-8"),
**license_entries,
}
def build(self, wheel_dirs: dict[str, Path]) -> None:
assert "platlib" in wheel_dirs
assert "purelib" not in wheel_dirs
assert {"platlib", "data", "headers", "scripts", "null"} >= wheel_dirs.keys()
# The "main" directory (platlib for us) will be handled specially below
plans = {"": wheel_dirs["platlib"]}
data_dir = f"{self.name_ver}.data"
for key in sorted({"data", "headers", "scripts"} & wheel_dirs.keys()):
plans[key] = wheel_dirs[key]
for key, path in plans.items():
for filename in sorted(path.glob("**/*")):
is_in_dist_info = any(x.endswith(".dist-info") for x in filename.parts)
is_python_cache = filename.suffix in {".pyc", ".pyo"}
if filename.is_file() and not is_in_dist_info and not is_python_cache:
relpath = filename.relative_to(path)
target = Path(data_dir) / key / relpath if key else relpath
self.write(str(filename), str(target))
dist_info_contents = self.dist_info_contents()
for key, data in dist_info_contents.items():
self.writestr(f"{self.dist_info}/{key}", data)
def write(self, filename: str, arcname: str | None = None) -> None:
"""Write a file to the archive. Paths are normalized to Posix paths."""
with Path(filename).open("rb") as f:
st = os.fstat(f.fileno())
data = f.read()
# Zipfiles require Posix paths for the arcname
zinfo = ZipInfo(
(arcname or filename).replace("\\", "/"),
date_time=self.timestamp(st.st_mtime),
)
zinfo.compress_type = zipfile.ZIP_DEFLATED
zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16
self.writestr(zinfo, data)
def writestr(self, zinfo_or_arcname: str | ZipInfo, data: bytes) -> None:
"""Write bytes (not strings) to the archive."""
assert isinstance(data, bytes)
assert self._zipfile is not None
if isinstance(zinfo_or_arcname, zipfile.ZipInfo):
zinfo = zinfo_or_arcname
else:
zinfo = zipfile.ZipInfo(
zinfo_or_arcname.replace("\\", "/"),
date_time=self.timestamp(),
)
zinfo.compress_type = zipfile.ZIP_DEFLATED
zinfo.external_attr = (0o664 | stat.S_IFREG) << 16
assert (
"\\" not in zinfo.filename
), f"\\ not supported in zip; got {zinfo.filename!r}"
self._zipfile.writestr(zinfo, data)
def __enter__(self) -> Self:
if not self.wheelpath.parent.exists():
self.wheelpath.parent.mkdir(parents=True)
self._zipfile = zipfile.ZipFile(
self.wheelpath, "w", compression=zipfile.ZIP_DEFLATED
)
return self
def __exit__(self, *args: object) -> None:
assert self._zipfile is not None
record = f"{self.dist_info}/RECORD"
data = io.StringIO()
writer = csv.writer(data, delimiter=",", quotechar='"', lineterminator="\n")
for member in self._zipfile.infolist():
assert (
"\\" not in member.filename
), f"Invalid zip contents: {member.filename}"
with self._zipfile.open(member) as f:
member_data = f.read()
sha = _b64encode(hashlib.sha256(member_data).digest()).decode("ascii")
writer.writerow((member.filename, f"sha256={sha}", member.file_size))
writer.writerow((record, "", ""))
self.writestr(record, data.getvalue().encode("utf-8"))
self._zipfile.close()
        self._zipfile = None

scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/build/_wheelfile.py | _wheelfile.py
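A minimal sketch of the WHEEL metadata that WheelMetadata produces for a single, arbitrary tag (assumes scikit-build-core and packaging are installed).

# Illustrative sketch.
from packaging.tags import Tag

from scikit_build_core.build._wheelfile import WheelMetadata

meta = WheelMetadata(tags={Tag("cp311", "abi3", "linux_x86_64")})
print(meta.as_bytes().decode())
# Wheel-Version: 1.0
# Generator: scikit-build-core 0.5.0  (version depends on the install)
# Root-Is-Purelib: false
# Tag: cp311-abi3-linux_x86_64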
from __future__ import annotations
import contextlib
import copy
import gzip
import io
import os
import tarfile
from pathlib import Path
from packaging.utils import canonicalize_name
from packaging.version import Version
from .. import __version__
from .._compat import tomllib
from .._logging import rich_print
from ..settings.metadata import get_standard_metadata
from ..settings.skbuild_read_settings import SettingsReader
from ._file_processor import each_unignored_file
from ._init import setup_logging
from .generate import generate_file_contents
from .wheel import _build_wheel_impl
__all__ = ["build_sdist"]
def __dir__() -> list[str]:
return __all__
def get_reproducible_epoch() -> int:
"""
    Return the number of seconds since the Unix epoch as an integer.
If the `SOURCE_DATE_EPOCH` environment variable is set, use that value. Otherwise,
always return `1667997441`.
"""
return int(os.environ.get("SOURCE_DATE_EPOCH", "1667997441"))
def normalize_file_permissions(st_mode: int) -> int:
"""
Normalize the permission bits in the st_mode field from stat to 644/755
Popular VCSs only track whether a file is executable or not. The exact
permissions can vary on systems with different umasks. Normalising
to 644 (non executable) or 755 (executable) makes builds more reproducible.
Taken from https://github.com/pypa/flit/blob/6a2a8c6462e49f584941c667b70a6f48a7b3f9ab/flit_core/flit_core/common.py#L257
"""
# Set 644 permissions, leaving higher bits of st_mode unchanged
new_mode = (st_mode | 0o644) & ~0o133
if st_mode & 0o100:
new_mode |= 0o111 # Executable: 644 -> 755
return new_mode
def normalize_tar_info(tar_info: tarfile.TarInfo) -> tarfile.TarInfo:
"""
Normalize the TarInfo associated with a file to improve reproducibility.
Inspired by Hatch
https://github.com/pypa/hatch/blob/573192f88022bb781c698dae2c0b84ef3fb9a7ad/backend/src/hatchling/builders/sdist.py#L51
"""
tar_info = copy.copy(tar_info)
tar_info.uname = ""
tar_info.gname = ""
tar_info.uid = 0
tar_info.gid = 0
tar_info.mode = normalize_file_permissions(tar_info.mode)
tar_info.mtime = get_reproducible_epoch()
return tar_info
def add_bytes_to_tar(
tar: tarfile.TarFile, data: bytes, name: str, normalize: bool
) -> None:
"""
Write ``data`` bytes to ``name`` in a tarfile ``tar``. Normalize the info if
``normalize`` is true.
"""
tarinfo = tarfile.TarInfo(name)
if normalize:
tarinfo = normalize_tar_info(tarinfo)
with io.BytesIO(data) as bio:
tarinfo.size = bio.getbuffer().nbytes
tar.addfile(tarinfo, bio)
def build_sdist(
sdist_directory: str,
config_settings: dict[str, list[str] | str] | None = None,
) -> str:
rich_print(
f"[green]***[/green] [bold][green]scikit-build-core {__version__}[/green]",
"[red](sdist)[/red]",
)
with Path("pyproject.toml").open("rb") as f:
pyproject = tomllib.load(f)
settings_reader = SettingsReader(pyproject, config_settings or {})
settings = settings_reader.settings
setup_logging(settings.logging.level)
settings_reader.validate_may_exit()
sdist_dir = Path(sdist_directory)
reproducible = settings.sdist.reproducible
timestamp = get_reproducible_epoch() if reproducible else None
metadata = get_standard_metadata(pyproject, settings)
# Using deepcopy here because of a bug in pyproject-metadata
# https://github.com/FFY00/python-pyproject-metadata/pull/49
pkg_info = bytes(copy.deepcopy(metadata).as_rfc822())
# Only normalize SDist name if 0.5+ is requested for backwards compat
should_normalize_name = (
settings.minimum_version is None or settings.minimum_version >= Version("0.5")
)
sdist_name = (
canonicalize_name(metadata.name).replace("-", "_")
if should_normalize_name
else metadata.name
)
srcdirname = f"{sdist_name}-{metadata.version}"
filename = f"{srcdirname}.tar.gz"
if settings.sdist.cmake:
_build_wheel_impl(
None, config_settings, None, exit_after_config=True, editable=False
)
for gen in settings.generate:
if gen.location == "source":
contents = generate_file_contents(gen, metadata)
gen.path.write_text(contents)
settings.sdist.include.append(str(gen.path))
sdist_dir.mkdir(parents=True, exist_ok=True)
with contextlib.ExitStack() as stack:
gzip_container = stack.enter_context(
gzip.GzipFile(sdist_dir / filename, mode="wb", mtime=timestamp)
)
tar = stack.enter_context(
tarfile.TarFile(fileobj=gzip_container, mode="w", format=tarfile.PAX_FORMAT)
)
paths = sorted(
each_unignored_file(
Path(),
include=settings.sdist.include,
exclude=settings.sdist.exclude,
)
)
for filepath in paths:
tar.add(
filepath,
arcname=srcdirname / filepath,
filter=normalize_tar_info if reproducible else lambda x: x,
)
add_bytes_to_tar(tar, pkg_info, f"{srcdirname}/PKG-INFO", reproducible)
    return filename

scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/build/sdist.py | sdist.py
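A quick sketch of the reproducibility helpers above (assuming scikit-build-core is installed); the mode values are typical examples.

# Illustrative sketch.
import os

from scikit_build_core.build.sdist import (
    get_reproducible_epoch,
    normalize_file_permissions,
)

print(oct(normalize_file_permissions(0o100664)))  # 0o100644 (non-executable -> 644)
print(oct(normalize_file_permissions(0o100775)))  # 0o100755 (executable bit kept -> 755)

os.environ.pop("SOURCE_DATE_EPOCH", None)
print(get_reproducible_epoch())  # 1667997441, the fixed fallback epoch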
from __future__ import annotations
import dataclasses
import os
import shutil
import sys
import sysconfig
import tempfile
from collections.abc import Sequence
from pathlib import Path
from .. import __version__
from .._compat import tomllib
from .._compat.typing import assert_never
from .._logging import logger, rich_print
from .._shutil import fix_win_37_all_permissions
from ..builder.builder import Builder, archs_to_tags, get_archs
from ..builder.wheel_tag import WheelTag
from ..cmake import CMake, CMaker
from ..resources import resources
from ..settings.metadata import get_standard_metadata
from ..settings.skbuild_read_settings import SettingsReader
from ._init import setup_logging
from ._pathutil import (
is_valid_module,
packages_to_file_mapping,
path_to_module,
scantree,
)
from ._scripts import process_script_dir
from ._wheelfile import WheelWriter
from .generate import generate_file_contents
__all__ = ["_build_wheel_impl"]
def __dir__() -> list[str]:
return __all__
def _get_packages(
*,
packages: Sequence[str] | None,
name: str,
) -> list[str]:
if packages is not None:
return list(packages)
# Auto package discovery
packages = []
for base_path in (Path("src"), Path()):
path = base_path / name
if path.is_dir() and (
(path / "__init__.py").is_file() or (path / "__init__.pyi").is_file()
):
logger.info("Discovered Python package at {}", path)
packages += [str(path)]
break
else:
logger.debug("Didn't find a Python package for {}", name)
return packages
@dataclasses.dataclass
class WheelImplReturn:
wheel_filename: str
mapping: dict[str, str] = dataclasses.field(default_factory=dict)
def _build_wheel_impl(
wheel_directory: str | None,
config_settings: dict[str, list[str] | str] | None,
metadata_directory: str | None,
*,
exit_after_config: bool = False,
editable: bool,
) -> WheelImplReturn:
"""
Build a wheel or just prepare metadata (if wheel dir is None). Can be editable.
"""
pyproject_path = Path("pyproject.toml")
with pyproject_path.open("rb") as ft:
pyproject = tomllib.load(ft)
settings_reader = SettingsReader(pyproject, config_settings or {})
settings = settings_reader.settings
setup_logging(settings.logging.level)
settings_reader.validate_may_exit()
metadata = get_standard_metadata(pyproject, settings)
if metadata.version is None:
msg = "project.version is not statically specified, must be present currently"
raise AssertionError(msg)
normalized_name = metadata.name.replace("-", "_").replace(".", "_")
action = "editable" if editable else "wheel"
if wheel_directory is None:
action = f"metadata_{action}"
if exit_after_config:
action = "sdist"
cmake = CMake.default_search(minimum_version=settings.cmake.minimum_version)
rich_print(
f"[green]***[/green] [bold][green]scikit-build-core {__version__}[/green]",
f"using [blue]CMake {cmake.version}[/blue]",
f"[red]({action})[/red]",
)
with tempfile.TemporaryDirectory() as tmpdir, fix_win_37_all_permissions(tmpdir):
build_tmp_folder = Path(tmpdir)
wheel_dir = build_tmp_folder / "wheel"
tags = WheelTag.compute_best(
archs_to_tags(get_archs(os.environ)),
settings.wheel.py_api,
expand_macos=settings.wheel.expand_macos_universal_tags,
)
# A build dir can be specified, otherwise use a temporary directory
build_dir = (
Path(
settings.build_dir.format(
cache_tag=sys.implementation.cache_tag,
wheel_tag=str(tags),
)
)
if settings.build_dir
else build_tmp_folder / "build"
)
logger.info("Build directory: {}", build_dir.resolve())
wheel_dirs = {
"platlib": wheel_dir / "platlib",
"data": wheel_dir / "data",
"headers": wheel_dir / "headers",
"scripts": wheel_dir / "scripts",
"null": wheel_dir / "null",
}
for d in wheel_dirs.values():
d.mkdir(parents=True)
if ".." in settings.wheel.install_dir:
msg = "wheel.install_dir must not contain '..'"
raise AssertionError(msg)
if settings.wheel.install_dir.startswith("/"):
if not settings.experimental:
msg = "Experimental features must be enabled to use absolute paths in wheel.install_dir"
raise AssertionError(msg)
if settings.wheel.install_dir[1:].split("/")[0] not in wheel_dirs:
msg = "Must target a valid wheel directory"
raise AssertionError(msg)
install_dir = wheel_dir / settings.wheel.install_dir[1:]
else:
install_dir = wheel_dirs["platlib"] / settings.wheel.install_dir
license_files = {
x: x.read_bytes()
for y in settings.wheel.license_files
for x in Path().glob(y)
}
if settings.wheel.license_files and not license_files:
logger.warning(
"No license files found, set wheel.license-files to [] to suppress this warning"
)
for gen in settings.generate:
if gen.location == "source":
contents = generate_file_contents(gen, metadata)
gen.path.write_text(contents)
settings.sdist.include.append(str(gen.path))
config = CMaker(
cmake,
source_dir=settings.cmake.source_dir,
build_dir=build_dir,
build_type=settings.cmake.build_type,
)
builder = Builder(
settings=settings,
config=config,
)
if wheel_directory is None and not exit_after_config:
if metadata_directory is None:
msg = "metadata_directory must be specified if wheel_directory is None"
raise AssertionError(msg)
wheel = WheelWriter(
metadata,
Path(metadata_directory),
tags.as_tags_set(),
license_files=license_files,
)
dist_info_contents = wheel.dist_info_contents()
dist_info = Path(metadata_directory) / f"{wheel.name_ver}.dist-info"
dist_info.mkdir(parents=True)
for key, data in dist_info_contents.items():
path = dist_info / key
if not path.parent.is_dir():
path.parent.mkdir(exist_ok=True, parents=True)
path.write_bytes(data)
return WheelImplReturn(wheel_filename=dist_info.name)
for gen in settings.generate:
contents = generate_file_contents(gen, metadata)
if gen.location == "source":
continue
if gen.location == "build":
path = build_dir / gen.path
elif gen.location == "install":
path = install_dir / gen.path
else:
assert_never(gen.location)
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(contents, encoding="utf-8")
rich_print("[green]***[/green] [bold]Configuring CMake...")
defines: dict[str, str] = {}
cache_entries: dict[str, str | Path] = {
f"SKBUILD_{k.upper()}_DIR": v for k, v in wheel_dirs.items()
}
cache_entries["SKBUILD_STATE"] = action
builder.configure(
defines=defines,
cache_entries=cache_entries,
name=metadata.name,
version=metadata.version,
)
if exit_after_config:
return WheelImplReturn("")
assert wheel_directory is not None
generator = builder.config.env.get(
"CMAKE_GENERATOR",
"MSVC"
if sysconfig.get_platform().startswith("win")
else "Default generator",
)
rich_print(
f"[green]***[/green] [bold]Building project with [blue]{generator}[/blue]..."
)
build_args: list[str] = []
builder.build(build_args=build_args)
rich_print("[green]***[/green] [bold]Installing project into wheel...")
builder.install(install_dir)
rich_print(f"[green]***[/green] [bold]Making {action}...")
packages = _get_packages(
packages=settings.wheel.packages,
name=normalized_name,
)
mapping = packages_to_file_mapping(
packages=packages,
platlib_dir=wheel_dirs["platlib"],
include=settings.sdist.include,
exclude=settings.sdist.exclude,
)
if not editable:
for filepath, package_dir in mapping.items():
Path(package_dir).parent.mkdir(exist_ok=True, parents=True)
shutil.copyfile(filepath, package_dir)
process_script_dir(wheel_dirs["scripts"])
with WheelWriter(
metadata,
Path(wheel_directory),
tags.as_tags_set(),
license_files=license_files,
) as wheel:
wheel.build(wheel_dirs)
if editable:
modules = {
path_to_module(Path(v).relative_to(wheel_dirs["platlib"])): str(
Path(k).resolve()
)
for k, v in mapping.items()
if is_valid_module(Path(v).relative_to(wheel_dirs["platlib"]))
}
installed = {
path_to_module(v.relative_to(wheel_dirs["platlib"])): str(
v.relative_to(wheel_dirs["platlib"])
)
for v in scantree(wheel_dirs["platlib"])
}
editable_py = resources / "_editable_redirect.py"
editable_txt = editable_py.read_text(encoding="utf-8")
reload_dir = (
os.fspath(build_dir.resolve()) if settings.build_dir else None
)
options = []
if not builder.config.single_config and builder.config.build_type:
options += ["--config", builder.config.build_type]
ext_build_opts = ["-v"] if builder.settings.cmake.verbose else []
arguments = (
modules,
installed,
reload_dir,
settings.editable.rebuild,
settings.editable.verbose,
options + ext_build_opts,
options,
)
arguments_str = ", ".join(repr(x) for x in arguments)
editable_txt += f"\n\ninstall({arguments_str})\n"
wheel.writestr(
f"_{normalized_name}_editable.py",
editable_txt.encode("utf-8"),
)
wheel.writestr(
f"_{normalized_name}_editable.pth",
f"import _{normalized_name}_editable\n".encode(),
)
if metadata_directory is not None:
dist_info_contents = wheel.dist_info_contents()
dist_info = Path(metadata_directory)
for key, data in dist_info_contents.items():
path = dist_info / key
previous_data = path.read_bytes()
if previous_data != data:
msg = f"Metadata mismatch in {key}"
logger.error("{}: {!r} != {!r}", msg, previous_data, data)
raise AssertionError(msg)
wheel_filename: str = wheel.wheelpath.name
rich_print(f"[green]***[/green] [bold]Created[/bold] {wheel_filename}...")
return WheelImplReturn(wheel_filename=wheel_filename, mapping=mapping) | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/build/wheel.py | wheel.py |
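# Hedged, illustrative sketch (not part of wheel.py above): what the
# editable-redirect pair assembled at the end of _build_wheel_impl amounts to.
# "mypkg", the mapping values, and the stub install() body are assumptions;
# the real install() body comes from resources/_editable_redirect.py.
modules = {"mypkg": "/src/project/src/mypkg/__init__.py"}
installed = {"mypkg._core": "mypkg/_core.cpython-311-x86_64-linux-gnu.so"}
arguments = (modules, installed, None, False, False, [], [])
editable_txt = "def install(*args):\n    print('register redirecting finder for', args[0])\n"
editable_txt += f"\n\ninstall({', '.join(repr(x) for x in arguments)})\n"
pth_txt = "import _mypkg_editable\n"
# site.py executes *.pth files at interpreter startup, so once both files are
# installed into site-packages the import runs install(...), which in the real
# template registers a meta-path finder pointing back at the source/build tree.
print(editable_txt)
print(pth_txt)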
from __future__ import annotations
import sys
__all__ = [
"build_sdist",
"build_wheel",
"get_requires_for_build_sdist",
"get_requires_for_build_wheel",
"prepare_metadata_for_build_wheel",
"build_editable",
"get_requires_for_build_editable",
"prepare_metadata_for_build_editable",
]
def build_wheel(
wheel_directory: str,
config_settings: dict[str, list[str] | str] | None = None,
metadata_directory: str | None = None,
) -> str:
from .._logging import rich_print
from ..errors import FailedLiveProcessError
from .wheel import _build_wheel_impl
try:
return _build_wheel_impl(
wheel_directory,
config_settings,
metadata_directory,
editable=False,
).wheel_filename
except FailedLiveProcessError as err:
sys.stdout.flush()
rich_print(f"\n[red bold]*** {' '.join(err.args)}", file=sys.stderr)
raise SystemExit(1) from None
def build_editable(
wheel_directory: str,
config_settings: dict[str, list[str] | str] | None = None,
metadata_directory: str | None = None,
) -> str:
from .._logging import rich_print
from ..errors import FailedLiveProcessError
from .wheel import _build_wheel_impl
try:
return _build_wheel_impl(
wheel_directory,
config_settings,
metadata_directory,
editable=True,
).wheel_filename
except FailedLiveProcessError as err:
sys.stdout.flush()
rich_print(f"\n[red bold]*** {' '.join(err.args)}", file=sys.stderr)
raise SystemExit(1) from None
def prepare_metadata_for_build_wheel(
metadata_directory: str,
config_settings: dict[str, list[str] | str] | None = None,
) -> str:
"""Prepare metadata for building a wheel. Does not build the wheel. Returns the dist-info directory."""
from .wheel import _build_wheel_impl
return _build_wheel_impl(
None, config_settings, metadata_directory, editable=False
).wheel_filename # actually returns the dist-info directory
def prepare_metadata_for_build_editable(
metadata_directory: str,
config_settings: dict[str, list[str] | str] | None = None,
) -> str:
"""Prepare metadata for building a wheel. Does not build the wheel. Returns the dist-info directory."""
from .wheel import _build_wheel_impl
return _build_wheel_impl(
None, config_settings, metadata_directory, editable=True
).wheel_filename # actually returns the dist-info directory
def build_sdist(
sdist_directory: str,
config_settings: dict[str, list[str] | str] | None = None,
) -> str:
from .sdist import build_sdist as skbuild_build_sdist
return skbuild_build_sdist(sdist_directory, config_settings)
def get_requires_for_build_sdist(
config_settings: dict[str, str | list[str]] | None = None
) -> list[str]:
from ..builder.get_requires import GetRequires
requires = GetRequires(config_settings)
# These are only injected if cmake is required for the SDist step
cmake_requires = (
[*requires.cmake(), *requires.ninja()] if requires.settings.sdist.cmake else []
)
return [
"pathspec",
"pyproject_metadata",
*cmake_requires,
*requires.dynamic_metadata(),
]
def get_requires_for_build_wheel(
config_settings: dict[str, str | list[str]] | None = None,
) -> list[str]:
from ..builder.get_requires import GetRequires
requires = GetRequires(config_settings)
return [
"pathspec",
"pyproject_metadata",
*requires.cmake(),
*requires.ninja(),
*requires.dynamic_metadata(),
]
def get_requires_for_build_editable(
config_settings: dict[str, str | list[str]] | None = None,
) -> list[str]:
from ..builder.get_requires import GetRequires
requires = GetRequires(config_settings)
return [
"pathspec",
"pyproject_metadata",
*requires.cmake(),
*requires.ninja(),
*requires.dynamic_metadata(),
] | scikit-build-core | /scikit_build_core-0.5.0-py3-none-any.whl/scikit_build_core/build/__init__.py | __init__.py |
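# Hedged, illustrative sketch (not part of __init__.py above): how a PEP 517
# frontend could drive the hooks defined in that module. Assumes it is run from
# the root of a project whose pyproject.toml selects this backend:
#     [build-system]
#     requires = ["scikit-build-core"]
#     build-backend = "scikit_build_core.build"
# The empty config_settings dicts are placeholders.
import os
from scikit_build_core import build as backend

os.makedirs("dist", exist_ok=True)
extra = backend.get_requires_for_build_wheel({})
print("additional build requirements:", extra)
wheel_name = backend.build_wheel("dist", config_settings={})
print("built wheel:", wheel_name)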
from __future__ import annotations
import os
import shutil
import subprocess
import textwrap
from typing import Iterable, Mapping
from ..constants import CMAKE_DEFAULT_EXECUTABLE
from ..exceptions import SKBuildGeneratorNotFoundError
from ..utils import push_dir
test_folder = "_cmake_test_compile"
class CMakePlatform:
"""This class encapsulates the logic for getting the identifier of a
working CMake generator.
Derived classes should at least set :attr:`default_generators`.
"""
def __init__(self) -> None:
# default_generators is a property for mocking in tests
self._default_generators: list[CMakeGenerator] = []
self.architecture: str | None = None
@property
def default_generators(self) -> list[CMakeGenerator]:
"""List of generators considered by :func:`get_best_generator()`."""
return self._default_generators
@default_generators.setter
def default_generators(self, generators: list[CMakeGenerator]) -> None:
self._default_generators = generators
@property
def generator_installation_help(self) -> str:
"""Return a message guiding the user to install a valid toolchain."""
raise NotImplementedError() # pragma: no cover
@staticmethod
def write_test_cmakelist(languages: Iterable[str]) -> None:
"""Write a minimal ``CMakeLists.txt`` useful to check if the
requested ``languages`` are supported."""
if not os.path.exists(test_folder):
os.makedirs(test_folder)
with open(f"{test_folder}/CMakeLists.txt", "w", encoding="utf-8") as f:
f.write("cmake_minimum_required(VERSION 2.8.12)\n")
f.write("PROJECT(compiler_test NONE)\n")
for language in languages:
f.write(f"ENABLE_LANGUAGE({language:s})\n")
f.write(
'if("${_SKBUILD_FORCE_MSVC}")\n'
' math(EXPR FORCE_MAX "${_SKBUILD_FORCE_MSVC}+9")\n'
' math(EXPR FORCE_MIN "${_SKBUILD_FORCE_MSVC}")\n'
" if(NOT MSVC)\n"
' message(FATAL_ERROR "MSVC is required to pass this check.")\n'
" elseif(MSVC_VERSION LESS FORCE_MIN OR MSVC_VERSION GREATER FORCE_MAX)\n"
' message(FATAL_ERROR "MSVC ${MSVC_VERSION} does not pass this check.")\n'
" endif()\n"
"endif()\n"
)
@staticmethod
def cleanup_test() -> None:
"""Delete test project directory."""
if os.path.exists(test_folder):
shutil.rmtree(test_folder)
def get_generator(self, generator_name: str) -> CMakeGenerator:
"""Loop over generators and return the first that matches the given
name.
"""
for default_generator in self.default_generators:
if default_generator.name == generator_name:
return default_generator
return CMakeGenerator(generator_name)
def get_generators(self, generator_name: str) -> list[CMakeGenerator]:
"""Loop over generators and return all that match the given name."""
return [
default_generator
for default_generator in self.default_generators
if default_generator.name == generator_name
]
# TODO: this method name is not great. Does anyone have a better idea for
# renaming it?
def get_best_generator(
self,
generator_name: str | None = None,
skip_generator_test: bool = False,
languages: Iterable[str] = ("CXX", "C"),
cleanup: bool = True,
cmake_executable: str = CMAKE_DEFAULT_EXECUTABLE,
cmake_args: Iterable[str] = (),
architecture: str | None = None,
) -> CMakeGenerator:
"""Loop over generators to find one that works by configuring
and compiling a test project.
:param generator_name: If provided, uses only provided generator, \
instead of trying :attr:`default_generators`.
:type generator_name: str | None
:param skip_generator_test: If set to True and if a generator name is \
specified, the generator test is skipped. If no generator_name is specified \
and the option is set to True, the first available generator is used.
:type skip_generator_test: bool
:param languages: The languages you'll need for your project, in terms \
that CMake recognizes.
:type languages: tuple
:param cleanup: If True, cleans up temporary folder used to test \
generators. Set to False for debugging to see CMake's output files.
:type cleanup: bool
:param cmake_executable: Path to CMake executable used to configure \
and build the test project used to evaluate if a generator is working.
:type cmake_executable: str
:param cmake_args: List of CMake arguments to use when configuring \
the test project. Only arguments starting with ``-DCMAKE_`` are \
used.
:type cmake_args: tuple
:return: CMake Generator object
:rtype: :class:`CMakeGenerator` or None
:raises skbuild.exceptions.SKBuildGeneratorNotFoundError:
"""
candidate_generators: list[CMakeGenerator] = []
if generator_name is None:
candidate_generators = self.default_generators
else:
# Lookup CMakeGenerator by name. Doing this allows getting a
# generator object with its ``env`` property appropriately
# initialized.
# MSVC should be used in "-A arch" form
if architecture is not None:
self.architecture = architecture
# Support classic names for generators
generator_name, self.architecture = _parse_legacy_generator_name(generator_name, self.architecture)
candidate_generators = []
for default_generator in self.default_generators:
if default_generator.name == generator_name:
candidate_generators.append(default_generator)
if not candidate_generators:
candidate_generators = [CMakeGenerator(generator_name)]
self.write_test_cmakelist(languages)
working_generator: CMakeGenerator | None
if skip_generator_test:
working_generator = candidate_generators[0]
else:
working_generator = self.compile_test_cmakelist(cmake_executable, candidate_generators, cmake_args)
if working_generator is None:
line = "*" * 80
installation_help = self.generator_installation_help
msg = textwrap.dedent(
f"""\
{line}
scikit-build could not get a working generator for your system. Aborting build.
{installation_help}
{line}"""
)
raise SKBuildGeneratorNotFoundError(msg)
if cleanup:
CMakePlatform.cleanup_test()
return working_generator
@staticmethod
@push_dir(directory=test_folder)
def compile_test_cmakelist(
cmake_exe_path: str, candidate_generators: Iterable[CMakeGenerator], cmake_args: Iterable[str] = ()
) -> CMakeGenerator | None:
"""Attempt to configure the test project with
each :class:`CMakeGenerator` from ``candidate_generators``.
Only cmake arguments starting with ``-DCMAKE_`` are used to configure
the test project.
The function returns the first generator that successfully
configures the test project using ``cmake_exe_path``."""
# working generator is the first generator we find that works.
working_generator = None
# Include only -DCMAKE_* arguments
cmake_args = [arg for arg in cmake_args if arg.startswith("-DCMAKE_")]
# Do not complain about unused CMake arguments
cmake_args.insert(0, "--no-warn-unused-cli")
def _generator_discovery_status_msg(_generator: CMakeGenerator, suffix: str = "") -> None:
outer = "-" * 80
inner = ["-" * ((idx * 5) - 3) for idx in range(1, 8)]
print("\n".join(inner) if suffix else outer)
print(f"-- Trying {_generator.description!r} generator{suffix}")
print(outer if suffix else "\n".join(inner[::-1]), flush=True)
for generator in candidate_generators:
print("\n", flush=True)
_generator_discovery_status_msg(generator)
# clear the cache for each attempted generator type
if os.path.isdir("build"):
shutil.rmtree("build")
with push_dir("build", make_directory=True):
# call cmake to see if the compiler specified by this
# generator works for the specified languages
cmd = [cmake_exe_path, "../", "-G", generator.name]
if generator.toolset:
cmd.extend(["-T", generator.toolset])
if generator.architecture and "Visual Studio" in generator.name:
cmd.extend(["-A", generator.architecture])
cmd.extend(cmake_args)
cmd.extend(generator.args)
status = subprocess.run(cmd, env=generator.env, check=False).returncode
msg = "success" if status == 0 else "failure"
_generator_discovery_status_msg(generator, f" - {msg}")
print(flush=True)
# cmake succeeded, this generator should work
if status == 0:
# we have a working generator, don't bother looking for more
working_generator = generator
break
return working_generator
class CMakeGenerator:
"""Represents a CMake generator.
.. automethod:: __init__
"""
def __init__(
self,
name: str,
env: Mapping[str, str] | None = None,
toolset: str | None = None,
arch: str | None = None,
args: Iterable[str] | None = None,
) -> None:
"""Instantiate a generator object with the given ``name``.
By default, ``os.environ`` is associated with the generator. Dictionary
passed as ``env`` parameter will be merged with ``os.environ``. If an
environment variable is set in both ``os.environ`` and ``env``, the
variable in ``env`` is used.
Some CMake generators support a ``toolset`` specification to tell the native
build system how to choose a compiler. You can also include CMake arguments.
"""
self._generator_name = name
self.args = list(args or [])
self.env = dict(list(os.environ.items()) + list(env.items() if env else []))
self._generator_toolset = toolset
self._generator_architecture = arch
description_arch = name if arch is None else f"{name} {arch}"
if toolset is None:
self._description = description_arch
else:
self._description = f"{description_arch} {toolset}"
@property
def name(self) -> str:
"""Name of CMake generator."""
return self._generator_name
@property
def toolset(self) -> str | None:
"""Toolset specification associated with the CMake generator."""
return self._generator_toolset
@property
def architecture(self) -> str | None:
"""Architecture associated with the CMake generator."""
return self._generator_architecture
@property
def description(self) -> str:
"""Name of CMake generator with properties describing the environment (e.g. toolset)"""
return self._description
def _parse_legacy_generator_name(generator_name: str, arch: str | None) -> tuple[str, str | None]:
"""
Support classic names for MSVC generators. Architecture is stripped from
the name and "arch" is replaced with the arch string if a legacy name is
given.
"""
if generator_name.startswith("Visual Studio"):
if generator_name.endswith(" Win64"):
arch = "x64"
generator_name = generator_name[:-6]
elif generator_name.endswith(" ARM"):
arch = "ARM"
generator_name = generator_name[:-4]
return generator_name, arch | scikit-build | /scikit_build-0.17.6-py3-none-any.whl/skbuild/platform_specifics/abstract.py | abstract.py |
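# Hedged, illustrative usage sketch (not part of abstract.py above): exercising
# the generator-discovery API through LinuxPlatform from the sibling module
# shown below; on other systems the matching platform class would be used.
from skbuild.platform_specifics.linux import LinuxPlatform

plat = LinuxPlatform()
generator = plat.get_best_generator(languages=("C",), cleanup=True)
print("working generator:", generator.name, "-", generator.description)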
from __future__ import annotations
import platform
import sys
import textwrap
import distro
from . import unix
class LinuxPlatform(unix.UnixPlatform):
"""Linux implementation of :class:`.abstract.CMakePlatform`"""
@staticmethod
def build_essential_install_cmd() -> tuple[str, str]:
"""Return a tuple of the form ``(distribution_name, cmd)``.
``cmd`` is the command for installing the build tools
in the current Linux distribution. It is set to an empty string
if the command is not known.
``distribution_name`` is the name of the current distribution. It
is set to an empty string if the distribution could not be
determined.
"""
# gentoo, slackware: Compiler is available by default.
distribution_name = distro.id()
cmd = ""
if distribution_name in {"debian", "Ubuntu", "mandrake", "mandriva"}:
cmd = "sudo apt-get install build-essential"
elif distribution_name in {"centos", "fedora", "redhat", "turbolinux", "yellowdog", "rocks"}:
# http://unix.stackexchange.com/questions/16422/cant-install-build-essential-on-centos#32439
cmd = "sudo yum groupinstall 'Development Tools'"
elif distribution_name in {"SuSE"}:
# http://serverfault.com/questions/437680/equivalent-development-build-tools-for-suse-professional-11#437681
cmd = "zypper install -t pattern devel_C_C++"
return distribution_name, cmd
@property
def generator_installation_help(self) -> str:
"""Return a message guiding the user to install a valid toolchain."""
distribution_name, cmd = self.build_essential_install_cmd()
install_help = ""
if distribution_name:
install_help = f"But scikit-build does *NOT* know how to install it on {distribution_name}\n"
if distribution_name and cmd:
install_help = f"It can be installed using {distribution_name} package manager:\n\n {cmd}\n"
arch = "x64" if platform.architecture()[0] == "64bit" else "x86"
version_str = ".".join(str(v) for v in sys.version_info[:2])
return textwrap.dedent(
f"""
Building Linux wheels for Python {version_str} requires a compiler (e.g. gcc).
{install_help}
To build compliant wheels, consider using the manylinux system described in PEP-513.
Get it with "dockcross/manylinux-{arch}" docker image:
https://github.com/dockcross/dockcross#readme
For more details, please refer to scikit-build documentation:
http://scikit-build.readthedocs.io/en/latest/generators.html#linux
"""
).strip() | scikit-build | /scikit_build-0.17.6-py3-none-any.whl/skbuild/platform_specifics/linux.py | linux.py |
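# Hedged, illustrative sketch (not part of linux.py above): the distribution
# lookup that feeds the install-help text.
from skbuild.platform_specifics.linux import LinuxPlatform

distribution, cmd = LinuxPlatform.build_essential_install_cmd()
print(distribution or "<unknown distribution>", "->", cmd or "<no known install command>")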
from __future__ import annotations
import os
import platform
import re
import subprocess
import sys
import textwrap
from typing import Iterable
from setuptools import monkey
from .._compat.typing import TypedDict
from . import abstract
from .abstract import CMakeGenerator
VS_YEAR_TO_VERSION = {
"2017": 15,
"2019": 16,
"2022": 17,
}
"""Describes the version of `Visual Studio` supported by
:class:`CMakeVisualStudioIDEGenerator` and
:class:`CMakeVisualStudioCommandLineGenerator`.
The different versions are identified by their year.
"""
VS_YEAR_TO_MSC_VER = {
"2017": "1910", # VS 2017 - can be +9
"2019": "1920", # VS 2019 - can be +9
"2022": "1930", # VS 2022 - can be +9
}
ARCH_TO_MSVC_ARCH = {
"Win32": "x86",
"ARM64": "x86_arm64",
"x64": "x86_amd64",
}
class CachedEnv(TypedDict):
PATH: str
INCLUDE: str
LIB: str
class WindowsPlatform(abstract.CMakePlatform):
"""Windows implementation of :class:`.abstract.CMakePlatform`."""
def __init__(self) -> None:
super().__init__()
self._vs_help = ""
vs_help_template = (
textwrap.dedent(
"""
Building windows wheels for Python {pyver} requires Microsoft Visual Studio %s.
Get it with "%s":
%s
"""
)
.strip()
.format(pyver=".".join(str(v) for v in sys.version_info[:2]))
)
# For Python 3.7 and above: VS2022, VS2019, VS2017
supported_vs_years = [("2022", "v143"), ("2019", "v142"), ("2017", "v141")]
self._vs_help = vs_help_template % (
supported_vs_years[0][0],
"Visual Studio 2017",
"https://visualstudio.microsoft.com/vs/",
)
self._vs_help += (
"\n\n"
+ textwrap.dedent(
"""
Or with "Visual Studio 2019":
https://visualstudio.microsoft.com/vs/
Or with "Visual Studio 2022":
https://visualstudio.microsoft.com/vs/
"""
).strip()
)
try:
import ninja # pylint: disable=import-outside-toplevel
ninja_executable_path = os.path.join(ninja.BIN_DIR, "ninja")
ninja_args = ["-DCMAKE_MAKE_PROGRAM:FILEPATH=" + ninja_executable_path]
except ImportError:
ninja_args = []
extra = []
for vs_year, vs_toolset in supported_vs_years:
vs_version = VS_YEAR_TO_MSC_VER[vs_year]
args = [f"-D_SKBUILD_FORCE_MSVC={vs_version}"]
self.default_generators.extend(
[
CMakeVisualStudioCommandLineGenerator("Ninja", vs_year, vs_toolset, args=ninja_args + args),
CMakeVisualStudioIDEGenerator(vs_year, vs_toolset),
]
)
extra.append(CMakeVisualStudioCommandLineGenerator("NMake Makefiles", vs_year, vs_toolset, args=args))
self.default_generators.extend(extra)
@property
def generator_installation_help(self) -> str:
"""Return a message guiding the user to install a valid toolchain."""
return self._vs_help
def _compute_arch() -> str:
"""Currently only supports Intel -> ARM cross-compilation."""
if platform.machine() == "ARM64" or "arm64" in os.environ.get("SETUPTOOLS_EXT_SUFFIX", "").lower():
return "ARM64"
if platform.architecture()[0] == "64bit":
return "x64"
return "Win32"
class CMakeVisualStudioIDEGenerator(CMakeGenerator):
"""
Represents a Visual Studio CMake generator.
.. automethod:: __init__
"""
def __init__(self, year: str, toolset: str | None = None) -> None:
"""Instantiate a generator object with its name set to the `Visual
Studio` generator associated with the given ``year``
(see :data:`VS_YEAR_TO_VERSION`), the current platform (32-bit
or 64-bit) and the selected ``toolset`` (if applicable).
"""
vs_version = VS_YEAR_TO_VERSION[year]
vs_base = f"Visual Studio {vs_version} {year}"
vs_arch = _compute_arch()
super().__init__(vs_base, toolset=toolset, arch=vs_arch)
def _find_visual_studio_2017_or_newer(vs_version: int) -> str:
"""Adapted from https://github.com/python/cpython/blob/3.7/Lib/distutils/_msvccompiler.py
The ``vs_version`` corresponds to the `Visual Studio` version to lookup.
See :data:`VS_YEAR_TO_VERSION`.
Returns `path` based on the result of invoking ``vswhere.exe``.
If no install is found, returns an empty string.
.. note::
If ``vswhere.exe`` is not available, by definition, VS 2017 or newer is not installed.
"""
root = os.environ.get("PROGRAMFILES(X86)") or os.environ.get("PROGRAMFILES")
if not root:
return ""
try:
path = subprocess.run(
[
os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
"-version",
f"[{vs_version:.1f}, {vs_version + 1:.1f})",
"-prerelease",
"-requires",
"Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"-property",
"installationPath",
"-products",
"*",
],
encoding="utf-8" if sys.platform.startswith("cygwin") else "mbcs",
check=True,
stdout=subprocess.PIPE,
errors="strict",
).stdout.strip()
except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
return ""
path = os.path.join(path, "VC", "Auxiliary", "Build")
if os.path.isdir(path):
return path
return ""
def find_visual_studio(vs_version: int) -> str:
"""Return Visual Studio installation path associated with ``vs_version`` or an empty string if none is found.
The ``vs_version`` corresponds to the `Visual Studio` version to lookup.
See :data:`VS_YEAR_TO_VERSION`.
.. note::
- Returns `path` based on the result of invoking ``vswhere.exe``.
"""
return _find_visual_studio_2017_or_newer(vs_version)
# To avoid multiple slow calls to ``subprocess.run()`` (either directly or
# indirectly through ``query_vcvarsall``), results of previous calls are cached.
__get_msvc_compiler_env_cache: dict[str, CachedEnv] = {}
def _get_msvc_compiler_env(vs_version: int, vs_toolset: str | None = None) -> CachedEnv | dict[str, str]:
"""
Return a dictionary of environment variables corresponding to ``vs_version``
that can be used with :class:`CMakeVisualStudioCommandLineGenerator`.
The ``vs_toolset`` is used only for Visual Studio 2017 or newer (``vs_version >= 15``).
If specified, ``vs_toolset`` is used to set the `-vcvars_ver=XX.Y` argument passed to
``vcvarsall.bat`` script.
"""
# Set architecture
vc_arch = ARCH_TO_MSVC_ARCH[_compute_arch()]
# If any, return cached version
cache_key = ",".join([str(vs_version), vc_arch, str(vs_toolset)])
if cache_key in __get_msvc_compiler_env_cache:
return __get_msvc_compiler_env_cache[cache_key]
monkey.patch_for_msvc_specialized_compiler() # type: ignore[no-untyped-call]
vc_dir = find_visual_studio(vs_version)
vcvarsall = os.path.join(vc_dir, "vcvarsall.bat")
if not os.path.exists(vcvarsall):
return {}
# Set vcvars_ver argument based on toolset
vcvars_ver = ""
if vs_toolset is not None:
match = re.findall(r"^v(\d\d)(\d+)$", vs_toolset)[0]
if match:
match_str = ".".join(match)
vcvars_ver = f"-vcvars_ver={match_str}"
try:
out_bytes = subprocess.run(
f'cmd /u /c "{vcvarsall}" {vc_arch} {vcvars_ver} && set',
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=sys.platform.startswith("cygwin"),
check=True,
).stdout
out = out_bytes.decode("utf-16le", errors="replace")
vc_env = {
key.lower(): value for key, _, value in (line.partition("=") for line in out.splitlines()) if key and value
}
cached_env: CachedEnv = {
"PATH": vc_env.get("path", ""),
"INCLUDE": vc_env.get("include", ""),
"LIB": vc_env.get("lib", ""),
}
__get_msvc_compiler_env_cache[cache_key] = cached_env
return cached_env
except subprocess.CalledProcessError as exc:
print(exc.output.decode("utf-16le", errors="replace"), file=sys.stderr, flush=True)
return {}
class CMakeVisualStudioCommandLineGenerator(CMakeGenerator):
"""
Represents a command-line CMake generator initialized with a
specific `Visual Studio` environment.
.. automethod:: __init__
"""
def __init__(self, name: str, year: str, toolset: str | None = None, args: Iterable[str] | None = None):
"""Instantiate CMake command-line generator.
The generator ``name`` can be values like `Ninja`, `NMake Makefiles`
or `NMake Makefiles JOM`.
The ``year`` defines the `Visual Studio` environment associated
with the generator. See :data:`VS_YEAR_TO_VERSION`.
If set, the ``toolset`` defines the `Visual Studio Toolset` to select.
The platform (32-bit or 64-bit or ARM) is automatically selected.
"""
arch = _compute_arch()
vc_env = _get_msvc_compiler_env(VS_YEAR_TO_VERSION[year], toolset)
env = {str(key.upper()): str(value) for key, value in vc_env.items()}
super().__init__(name, env, arch=arch, args=args)
self._description = f"{self.name} ({CMakeVisualStudioIDEGenerator(year, toolset).description})" | scikit-build | /scikit_build-0.17.6-py3-none-any.whl/skbuild/platform_specifics/windows.py | windows.py |
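# Hedged, illustrative sketch (not part of windows.py above, and Windows-only
# in practice): how the candidate generators assembled in
# WindowsPlatform.__init__ describe themselves; the year/toolset pair mirrors
# supported_vs_years.
from skbuild.platform_specifics.windows import (
    CMakeVisualStudioCommandLineGenerator,
    CMakeVisualStudioIDEGenerator,
)

ide = CMakeVisualStudioIDEGenerator("2022", "v143")
cli = CMakeVisualStudioCommandLineGenerator("Ninja", "2022", "v143")
print(ide.description)  # e.g. "Visual Studio 17 2022 x64 v143"
print(cli.description)  # e.g. "Ninja (Visual Studio 17 2022 x64 v143)"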
from __future__ import annotations
import os
from setuptools.command.build_py import build_py as _build_py
from ..constants import CMAKE_INSTALL_DIR
from ..utils import distribution_hide_listing, logger
from . import set_build_base_mixin
class build_py(set_build_base_mixin, _build_py):
"""Custom implementation of ``build_py`` setuptools command."""
def initialize_options(self) -> None:
"""Handle --hide-listing option.
Initializes ``outfiles_count``.
"""
super().initialize_options()
self.outfiles_count = 0
def build_module(self, module: str | list[str] | tuple[str, ...], module_file: str, package: str) -> None:
"""Handle --hide-listing option.
Increments ``outfiles_count``.
"""
super().build_module(module, module_file, package) # type: ignore[no-untyped-call]
self.outfiles_count += 1
def run(self, *args: object, **kwargs: object) -> None:
"""Handle --hide-listing option.
Display number of copied files. It corresponds to the value
of ``outfiles_count``.
"""
with distribution_hide_listing(self.distribution):
super().run(*args, **kwargs)
logger.info("copied %d files", self.outfiles_count)
def find_modules(self) -> list[tuple[str, str, str]]:
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages: dict[str, tuple[str, bool]] = {}
# List of (package, module, filename) tuples to return
modules: list[tuple[str, str, str]] = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = module.split(".")
package = ".".join(path[0:-1])
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package) # type: ignore[no-untyped-call]
checked = False
if not checked:
init_py = self.check_package(package, package_dir) # type: ignore[no-untyped-call]
packages[package] = (package_dir, True)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
# skbuild: prepend CMAKE_INSTALL_DIR if file exists in the
# CMake install tree.
if os.path.exists(os.path.join(CMAKE_INSTALL_DIR(), module_file)):
module_file = os.path.join(CMAKE_INSTALL_DIR(), module_file)
if not self.check_module(module, module_file): # type: ignore[no-untyped-call]
continue
modules.append((package, module_base, module_file))
return modules | scikit-build | /scikit_build-0.17.6-py3-none-any.whl/skbuild/command/build_py.py | build_py.py | 0.719088 | 0.165054 |
from __future__ import annotations
import contextlib
import logging
import os
import typing
from contextlib import contextmanager
from typing import Any, Iterable, Iterator, Mapping, NamedTuple, Sequence, TypeVar
from distutils.command.build_py import build_py as distutils_build_py
from distutils.errors import DistutilsTemplateError
from distutils.filelist import FileList
from distutils.text_file import TextFile
from .._compat.typing import Protocol
if typing.TYPE_CHECKING:
import setuptools._distutils.dist
class CommonLog(Protocol):
def info(self, __msg: str, *args: object) -> None:
...
logger: CommonLog
try:
import setuptools.logging
skb_log = logging.getLogger("skbuild")
skb_log.setLevel(logging.INFO)
logging_module = True
logger = skb_log
except ImportError:
from distutils import log as distutils_log
logger = distutils_log
logging_module = False
class Distribution(NamedTuple):
script_name: str
def _log_warning(msg: str, *args: object) -> None:
try:
if logging_module:
skb_log.warning(msg, *args)
else:
# pylint: disable-next=deprecated-method
distutils_log.warn(msg, *args)
except ValueError:
# Setuptools might disconnect the logger. That shouldn't be an error for a warning.
print(msg % args, flush=True)
def mkdir_p(path: str) -> None:
"""Ensure directory ``path`` exists. If needed, parent directories
are created.
"""
return os.makedirs(path, exist_ok=True)
Self = TypeVar("Self", bound="push_dir")
class push_dir(contextlib.ContextDecorator):
"""Context manager to change current directory."""
def __init__(self, directory: str | None = None, make_directory: bool = False) -> None:
"""
:param directory:
Path to set as current working directory. If ``None``
is passed, ``os.getcwd()`` is used instead.
:param make_directory:
If True, ``directory`` is created.
"""
super().__init__()
self.directory = directory
self.make_directory = make_directory
self.old_cwd: str | None = None
def __enter__(self: Self) -> Self:
self.old_cwd = os.getcwd()
if self.directory:
if self.make_directory:
os.makedirs(self.directory, exist_ok=True)
os.chdir(self.directory)
return self
def __exit__(self, typ: None, val: None, traceback: None) -> None:
assert self.old_cwd is not None
os.chdir(self.old_cwd)
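# Usage sketch for ``push_dir`` (the directory name below is hypothetical):
#
#     with push_dir("build/tmp", make_directory=True):
#         ...  # current working directory is build/tmp here
#     # the previous working directory is restored on exit
#
# Since it derives from ``contextlib.ContextDecorator``, it can also be applied
# as a function decorator.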
class PythonModuleFinder(distutils_build_py):
"""Convenience class to search for python modules.
This class is based on ``distutils.command.build_py.build_py`` and
provides a specialized version of ``find_all_modules()``.
"""
distribution: Distribution # type: ignore[assignment]
# pylint: disable-next=super-init-not-called
def __init__(
self,
packages: Sequence[str],
package_dir: Mapping[str, str],
py_modules: Sequence[str],
alternative_build_base: str | None = None,
) -> None:
"""
:param packages: List of packages to search.
:param package_dir: Dictionary mapping ``package`` with ``directory``.
:param py_modules: List of python modules.
:param alternative_build_base: Additional directory to search in.
"""
self.packages = packages
self.package_dir = package_dir
self.py_modules = py_modules
self.alternative_build_base = alternative_build_base
self.distribution = Distribution("setup.py")
def find_all_modules(self, project_dir: str | None = None) -> list[Any | tuple[str, str, str]]:
"""Compute the list of all modules that would be built by
project located in current directory, whether they are
specified one-module-at-a-time ``py_modules`` or by whole
packages ``packages``.
By default, the function will search for modules in the current
directory. Specifying the ``project_dir`` parameter allows changing
this.
Return a list of tuples ``(package, module, module_file)``.
"""
with push_dir(project_dir):
# TODO: typestubs for distutils
return super().find_all_modules() # type: ignore[no-any-return, no-untyped-call]
def find_package_modules(self, package: str, package_dir: str) -> Iterable[tuple[str, str, str]]:
"""Temporally prepend the ``alternative_build_base`` to ``module_file``.
Doing so will ensure modules can also be found in other location
(e.g ``skbuild.constants.CMAKE_INSTALL_DIR``).
"""
if package_dir and not os.path.exists(package_dir) and self.alternative_build_base is not None:
package_dir = os.path.join(self.alternative_build_base, package_dir)
modules: Iterable[tuple[str, str, str]] = super().find_package_modules(package, package_dir) # type: ignore[no-untyped-call]
# Strip the alternative base from module_file
def _strip_directory(entry: tuple[str, str, str]) -> tuple[str, str, str]:
module_file = entry[2]
if self.alternative_build_base is not None and module_file.startswith(self.alternative_build_base):
module_file = module_file[len(self.alternative_build_base) + 1 :]
return entry[0], entry[1], module_file
return map(_strip_directory, modules)
def check_module(self, module: str, module_file: str) -> bool:
"""Return True if ``module_file`` belongs to ``module``."""
if self.alternative_build_base is not None:
updated_module_file = os.path.join(self.alternative_build_base, module_file)
if os.path.exists(updated_module_file):
module_file = updated_module_file
if not os.path.isfile(module_file):
_log_warning("file %s (for module %s) not found", module_file, module)
return False
return True
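# Usage sketch for ``PythonModuleFinder`` (the package name and build base below
# are hypothetical):
#
#     finder = PythonModuleFinder(
#         packages=["mypkg"],
#         package_dir={},
#         py_modules=[],
#         alternative_build_base="_skbuild/cmake-install",
#     )
#     modules = finder.find_all_modules(project_dir=".")
#     # -> list of (package, module, module_file) tuples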
OptStr = TypeVar("OptStr", str, None)
def to_platform_path(path: OptStr) -> OptStr:
"""Return a version of ``path`` where all separator are :attr:`os.sep`"""
if path is None:
return path
return path.replace("/", os.sep).replace("\\", os.sep)
def to_unix_path(path: OptStr) -> OptStr:
"""Return a version of ``path`` where all separator are ``/``"""
if path is None:
return path
return path.replace("\\", "/")
@contextmanager
def distribution_hide_listing(
distribution: setuptools._distutils.dist.Distribution | Distribution,
) -> Iterator[bool | int]:
"""Given a ``distribution``, this context manager temporarily
sets distutils threshold to WARN if ``--hide-listing`` argument
was provided.
It yields True if ``--hide-listing`` argument was provided.
"""
hide_listing = getattr(distribution, "hide_listing", False)
wheel_log = logging.getLogger("wheel")
root_log = logging.getLogger() # setuptools 65.6+ needs this hidden too
if logging_module:
# Setuptools 60.2+, will always be on Python 3.7+
old_wheel_level = wheel_log.getEffectiveLevel()
old_root_level = root_log.getEffectiveLevel()
try:
if hide_listing:
wheel_log.setLevel(logging.WARNING)
root_log.setLevel(logging.WARNING)
# The classic logger doesn't respond to set_threshold anymore,
# but it does log info and above to stdout, so let's hide that
with open(os.devnull, "w", encoding="utf-8") as f, contextlib.redirect_stdout(f):
yield hide_listing
else:
yield hide_listing
finally:
if hide_listing:
wheel_log.setLevel(old_wheel_level)
root_log.setLevel(old_root_level)
else:
old_threshold = distutils_log._global_log.threshold # type: ignore[attr-defined]
if hide_listing:
distutils_log.set_threshold(distutils_log.WARN)
try:
yield hide_listing
finally:
distutils_log.set_threshold(old_threshold)
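# Usage sketch for ``distribution_hide_listing`` (``dist`` is a setuptools
# Distribution instance; ``hide_listing`` is the attribute read above):
#
#     dist.hide_listing = True
#     with distribution_hide_listing(dist):
#         ...  # file-copy listings from setuptools/wheel are suppressed here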
def parse_manifestin(template: str) -> list[str]:
"""This function parses template file (usually MANIFEST.in)"""
if not os.path.exists(template):
return []
template_file = TextFile(
template,
strip_comments=True,
skip_blanks=True,
join_lines=True,
lstrip_ws=True,
rstrip_ws=True,
collapse_join=True,
)
file_list = FileList()
try:
while True:
line = template_file.readline()
if line is None: # end of file
break
try:
file_list.process_template_line(line)
# the call above can raise a DistutilsTemplateError for
# malformed lines, or a ValueError from the lower-level
# convert_path function
except (DistutilsTemplateError, ValueError) as msg:
filename = template_file.filename if hasattr(template_file, "filename") else "Unknown"
current_line = template_file.current_line if hasattr(template_file, "current_line") else "Unknown"
print(f"{filename}, line {current_line}: {msg}", flush=True)
return file_list.files
finally:
template_file.close() | scikit-build | /scikit_build-0.17.6-py3-none-any.whl/skbuild/utils/__init__.py | __init__.py | 0.731346 | 0.147003 |
[![pypi](https://img.shields.io/pypi/v/scikit-cache.svg)](https://pypi.org/project/scikit-cache/)
[![pypi](https://img.shields.io/pypi/pyversions/scikit-cache.svg)](https://pypi.org/project/scikit-cache/)
[![pypi](https://img.shields.io/pypi/l/scikit-cache.svg)](https://raw.githubusercontent.com/deniskrumko/scikit-cache/master/LICENSE)
# Scikit Cache
Pickle-based caching library. Supports file-system caching only.
## Installation
```
pip install scikit_cache
```
Or, to develop the package, install the dev dependencies:
```
pip install -e ".[dev]" && pip uninstall -y scikit_cache
```
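## Quick start
A minimal usage sketch (``CacheController`` and ``logger`` appear elsewhere in this README; the remaining calls are taken from the package sources below, and default constructor settings are assumed to be sufficient):
```
from scikit_cache import CacheController
cache = CacheController()  # assumption: default settings create a local file cache
@cache.decorator(ttl=3600)
def heavy_computation(x):
    return x ** 2
cache.enable()           # turn caching on
heavy_computation(10)    # cache miss: computed and stored
heavy_computation(10)    # cache hit: loaded from cache
```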
## How to disable logs
### Option 1: Disable all logs in cache controller
```
from scikit_cache import CacheController
cache = CacheController(..., logger=None)
```
### Option 2: Disable specific logs
To disable specific logs you need to add one of these lines before executing code with cache:
```
import logging
# Disable basic logs like "cache enabled" or "cache disabled"
logging.getLogger('scikit_cache.controller').setLevel(logging.ERROR)
# Disable logs from "@cache.decorator" only
logging.getLogger('scikit_cache.decorator').setLevel(logging.ERROR)
# Disable logs for estimators created by "make_cached_estimator"
logging.getLogger('scikit_cache.estimator').setLevel(logging.ERROR)
# Disable hashing errors
logging.getLogger('scikit_cache.hashing').setLevel(logging.ERROR)
```
| scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/README.md | README.md | 0.599837 | 0.903379 |
import logging
from functools import wraps
from typing import (
Any,
Callable,
List,
Optional,
Tuple,
)
from ..resources import (
CacheKey,
ObjCacheMeta,
)
from ..utils import (
CACHE_HIT_ATTR,
format_bytes_to_str,
)
decorator_logger = logging.getLogger('scikit_cache.decorator')
class DecoratorMixin:
"""Mixin for ``CacheController`` class with cache decorator."""
def decorator(
self,
ignored_kwargs: Optional[List[str]] = None,
external_packages: Optional[List[str]] = None,
ttl: Optional[int] = None,
fixed_hash: Optional[str] = None,
) -> Callable:
"""Decorator for function caching.
Cache key will be automatically generated using:
- full function name (module path + name)
- passed args/kwargs
- current state of function code
By default, if cache is not enabled yet, the decorated function works as normal, without
cache. When cache is activated (using the ``cache.enable()`` function), the decorated function
will check existing cache and save new cache too.
Additionally, the decorated function accepts an extra ``use_cache`` keyword argument to
manually enable/disable caching on a per-call basis. For example: ``foo(..., use_cache=True)``
will enable cache just for this call of the ``foo`` function using default parameters, even if
``CacheController`` is not yet enabled.
On the other hand, ``use_cache=False`` allows manually disabling cache for a specific func
call even if cache is enabled.
:param ignored_kwargs: list of kwarg names that will be ignored during creating cache key.
Use it for params that don't affect function usage (like ``logger`` param and so on).
:param external_packages: list of external package names. It's a good practice to define
all external packages that are used inside the specific function. It allows checking
fewer packages if the ``check_external_packages`` option is enabled in ``CacheController``.
:param ttl: optional TTL for specific decorated function (in seconds). Set to -1 for
infinite TTL. Set to None to use ``cache.default_ttl`` value (by default).
:param fixed_hash: fixed func code hash. Use any string as hash to skip checking whether the
function was modified or not. We do not recommend manually setting the func code hash as it may
cause the decorated function to return unexpected results!
:return: decorated function
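Example (a minimal sketch; ``cache`` is an already created ``CacheController``
instance and ``train`` is a hypothetical user function)::

    @cache.decorator(ignored_kwargs=['logger'], ttl=3600)
    def train(x, y, logger=None):
        ...

    cache.enable()                  # from now on, results of ``train`` are cached
    train(1, 2)                     # cache miss: executed and written to cache
    train(1, 2)                     # cache hit: returns the cached result
    train(1, 2, use_cache=False)    # bypasses the cache for this single call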
"""
def inner(func: Callable) -> Callable:
@wraps(func)
def wrapper(*func_args: Any, **func_kwargs: Any) -> Any:
# Force cache enabled if function called like "my_func(..., use_cache=True)"
force_use_cache = func_kwargs.pop('use_cache', None)
if force_use_cache is True:
if self.is_enabled_for_functions:
self._log(
'use_cache=True ignored, cache already enabled',
func=func,
level='warning',
logger=decorator_logger,
)
elif force_use_cache is False:
if self.is_enabled_for_functions:
self._log(
'use_cache=False enabled, cache is ignored',
func=func,
level='warning',
logger=decorator_logger,
)
# Disable cache by force -> return result immediately
return func(*func_args, **func_kwargs)
# Use cache only if it's enabled and func not in blacklist.
# Or if force_use_cache=True
use_cache = (
self.is_enabled_for_func(func)
if force_use_cache is None
else force_use_cache
)
if not use_cache:
# If cache is disabled (or func ignored), return func result immediately
return func(*func_args, **func_kwargs)
if not func_args and not func_kwargs:
raise ValueError(
'Could not cache function that has no args/kwargs!\n'
f'Remove cache.decorator() from function {func} or add args/kwargs.',
)
# Build cache key and meta object for specific function call
func_cache_key, func_meta = self._build_key_meta(
func=func,
func_args=func_args,
func_kwargs=func_kwargs,
ignored_kwargs=ignored_kwargs,
ttl=ttl,
fixed_hash=fixed_hash,
)
# Save to function new attribute to detect if result was retrieved from cache or not
setattr(func, CACHE_HIT_ATTR, None)
if 'r' in self.__mode__:
found, cached_result = self._func_cache_get(
func_cache_key=func_cache_key,
func_meta=func_meta,
external_packages=external_packages,
)
if found:
self._log(
'cache hit',
func=func,
level='info',
color='green',
logger=decorator_logger,
)
setattr(func, CACHE_HIT_ATTR, True)
return cached_result
else:
setattr(func, CACHE_HIT_ATTR, False)
self._log(
'cache miss',
func=func,
level='warning',
logger=decorator_logger,
)
func_result = func(*func_args, **func_kwargs)
if 'w' in self.__mode__:
self._func_cache_set(
func_cache_key=func_cache_key,
func_meta=func_meta,
func_result=func_result,
)
size = format_bytes_to_str(func_meta.object_size)
self._log(
f'cache write - {size}',
func=func,
level='info',
logger=decorator_logger,
)
return func_result
return wrapper
return inner
def _func_cache_set(
self,
func_cache_key: CacheKey,
func_meta: ObjCacheMeta,
func_result: Any,
) -> Tuple[CacheKey, ObjCacheMeta]:
"""High-level function to set cache for function result.
:param func: function (callable) that returned result
:param func_result: result that will be cached
:param func_ttl: function TTL in seconds
:param func_args: function arguments
:param func_kwargs: function keyword arguments
:param fixed_hash: fixed function code hash
:return: generated cache key and cache meta (with object size)
"""
cache_key = func_cache_key.add_random_part()
self._set(key=cache_key, value=func_result, meta=func_meta)
return cache_key, func_meta
def _func_cache_get(
self,
func_cache_key: CacheKey,
func_meta: ObjCacheMeta,
external_packages: Optional[List[str]] = None,
) -> Tuple[bool, Any]:
"""High-level function to get cache result for function.
:param func_cache_key: cache key of function
:param func_meta: meta information about called function
:param external_packages: list of specific packages to take into account when getting the cached result.
:return: tuple with hit or not (boolean), and cached value (if cache hit)
"""
child_keys = self._find_child_keys(func_cache_key)
for child_key in child_keys:
child_meta: Optional[ObjCacheMeta] = self._get_cache_meta(child_key)
if child_meta is None:
continue
if child_meta.is_similar_to(
to=func_meta,
check_python_version=self.check_python_version,
check_pickle_version=self.check_pickle_version,
check_self_version=self.check_self_version,
check_func_source=self.check_func_source,
check_external_packages=self._collect_external_packages(external_packages),
check_version_level=self.check_version_level,
):
return self._get(key=child_key) # type: ignore
return False, None
def _filter_func_kwargs(
self,
func_kwargs: dict,
ignored_kwargs: Optional[List[str]] = None,
) -> dict:
"""Get list of kwargs that will be used as cache key (and not ignored)."""
return {
k: func_kwargs[k] for k in func_kwargs if k not in ignored_kwargs
} if ignored_kwargs else func_kwargs
def _build_key_meta(
self,
func: Callable,
func_args: tuple,
func_kwargs: dict,
ignored_kwargs: Optional[List[str]] = None,
ttl: Optional[int] = None,
fixed_hash: Optional[str] = None,
) -> Tuple[CacheKey, ObjCacheMeta]:
"""Build cache key and meta object for specific function call."""
cachable_kwargs = self._filter_func_kwargs(func_kwargs, ignored_kwargs)
func_cache_key = CacheKey.from_func(
func=func,
func_args=func_args,
func_kwargs=cachable_kwargs,
)
func_meta = ObjCacheMeta.from_func(
func=func,
func_args=func_args,
func_kwargs=cachable_kwargs,
fixed_hash=fixed_hash,
func_ttl=ttl if ttl is not None else self.default_ttl,
base_meta=self._base_meta,
)
return func_cache_key, func_meta | scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/scikit_cache/components/decorators.py | decorators.py | 0.859531 | 0.119331 |
import os
import pickle
import shutil
from pathlib import Path
from typing import (
Any,
List,
Optional,
Tuple,
)
import yaml # type: ignore
from scikit_cache.utils import set_file_access_time
from ..resources import (
CacheKey,
ObjCacheMeta,
)
PICKLE_FILE = 'pickle.obj'
META_FILE = 'meta.yml'
ARGS_KWARGS_FILE = 'args_kwargs.yml'
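# Resulting on-disk layout for a single cache entry (a sketch; the exact key path
# segments depend on ``CacheKey.as_filepath`` and are an assumption here):
#
#   <cache_dir>/<key part>/.../<random part>/
#       pickle.obj   # pickled cached value
#       meta.yml     # ObjCacheMeta serialized as YAML
#   <cache_dir>/<key part>/.../args_kwargs.yml   # func args/kwargs of the parent key (optional)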
class FileCacheHandler:
"""File cache handler.
Sets/gets cached value to/from file directories.
"""
def __init__(self, cache_dir: str):
"""Initialize class instance."""
self.parent_cache_dir: Path = Path(cache_dir)
def set(self, key: CacheKey, value: Any, meta: ObjCacheMeta) -> None:
"""Set value to file cache by key."""
if not isinstance(key, CacheKey):
raise TypeError(f'Key must be ``CacheKey`` instance, not {type(key)}')
if not isinstance(meta, ObjCacheMeta):
raise TypeError(f'Meta must be ``ObjCacheMeta`` instance, not {type(meta)}')
cache_dir = self.parent_cache_dir / key.as_filepath
cache_dir.mkdir(exist_ok=True, parents=True)
pickle_file_path = cache_dir / PICKLE_FILE
with open(pickle_file_path, 'wb') as f:
pickle.dump(value, f)
meta.object_size = pickle_file_path.stat().st_size
with open(cache_dir / META_FILE, 'w') as f:
yaml.dump(meta.dict(), f, allow_unicode=True)
if meta.func_args_kwargs:
args_kwargs = cache_dir.parent / ARGS_KWARGS_FILE
if not args_kwargs.exists():
with open(args_kwargs, 'w') as f:
yaml.dump(meta.func_args_kwargs, f, allow_unicode=True)
def get(self, key: CacheKey) -> Tuple[bool, Any]:
"""Get value from cache by key."""
if not isinstance(key, CacheKey):
raise TypeError(f'Key must be ``CacheKey`` instance, not {type(key)}')
try:
# Manually set access time for cleanup mechanism
pickle_path = self.get_cache_pickle_path(key)
set_file_access_time(str(pickle_path), atime='now')
with open(pickle_path, 'rb') as f:
return True, pickle.load(f)
except FileNotFoundError:
return False, None
def delete(self, key: CacheKey) -> bool:
"""Delete cache value."""
if not isinstance(key, CacheKey):
raise TypeError(f'Key must be ``CacheKey`` instance, not {type(key)}')
cache_obj_dir = self.parent_cache_dir / key.as_filepath
try:
shutil.rmtree(cache_obj_dir)
return True
except FileNotFoundError:
return False
def get_cache_meta(self, key: CacheKey) -> Optional[ObjCacheMeta]:
"""Get cache meta by key."""
meta_path = self.get_cache_meta_path(key)
try:
with open(meta_path, 'r') as f:
return ObjCacheMeta(**yaml.safe_load(f))
except FileNotFoundError:
return None
def get_cache_meta_path(self, key: CacheKey) -> Path:
return self.parent_cache_dir / key.as_filepath / META_FILE
def get_cache_pickle_path(self, key: CacheKey) -> Path:
return self.parent_cache_dir / key.as_filepath / PICKLE_FILE
def find_child_keys(self, key: CacheKey) -> List[CacheKey]:
"""Get child keys for current key."""
child_keys = []
cache_dir = self.parent_cache_dir / key.as_filepath
for root, _, files in os.walk(cache_dir):
if META_FILE in files and root != str(cache_dir):
relative_path = Path(root).relative_to(self.parent_cache_dir)
child_keys.append(CacheKey.from_filepath(relative_path))
return child_keys
def wipe_cache_dir(self) -> None:
"""Drop all existing cache.
Removes cache directory completely.
"""
shutil.rmtree(self.parent_cache_dir, ignore_errors=True) | scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/scikit_cache/components/file_handler.py | file_handler.py | 0.730386 | 0.078926 |
from typing import (
Dict,
List,
Optional,
Set,
)
from ..resources import (
CacheKey,
ObjCacheMeta,
)
class InternalCacheMixin:
"""Mixin for ``CacheController`` class with internal (private) methods only."""
def _get_cache_meta(self, key: CacheKey) -> Optional[ObjCacheMeta]:
"""Get cache meta by key.
Proxied method from cache_handler with internal caching.
"""
if key in self.__meta_cache__:
return self.__meta_cache__[key] # type: ignore
meta: Optional[ObjCacheMeta] = self._handler.get_cache_meta(key)
self.__meta_cache__[key] = meta # save to internal cache
return meta
def _get_all_cache_meta(self) -> Dict[CacheKey, ObjCacheMeta]:
"""Get all cache meta."""
return {k: v for k, v in self.__meta_cache__.items() if v is not None}
def _find_child_keys(self, key: CacheKey) -> List[CacheKey]:
"""Get child keys for current key.
Proxied method from cache_handler with internal caching.
"""
if key in self.__child_keys_cache__:
return self.__child_keys_cache__[key] # type: ignore
child_keys: List[CacheKey] = self._handler.find_child_keys(key)
self.__child_keys_cache__[key] = child_keys # save to internal cache
return child_keys
def _init_internal_cache(self, invalidate_first: bool = False) -> None:
"""Warm internal cache.
Method searches for all existing cache keys and meta files and adds them to internal cache.
"""
if invalidate_first:
self._invalidate_internal_cache(clear_all=True)
root_key = CacheKey('__root__')
for child_key in self._handler.find_child_keys(root_key):
parent_key = []
for part in child_key.split('__'):
parent_key.append(part)
self._find_child_keys(key=CacheKey('__'.join(parent_key)))
self._get_cache_meta(child_key) # warm meta cache
def _invalidate_internal_cache(self, *keys: CacheKey, clear_all: bool = False) -> int:
"""Invalidate internal controller cache.
Method can invalidate only specific cache keys or drop all internal cache if parameter
``clear_all`` is True.
"""
if clear_all:
dropped_amount = len(self.__meta_cache__)
self.__meta_cache__.clear()
self.__child_keys_cache__.clear()
return dropped_amount
keys_to_drop: Set[CacheKey] = set()
for key in keys:
if not isinstance(key, CacheKey):
raise TypeError(f'Key must be ``CacheKey`` instance, not {type(key)}')
keys_to_drop.update(key.get_parent_keys())
for key in keys_to_drop:
self.__meta_cache__.pop(key, None)
self.__child_keys_cache__.pop(key, None)
return len(keys_to_drop) | scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/scikit_cache/components/internal_cache.py | internal_cache.py | 0.880848 | 0.096791 |
import logging
from contextlib import contextmanager
from typing import Any
from ..resources import (
CacheKey,
ObjCacheMeta,
)
from ..utils import (
format_bytes_to_str,
hash_for_iterable,
)
estimator_logger = logging.getLogger('scikit_cache.estimator')
class EstimatorsMixin:
"""Mixin for cache controller to work with SKLearn estimators."""
@contextmanager
def make_cached_estimator(self, estimator: Any) -> Any:
"""Make estimator instance with cachable methods.
This is a context manager, used like this:
with cache.make_cached_estimator(estimator) as cached_estimator:
cached_estimator.fit()
This function modifies the existing estimator instance. The returned instance has the same class
but contains a modified ``.fit()`` method.
This "cached estimator" can be used anywhere just like a usual SKLearn estimator, but every
time the ``.fit()`` method is called it will go to cache to check if the estimator was already
calculated and cached.
To enable caching for a cached estimator you need to enable the cache using the ``cache.enable()``
function. By default, all cached estimators work as normal estimators.
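Example (a sketch; ``SomeEstimator``, ``X`` and ``y`` are placeholders for any
scikit-learn estimator and its training data)::

    cache.enable()
    with cache.make_cached_estimator(SomeEstimator()) as estimator:
        estimator.fit(X, y)   # first call: fitted and written to cache
        estimator.fit(X, y)   # second call: restored from cache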
"""
estimator_class = estimator.__class__
if not hasattr(estimator_class, '__original_fit__'):
estimator_class.__original_fit__ = estimator_class.fit
estimator_class.fit = self._estimator_fit_with_cache
estimator_class.__cache_ctrl__ = self
try:
yield estimator
finally:
if hasattr(estimator_class, '__original_fit__'):
estimator_class.fit = estimator_class.__original_fit__
delattr(estimator_class, '__original_fit__')
delattr(estimator_class, '__cache_ctrl__')
@staticmethod
def _estimator_fit_with_cache(instance: Any, *args: Any, **kwargs: Any) -> Any:
"""Function that implements ``BaseEstimator.fit()`` with cache mechanisms."""
from sklearn.utils.validation import check_is_fitted
cache = instance.__cache_ctrl__
# If caching is disabled then use original ``.fit()`` function
if not cache.is_enabled_for_estimators:
return instance.__original_fit__(*args, **kwargs)
# Get hash of all fit params including class and original parameters
estimator_hash = hash_for_iterable((
instance.__class__,
instance.get_params(),
args,
kwargs,
))
# Make cache key
raw_key = f'estimators__{estimator_hash}'
cache_key = CacheKey(raw_key)
# Check if cached result exists (if read mode enabled)
if 'r' in cache.__mode__:
found, cached_result = cache._get(cache_key)
if found:
instance.__dict__ = cached_result.__dict__
check_is_fitted(instance)
cache._log(
'estimator cache hit',
level='info',
logger=estimator_logger,
)
return instance
else:
cache._log(
'estimator cache miss',
level='warning',
logger=estimator_logger,
)
# Call original ``.fit()`` function
fit_result = instance.__original_fit__(*args, **kwargs)
check_is_fitted(fit_result)
# Save fit result to cache
if 'w' in cache.__mode__:
cache_meta = ObjCacheMeta(
raw_key=raw_key,
ttl=cache.default_ttl,
**cache._base_meta.dict(),
)
cache._set(cache_key, fit_result, cache_meta)
size = format_bytes_to_str(cache_meta.object_size)
cache._log(
f'estimator cache write - {size}',
level='info',
logger=estimator_logger,
)
return fit_result | scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/scikit_cache/components/estimators.py | estimators.py | 0.894588 | 0.156008
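# --- Illustrative usage sketch (not part of the package source) ---
# How ``make_cached_estimator`` above is intended to be used. It assumes the
# package exposes a ``CacheController`` (built from ``EstimatorsMixin``) at the
# top level and that it can be constructed with defaults; both are assumptions.
# Repeated ``fit()`` calls with identical parameters and data should then be
# served from the cache instead of re-fitting.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from scikit_cache import CacheController  # assumed public entry point
    X, y = make_classification(n_samples=200, random_state=0)
    cache = CacheController()  # assumed default constructor
    cache.enable()  # caching for estimators is off until explicitly enabled
    estimator = LogisticRegression(max_iter=200)
    with cache.make_cached_estimator(estimator) as cached_estimator:
        cached_estimator.fit(X, y)  # first call: cache miss, model is fitted
        cached_estimator.fit(X, y)  # second call: estimator cache hit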
from datetime import (
datetime,
timedelta,
)
from functools import wraps
from typing import (
Any,
Callable,
List,
TypeVar,
Union,
cast,
)
from scikit_cache.utils import (
format_str_to_bytes,
get_file_access_time,
)
from ..resources import CacheKey
F = TypeVar('F', bound=Callable[..., Any])
class CleanUpMixin:
"""Mixin for ``CacheController`` class with cleanup private methods."""
def _get_clean_objects_by_expired_tl(self) -> List[CacheKey]:
"""Get list of cache keys with expired TTL."""
current_time = datetime.now()
expired_keys = []
for cache_key, meta_cache in self._get_all_cache_meta().items():
if meta_cache.ttl >= 0:
creation_time = datetime.fromisoformat(meta_cache.creation_time)
expire_time = creation_time + timedelta(seconds=meta_cache.ttl)
if current_time > expire_time:
expired_keys.append(cache_key)
return expired_keys
def _get_clean_objects_by_max_number(self, max_number: int) -> List[CacheKey]:
"""Get list of cache keys to delete that exceed max number of objects."""
meta_dict = self._get_all_cache_meta()
delete_number = len(meta_dict) - max_number
if delete_number < 1:
return []
return sorted(meta_dict, key=self._clean_sorting_func)[:delete_number]
def _get_clean_objects_by_max_size(self, max_size: Union[int, str]) -> List[CacheKey]:
"""Get list of cache keys to delete that exceed max cache dir size."""
if not isinstance(max_size, int):
max_size = format_str_to_bytes(max_size)
total_size, result_keys = 0, []
meta_dict = self._get_all_cache_meta()
for cache_key in sorted(meta_dict, key=self._clean_sorting_func, reverse=True):
total_size += meta_dict[cache_key].object_size
if total_size > max_size:
result_keys.append(cache_key)
return result_keys
@property
def _clean_sorting_func(self) -> Callable:
"""Get function that will be used for cache keys sorting.
Result function depends on ``autoclean_mode`` parameter:
- if it's "last_used", then result function will return file access time
- if it's "last_created", then result function will return file creation time
"""
if self.autoclean_mode == 'last_used':
return self._get_access_time_by_cache_key
elif self.autoclean_mode == 'last_created':
return self._get_creation_time_by_cache_key
else:
raise ValueError(f'Unknown ``autoclean_mode`` value: {self.autoclean_mode}')
def _get_access_time_by_cache_key(self, cache_key: CacheKey) -> float:
"""Get file access time using cache key."""
pickle_path = self._handler.get_cache_pickle_path(cache_key)
return get_file_access_time(filename=str(pickle_path))
def _get_creation_time_by_cache_key(self, cache_key: CacheKey) -> float:
"""Get file creation time using cache key."""
return self.__meta_cache__[cache_key].creation_timestamp # type: ignore
def cache_autoclean(func: F) -> F:
"""Decorator to automatically call ``self.clean`` after each function call.
Decorator can be applied only to ``CacheController`` class methods.
"""
if not func.__qualname__.startswith('CacheController.'):
raise ValueError(
'Decorator ``cache_autoclean`` can only be applied to ``CacheController`` methods',
)
@wraps(func)
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
result = func(self, *args, **kwargs)
if self.autoclean:
self.clean()
return result
return cast(F, wrapper) | scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/scikit_cache/components/cleanup.py | cleanup.py | 0.894444 | 0.154153
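# --- Illustrative sketch (not part of the package source) ---
# Standalone illustration of the TTL rule in ``_get_clean_objects_by_expired_tl``
# above: a key is expired once creation_time + ttl seconds is in the past, and a
# negative ttl means the entry never expires.
if __name__ == '__main__':
    from datetime import datetime, timedelta
    meta = {
        'key_a': {'creation_time': (datetime.now() - timedelta(hours=2)).isoformat(), 'ttl': 3600},
        'key_b': {'creation_time': datetime.now().isoformat(), 'ttl': 3600},
        'key_c': {'creation_time': (datetime.now() - timedelta(days=7)).isoformat(), 'ttl': -1},
    }
    now = datetime.now()
    expired = [
        key for key, m in meta.items()
        if m['ttl'] >= 0
        and now > datetime.fromisoformat(m['creation_time']) + timedelta(seconds=m['ttl'])
    ]
    print(expired)  # expected: ['key_a']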
import inspect
import logging
from types import CodeType
from typing import (
Any,
Callable,
)
import joblib
from .base import get_func_name
from .estimators import (
get_estimator_params,
is_estimator,
)
logger = logging.getLogger('scikit_cache.hashing')
def hash_for_simple_object(obj: Any) -> str:
"""Get hash for any object."""
return str(joblib.hash(obj))
def hash_for_none() -> str:
"""Get simple hash for None objects."""
return '0' * 32
def hash_for_code(code: CodeType) -> str:
"""Get hash for ``code`` object."""
if not isinstance(code, CodeType):
raise TypeError(f'Parameter ``code`` must be ``CodeType``, not {type(code)}')
try:
co_consts_hash = hash_for_iterable(code.co_consts)
except Exception as e:
logger.warning(f'Error on hashing code consts {code}\n{e!r}')
co_consts_hash = hash_for_simple_object(code.co_consts)
return hash_for_simple_object(co_consts_hash.encode() + code.co_code)
def hash_for_iterable(iterable: Any) -> str:
"""Get hash for iterable objects."""
return hash_for_simple_object(''.join(hash_by_type(value) for value in iterable))
def hash_for_dict(_dict: dict) -> str:
"""Get hash for dict objects."""
if not isinstance(_dict, dict):
raise TypeError(f'Parameter ``_dict`` must be dict, not {type(_dict)}')
return hash_for_simple_object({k: hash_by_type(v) for k, v in _dict.items()})
def hash_for_callable(func: Callable, include_name: bool = True) -> str:
"""Hash for callable objects."""
if not callable(func):
raise TypeError(f'Parameter ``func`` must be callable, not {type(func)}')
try:
result = hash_for_code(func.__code__)
except Exception as e:
logger.warning(f'Error on hashing func code {func}\n{e!r}')
result = hash_for_simple_object(func)
if include_name:
result = hash_for_simple_object(f'{result}.{get_func_name(func)}')
return result
def hash_for_class(_class: type) -> str:
"""Get hash for ``class`` object.
NOTE: This is a poor hash implementation but it works for some cases.
"""
try:
return hash_for_simple_object(inspect.getsource(_class))
except Exception as e:
logger.warning(f'Error on hashing class {_class}\n{e!r}')
return hash_for_simple_object(_class)
def hash_for_estimator(obj: Any) -> str:
"""Get hash for ``sklearn.BaseEstimator`` instance."""
estimator_class = obj.__class__
estimator_params = get_estimator_params(obj, all_params=True)
return hash_for_class(estimator_class) + hash_for_dict(estimator_params)
def hash_by_type(obj: Any) -> str:
"""Hash for any object depending on it's type."""
if obj is None:
return hash_for_none()
elif isinstance(obj, (list, tuple, set)):
return hash_for_iterable(obj)
elif isinstance(obj, dict):
return hash_for_dict(obj)
elif is_estimator(obj):
return hash_for_estimator(obj)
elif isinstance(obj, (str, int, float, bytes, frozenset)):
pass
elif inspect.isclass(obj):
return hash_for_class(obj)
elif callable(obj):
return hash_for_callable(obj)
elif isinstance(obj, CodeType):
return hash_for_code(obj)
return hash_for_simple_object(obj) | scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/scikit_cache/utils/hashing.py | hashing.py | 0.833799 | 0.177775
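# --- Illustrative usage sketch (not part of the package source) ---
# The dispatch in ``hash_by_type`` above hashes containers element-by-element,
# so structurally equal inputs give equal digests. The import path follows the
# file location shown in this dump and is otherwise an assumption.
if __name__ == '__main__':
    from scikit_cache.utils.hashing import hash_by_type
    a = {'params': {'n_estimators': 100, 'max_depth': None}, 'features': ['x1', 'x2']}
    b = {'params': {'n_estimators': 100, 'max_depth': None}, 'features': ['x1', 'x2']}
    c = {'params': {'n_estimators': 200, 'max_depth': None}, 'features': ['x1', 'x2']}
    assert hash_by_type(a) == hash_by_type(b)  # identical content -> identical hash
    assert hash_by_type(a) != hash_by_type(c)  # changed parameter -> different hash
    print(hash_by_type(a))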
import os
import pwd
import random
from datetime import datetime
from typing import (
Any,
Callable,
Tuple,
)
SIZE_UNITS = (' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')
CACHE_HIT_ATTR = '__scikit_cache_hit__'
def is_scikit_cache_hit(func: Callable) -> Any:
"""Get saved attribute if where is cache hit or not.
This CACHE_HIT_ATTR automatically added in ``DecoratorMixin`` to function dictionary and
allows to detect cache hit/miss after function call.
"""
if hasattr(func, '__wrapped__'):
func = func.__wrapped__ # Extract original func from decorated
return getattr(func, CACHE_HIT_ATTR, None)
def get_datetime_str() -> str:
"""Get datetime as string is ISO format."""
return datetime.now().isoformat()
def get_random_hex(bits: int = 128) -> str:
"""Get random HEX string."""
return '{0:x}'.format(random.getrandbits(bits))
def get_func_name(func: Callable) -> str:
"""Get full function name (with module path)."""
try:
return f'{func.__module__}.{func.__name__}'.replace('__', '')
except AttributeError:
raise ValueError(f'``get_func_name`` accepts callable objects, not {type(func)}')
def yaml_repr(value: Any) -> Any:
"""Represent value for YAML format."""
# Pandas ``DataFrame`` or ``Series``
if hasattr(value, 'shape'):
return f'<{value.__class__.__name__}: {value.shape}>'
# List/tuple
if isinstance(value, (list, tuple)):
return [yaml_repr(v) for v in value]
# Dict
if isinstance(value, dict):
return {yaml_repr(k): yaml_repr(v) for k, v in value.items()}
# YAML supported native types
if isinstance(value, (int, float, bool, str)) or value is None:
return value
# All other objects
return repr(value)
def get_username() -> str:
"""Get current username."""
try:
return pwd.getpwuid(os.getuid())[0]
except Exception:
return os.path.expanduser('~').split('/')[-1]
def format_bytes_to_str(
size: int,
units: Tuple[str, ...] = SIZE_UNITS,
) -> str:
"""Get human readable string representation of size in bytes."""
return str(size) + units[0] if size < 1024 else format_bytes_to_str(size >> 10, units[1:])
def format_str_to_bytes(size: str) -> int:
"""Convert human readable strinb representaion of file size to integer.
For example:
>>> format_str_to_bytes(size='1 MB')
1048576
"""
size_multiplier = 1
for i, unit in enumerate(SIZE_UNITS):
if unit in size:
size_part, _ = size.split(unit)
size_multiplier = pow(1024, i) or 1
return int(float(size_part.strip()) * size_multiplier)
raise ValueError(f'No units found in string. Available units: {SIZE_UNITS}') | scikit-cache | /scikit-cache-0.1.2.tar.gz/scikit-cache-0.1.2/scikit_cache/utils/base.py | base.py | 0.782704 | 0.25118
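# --- Illustrative sketch (not part of the package source) ---
# Round trip of the two size helpers above; the values follow from the
# 1024-based units in SIZE_UNITS. The import mirrors how other modules in this
# dump import these helpers from ``scikit_cache.utils``.
if __name__ == '__main__':
    from scikit_cache.utils import format_bytes_to_str, format_str_to_bytes
    print(format_bytes_to_str(512))          # '512 bytes'
    print(format_bytes_to_str(2048))         # '2KB'
    print(format_str_to_bytes('1 MB'))       # 1048576
    print(format_str_to_bytes('512 bytes'))  # 512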
__author__ = 'du'
from abc import ABCMeta, abstractmethod
from six import add_metaclass
import numpy as np
from chainer import Chain, Variable, optimizers
from chainer import functions as F
from sklearn import base
@add_metaclass(ABCMeta)
class BaseChainerEstimator(base.BaseEstimator):
def __init__(self, optimizer=optimizers.SGD(), batch_size=10, n_iter=100, report=10,
network_params=None):
if network_params is None:
network_params = dict()
self.network_params = network_params
self.network = self._setup_network(**network_params)
self.optimizer = optimizer
self.optimizer.setup(self.network)
self.n_iter = n_iter
self.report = report
self.batch_size = batch_size
@abstractmethod
def _setup_network(self, **params):
return Chain(l1=F.Linear(1, 1))
@abstractmethod
def _forward(self, x, train=False):
y = self.network.l1(x)
return y
@abstractmethod
def _loss_func(self, y, t):
return F.mean_squared_error(y, t)
def fit(self, x_data, y_data=None):
score = 1e100
if y_data is None:
y_data = x_data
all_x = Variable(x_data)
all_y = Variable(y_data)
data_size = len(x_data)
for epoch in range(self.n_iter):
indexes = np.random.permutation(data_size)
for i in range(0, data_size, self.batch_size):
xx = Variable(x_data[indexes[i: i + self.batch_size]])
yy = Variable(y_data[indexes[i: i + self.batch_size]])
self.optimizer.zero_grads()
loss = self._loss_func(self._forward(xx, train=True), yy)
loss.backward()
self.optimizer.update()
if self.report > 0 and epoch % self.report == 0:
loss = self._loss_func(self._forward(all_x), all_y)
d_score = score - loss.data
score = loss.data
print(epoch, loss.data, d_score)
return self
class ChainerRegresser(BaseChainerEstimator, base.RegressorMixin):
def predict(self, x_data):
x = Variable(x_data)
y = self._forward(x, train=False)
return y.data
class ChainerClassifier(BaseChainerEstimator, base.ClassifierMixin):
def predict(self, x_data):
x = Variable(x_data)
y = self._forward(x, train=False)
return F.softmax(y).data.argmax(1)
class ChainerTransformer(BaseChainerEstimator, base.TransformerMixin):
@abstractmethod
def _transform(self, x, train=False):
raise NotImplementedError
def transform(self, x_data):
x = Variable(x_data)
z = self._transform(x)
return z.data
def fit(self, x_data, y_data=None):
return BaseChainerEstimator.fit(self, x_data, None) | scikit-chainer | /scikit-chainer-0.4.2.tar.gz/scikit-chainer-0.4.2/skchainer/__init__.py | __init__.py | 0.885155 | 0.249584
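# --- Illustrative sketch (not part of the package source) ---
# A minimal concrete regressor built on the wrapper above: subclasses provide
# ``_setup_network``, ``_forward`` and ``_loss_func``, mirroring the abstract
# defaults in ``BaseChainerEstimator``. It targets the same (legacy) chainer API
# used by the module, so treat it as a sketch rather than a tested model.
import numpy as np
from chainer import Chain
from chainer import functions as F
from skchainer import ChainerRegresser  # the class defined above, assumed installed
class LinearRegression(ChainerRegresser):
    def _setup_network(self, **params):
        return Chain(l1=F.Linear(params.get('n_in', 1), 1))
    def _forward(self, x, train=False):
        return self.network.l1(x)
    def _loss_func(self, y, t):
        return F.mean_squared_error(y, t)
if __name__ == '__main__':
    X = np.random.rand(100, 1).astype(np.float32)
    y = (3.0 * X + 1.0).astype(np.float32)
    model = LinearRegression(n_iter=50, report=0, network_params={'n_in': 1})
    model.fit(X, y)
    print(model.predict(X[:5]))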
import subprocess
from abc import ABCMeta, abstractmethod
from tempfile import NamedTemporaryFile
import time
import logging
import pandas as pd
from .utils import NamedProgressBar
from . import core
from .utils import iterable_to_series, optional_second_method, nanarray, squeeze
from . import io
LOGGER = logging.getLogger(__name__)
class BaseTransformer(object):
""" Transformer Base Class.
Specific Base Transformer classes inherit from this class and implement `transform` and `axes_names`.
"""
__metaclass__ = ABCMeta
# To share some functionality between Transformer and AtomTransformer
def __init__(self, verbose=True):
self.verbose = verbose
def optional_bar(self, **kwargs):
if self.verbose:
bar = NamedProgressBar(name=self.__class__.__name__, **kwargs)
else:
def bar(x):
return x
return bar
@property
@abstractmethod
def axes_names(self):
""" tuple: The names of the axes. """
pass
@abstractmethod
def transform(self, mols):
""" Transform objects according to the objects transform protocol.
Args:
mols (skchem.Mol or pd.Series or iterable):
The mol objects to transform.
Returns:
pd.Series or pd.DataFrame
"""
pass
class Transformer(BaseTransformer):
""" Molecular based Transformer Base class.
Concrete Transformers inherit from this class and must implement `_transform_mol` and `columns`.
See Also:
AtomTransformer."""
@property
@abstractmethod
def columns(self):
""" pd.Index: The column index to use. """
return pd.Index(None)
@abstractmethod
def _transform_mol(self, mol):
""" Transform a molecule. """
pass
def _transform_series(self, ser):
""" Transform a series of molecules to an np.ndarray. """
bar = self.optional_bar()
return [self._transform_mol(mol) for mol in bar(ser)]
@optional_second_method
def transform(self, mols, **kwargs):
""" Transform objects according to the objects transform protocol.
Args:
mols (skchem.Mol or pd.Series or iterable):
The mol objects to transform.
Returns:
pd.Series or pd.DataFrame
"""
if isinstance(mols, core.Mol):
# just squeeze works on series
return pd.Series(self._transform_mol(mols),
index=self.columns,
name=self.__class__.__name__).squeeze()
elif not isinstance(mols, pd.Series):
mols = iterable_to_series(mols)
res = pd.DataFrame(self._transform_series(mols),
index=mols.index,
columns=self.columns)
return squeeze(res, axis=1)
@property
def axes_names(self):
""" tuple: The names of the axes. """
return 'batch', self.columns.name
class BatchTransformer(BaseTransformer):
""" Transformer Mixin in which transforms on multiple molecules save overhead.
Implement `_transform_series` with the transformation rather than `_transform_mol`. Must occur before
`Transformer` or `AtomTransformer` in method resolution order.
See Also:
Transformer, AtomTransformer.
"""
def _transform_mol(self, mol):
""" Transform a molecule. """
v = self.verbose
self.verbose = False
res = self.transform([mol]).iloc[0]
self.verbose = v
return res
@abstractmethod
def _transform_series(self, ser):
""" Transform a series of molecules to an np.ndarray. """
pass
class AtomTransformer(BaseTransformer):
""" Transformer that will produce a Panel.
Concrete classes inheriting from this should implement `_transform_atom`, `_transform_mol` and `minor_axis`.
See Also:
Transformer
"""
def __init__(self, max_atoms=100, **kwargs):
self.max_atoms = max_atoms
self.major_axis = pd.RangeIndex(self.max_atoms, name='atom_idx')
super(AtomTransformer, self).__init__(**kwargs)
@property
@abstractmethod
def minor_axis(self):
""" pd.Index: Minor axis of transformed values. """
return pd.Index(None) # expects a length
@property
def axes_names(self):
""" tuple: The names of the axes. """
return 'batch', 'atom_idx', self.minor_axis.name
@optional_second_method
def transform(self, mols):
""" Transform objects according to the objects transform protocol.
Args:
mols (skchem.Mol or pd.Series or iterable):
The mol objects to transform.
Returns:
pd.Series or pd.DataFrame
"""
if isinstance(mols, core.Atom):
# just squeeze works on series
return pd.Series(self._transform_atom(mols),
index=self.minor_axis).squeeze()
elif isinstance(mols, core.Mol):
res = pd.DataFrame(self._transform_mol(mols),
index=self.major_axis[:len(mols.atoms)],
columns=self.minor_axis)
return squeeze(res, axis=1)
elif not isinstance(mols, pd.Series):
mols = iterable_to_series(mols)
res = pd.Panel(self._transform_series(mols),
items=mols.index,
major_axis=self.major_axis,
minor_axis=self.minor_axis)
return squeeze(res, axis=(1, 2))
@abstractmethod
def _transform_atom(self, atom):
""" Transform an atom to a 1D array of length `len(self.columns)`. """
pass
def _transform_mol(self, mol):
""" Transform a Mol to a 2D array. """
res = nanarray((len(mol.atoms), len(self.minor_axis)))
for i, atom in enumerate(mol.atoms):
res[i] = self._transform_atom(atom)
return res
def _transform_series(self, ser):
""" Transform a Series<Mol> to a 3D array. """
if self.verbose:
bar = NamedProgressBar(name=self.__class__.__name__)
else:
# use identity.
def bar(obj):
return obj
res = nanarray((len(ser), self.max_atoms, len(self.minor_axis)))
for i, mol in enumerate(bar(ser)):
res[i, :len(mol.atoms), :len(self.minor_axis)] = self._transform_mol(mol)
return res
class External(object):
""" Mixin for wrappers of external CLI tools.
Concrete classes must implement `validate_install`."""
__metaclass__ = ABCMeta
install_hint = "" # give an explanation of how to install external tool here.
def __init__(self, **kwargs):
assert self.validated, 'External tool not installed. ' + self.install_hint
super(External, self).__init__(**kwargs)
@property
def validated(self):
""" bool: whether the external tool is installed and active. """
if not hasattr(self.__class__, '_validated'):
self.__class__._validated = self.validate_install()
return self.__class__._validated
@staticmethod
@abstractmethod
def validate_install():
""" Determine if the external tool is available. """
pass
class CLIWrapper(External, BaseTransformer):
""" CLI wrapper.
Concrete classes inheriting from this must implement `_cli_args`, `monitor_progress`,
`_parse_outfile`, `_parse_errors`."""
def __init__(self, error_on_fail=False, warn_on_fail=True, **kwargs):
super(CLIWrapper, self).__init__(**kwargs)
self.error_on_fail = error_on_fail
self.warn_on_fail = warn_on_fail
def _transform_series(self, ser):
""" Transform a series. """
with NamedTemporaryFile(suffix='.sdf') as infile, NamedTemporaryFile() as outfile:
io.write_sdf(ser, infile.name)
args = self._cli_args(infile.name, outfile.name)
p = subprocess.Popen(args, stderr=subprocess.PIPE)
if self.verbose:
bar = self.optional_bar(max_value=len(ser))
while p.poll() is None:
time.sleep(0.5)
bar.update(self.monitor_progress(outfile.name))
bar.finish()
p.wait()
res = self._parse_outfile(outfile.name)
errs = p.stderr.read().decode()
errs = self._parse_errors(errs)
# set the index of results to that of the input, with the failed indices removed
if isinstance(res, (pd.Series, pd.DataFrame)):
res.index = ser.index.delete(errs)
elif isinstance(res, pd.Panel):
res.items = ser.index.delete(errs)
else:
raise ValueError('Parsed datatype ({}) not supported.'.format(type(res)))
# go through the errors and put them back in (transform doesn't lose instances)
if len(errs):
for err in errs:
err = ser.index[err]
if self.error_on_fail:
raise ValueError('Failed to transform {}.'.format(err))
if self.warn_on_fail:
LOGGER.warn('Failed to transform %s', err)
res.ix[err] = None
return res.loc[ser.index].values
@abstractmethod
def _cli_args(self, infile, outfile):
""" list: The cli arguments. """
return []
@abstractmethod
def monitor_progress(self, filename):
""" Report the progress. """
pass
@abstractmethod
def _parse_outfile(self, outfile):
""" Parse the file written and return a series. """
pass
@abstractmethod
def _parse_errors(self, errs):
""" Parse stderr and return error indices. """
pass
class Featurizer(object):
""" Base class for m -> data transforms, such as Fingerprinting etc.
Concrete subclasses should implement `name`, returning a string uniquely identifying the featurizer. """
__metaclass__ = ABCMeta | scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/base.py | base.py | 0.837387 | 0.4231
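# --- Illustrative sketch (not part of the package source) ---
# A minimal concrete ``Transformer`` as described in the docstrings above:
# subclasses provide ``columns`` and ``_transform_mol`` and inherit the
# Series/DataFrame handling of ``transform``. Import paths are assumptions
# based on the file locations shown in this dump.
import pandas as pd
import skchem
from skchem.base import Transformer  # the class defined above
class AtomCounter(Transformer):
    @property
    def columns(self):
        return pd.Index(['n_atoms', 'n_heavy_atoms'], name='counts')
    def _transform_mol(self, mol):
        # counts derived only from atom APIs used elsewhere in this dump
        return [len(mol.atoms), sum(1 for a in mol.atoms if a.GetAtomicNum() > 1)]
if __name__ == '__main__':
    mols = [skchem.Mol.from_smiles(s) for s in ('CCO', 'c1ccccc1')]
    print(AtomCounter(verbose=False).transform(mols))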
import os
import sys
import re
import subprocess
import logging
import warnings
import pandas as pd
from .. import io
from ..utils import sdf_count
from ..base import CLIWrapper, Transformer, BatchTransformer
from ..filters.base import TransformFilter
LOGGER = logging.getLogger(__name__)
if sys.version_info[0] == 2:
NoFoundError = OSError
subprocess.DEVNULL = open(os.devnull, 'w')
else:
NoFoundError = FileNotFoundError
class ChemAxonStandardizer(CLIWrapper, BatchTransformer, Transformer, TransformFilter):
""" ChemAxon Standardizer Wrapper.
Args:
config_path (str):
The path of the config_file. If None, use the default one.
Notes:
ChemAxon Standardizer must be installed and accessible as `standardize`
from the shell launching the program.
Warnings:
Must use a unique index (see #31).
Examples:
>>> import skchem
>>> std = skchem.standardizers.ChemAxonStandardizer() # doctest:+SKIP
>>> m = skchem.Mol.from_smiles('CC.CCC')
>>> print(std.transform(m)) # doctest:+SKIP
<Mol: CCC>
>>> data = [m, skchem.Mol.from_smiles('C=CO'), skchem.Mol.from_smiles('C[O-]')]
>>> std.transform(data) # doctest:+SKIP
0 <Mol: CCC>
1 <Mol: CC=O>
2 <Mol: CO>
Name: structure, dtype: object
>>> will_fail = mol = '''932-97-8
... RDKit 3D
...
... 9 9 0 0 0 0 0 0 0 0999 V2000
... -0.9646 0.0000 0.0032 C 0 0 0 0 0 0 0 0 0 0 0 0
... -0.2894 -1.2163 0.0020 C 0 0 0 0 0 0 0 0 0 0 0 0
... -0.2894 1.2163 0.0025 C 0 0 0 0 0 0 0 0 0 0 0 0
... -2.2146 0.0000 -0.0004 N 0 0 0 0 0 0 0 0 0 0 0 0
... 1.0710 -1.2610 0.0002 C 0 0 0 0 0 0 0 0 0 0 0 0
... 1.0710 1.2610 0.0007 C 0 0 0 0 0 0 0 0 0 0 0 0
... -3.3386 0.0000 -0.0037 N 0 0 0 0 0 0 0 0 0 0 0 0
... 1.8248 0.0000 -0.0005 C 0 0 0 0 0 0 0 0 0 0 0 0
... 3.0435 0.0000 -0.0026 O 0 0 0 0 0 0 0 0 0 0 0 0
... 1 2 1 0
... 1 3 1 0
... 1 4 2 3
... 2 5 2 0
... 3 6 2 0
... 4 7 2 0
... 5 8 1 0
... 8 9 2 0
... 6 8 1 0
... M CHG 2 4 1 7 -1
... M END
... '''
>>> will_fail = skchem.Mol.from_molblock(will_fail)
>>> std.transform(will_fail) # doctest:+SKIP
nan
>>> data = [will_fail] + data
>>> std.transform(data) # doctest:+SKIP
0 None
1 <Mol: CCC>
2 <Mol: CC=O>
3 <Mol: CO>
Name: structure, dtype: object
>>> std.transform_filter(data) # doctest:+SKIP
1 <Mol: CCC>
2 <Mol: CC=O>
3 <Mol: CO>
Name: structure, dtype: object
>>> std.keep_failed = True # doctest:+SKIP
>>> std.transform(data) # doctest:+SKIP
0 <Mol: [N-]=[N+]=C1C=CC(=O)C=C1>
1 <Mol: CCC>
2 <Mol: CC=O>
3 <Mol: CO>
Name: structure, dtype: object
"""
install_hint = """ Install ChemAxon from https://www.chemaxon.com. It requires a license,
which can be freely obtained for academics. """
DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), 'default_config.xml')
def __init__(self, config_path=None, keep_failed=False, **kwargs):
super(ChemAxonStandardizer, self).__init__(**kwargs)
if not config_path:
config_path = self.DEFAULT_CONFIG
self.config_path = config_path
self.keep_failed = keep_failed
@property
def columns(self):
return ['structure']
def _transform_series(self, ser):
# implement keep_failed functionality here
res = super(ChemAxonStandardizer, self)._transform_series(ser)
mask = pd.isnull(res)
for m_in, m_out in zip(ser[~mask], res[~mask]):
m_out.name = m_in.name
if self.keep_failed:
res[mask] = ser.iloc[mask]
return res
def _parse_outfile(self, outfile):
""" Reads output file and returns a list"""
return io.read_sdf(outfile, read_props=False)
def _parse_errors(self, errs):
""" Reads stderr and parses out failures as a list of indices. """
LOGGER.debug('stderr: %s', errs if errs else None)
errs = errs.strip().split('\n')
errs = [re.findall('No. ([0-9]+):', err) for err in errs]
return [int(err[0]) - 1 for err in errs if len(err)]
def _cli_args(self, infile, outfile):
""" The command line arguments to use for the subprocess. """
return ['standardize', infile,
'-c', self.config_path,
'-f', 'sdf',
'-o', outfile,
'--ignore-error']
@staticmethod
def validate_install():
""" Check if we can call cxcalc. """
try:
return subprocess.call(['standardize', '-h'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
except NoFoundError:
return False
def monitor_progress(self, filename):
return sdf_count(filename)
def filter(self, *args, **kwargs):
warnings.warn('Filter returns the unstandardized Mols. Did you mean to use `transform_filter`?')
super(ChemAxonStandardizer, self).filter(*args, **kwargs) | scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/standardizers/chemaxon.py | chemaxon.py | 0.510008 | 0.139572
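# --- Illustrative sketch (not part of the package source) ---
# Standalone illustration of the stderr parsing rule in ``_parse_errors`` above:
# messages containing "No. <k>:" are mapped to zero-based indices of the failed
# records (the example error text is made up).
if __name__ == '__main__':
    import re
    stderr_text = (
        'Error at record No. 1: cannot standardize\n'
        'Error at record No. 4: cannot standardize'
    )
    errs = stderr_text.strip().split('\n')
    errs = [re.findall('No. ([0-9]+):', err) for err in errs]
    print([int(err[0]) - 1 for err in errs if len(err)])  # [0, 3]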
import warnings
from abc import ABCMeta, abstractmethod
import pandas as pd
from rdkit.Chem.rdDistGeom import EmbedMolecule
from .. import core
from ..utils import Suppressor
from ..base import Transformer
from ..filters.base import TransformFilter
class ForceField(Transformer, TransformFilter):
# TODO: Multiple conformer generation handling.
""" Base forcefield class.
Filter drops those that fail to be optimized.
"""
def __init__(self, embed=True, warn_on_fail=True, error_on_fail=False,
drop_failed=True, add_hs=True, **kwargs):
self.add_hs = add_hs
self.drop_failed = drop_failed
self.warn_on_fail = warn_on_fail
self.error_on_fail = error_on_fail
self.preembed = embed
super(ForceField, self).__init__(**kwargs)
@property
def columns(self):
return pd.Index(['structure'])
def embed(self, mol):
success = EmbedMolecule(mol)
if success == -1:
msg = 'Failed to Embed Molecule {}'.format(mol.name)
if self.error_on_fail:
raise RuntimeError(msg)
elif self.warn_on_fail:
warnings.warn(msg)
return None
if self.add_hs:
return mol.add_hs(add_coords=True)
else:
return mol
def _transform_mol(self, mol):
with Suppressor():
if self.preembed:
mol = self.embed(mol)
if mol is None: # embedding failed
return None
res = self._optimize(mol)
if res == -1:
msg = 'Failed to optimize molecule \'{}\' using {}'.format(mol.name, self.__class__)
if self.error_on_fail:
raise RuntimeError(msg)
elif self.warn_on_fail:
warnings.warn(msg)
return None
return mol
@abstractmethod
def _optimize(self, mol):
pass
class RoughEmbedding(ForceField):
def _optimize(self, mol):
return mol | scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/forcefields/base.py | base.py | 0.438304 | 0.175927
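# --- Illustrative sketch (not part of the package source) ---
# A minimal concrete ``ForceField`` showing the protocol of the base class
# above: ``_optimize`` returns -1 on failure and anything else on success,
# after which the base class returns the embedded molecule. RDKit's UFF
# optimizer is used here purely for illustration; the real package ships its
# own forcefield subclasses.
import skchem
from rdkit.Chem import AllChem
from skchem.forcefields.base import ForceField  # the class defined above
class SketchUFF(ForceField):
    def _optimize(self, mol):
        # AllChem.UFFOptimizeMolecule returns -1 if the forcefield cannot be set up
        return AllChem.UFFOptimizeMolecule(mol)
if __name__ == '__main__':
    ethanol = skchem.Mol.from_smiles('CCO')
    print(SketchUFF(verbose=False).transform(ethanol))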
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
import pandas as pd
from pandas.core.base import NoNewAttributesMixin, AccessorProperty
from pandas.core.series import Series
from pandas.core.index import Index
from .. import core
from .. import descriptors
DIM_RED = {
'tsne': TSNE,
'pca': PCA,
'mds': MDS
}
class StructureMethods(NoNewAttributesMixin):
""" Accessor for calling chemical methods on series of molecules. """
def __init__(self, data):
self._data = data
def add_hs(self, **kwargs):
return self._data.apply(lambda m: m.add_hs(**kwargs))
def remove_hs(self, **kwargs):
return self._data.apply(lambda m: m.remove_hs(**kwargs))
def visualize(self, fper='morgan', dim_red='tsne', dim_red_kw={}, **kwargs):
if isinstance(dim_red, str):
dim_red = DIM_RED.get(dim_red.lower())(**dim_red_kw)
fper = descriptors.get(fper)
fper.verbose = False
feats = fper.transform(self._data)
feats = feats.fillna(feats.mean())
twod = pd.DataFrame(dim_red.fit_transform(feats))
return twod.plot.scatter(x=0, y=1, **kwargs)
@property
def atoms(self):
return self._data.apply(lambda m: m.atoms)
def only_contains_mols(ser):
return ser.apply(lambda s: isinstance(s, core.Mol)).all()
class StructureAccessorMixin(object):
""" Mixin to bind chemical methods to objects. """
def _make_structure_accessor(self):
if isinstance(self, Index):
raise AttributeError('Can only use .mol accessor with molecules, '
'which use np.object_ in scikit-chem.')
if not only_contains_mols(self):
raise AttributeError('Can only use .mol accessor with '
'Series that only contain mols.')
return StructureMethods(self)
mol = AccessorProperty(StructureMethods, _make_structure_accessor)
Series.__bases__ += StructureAccessorMixin, | scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/pandas_ext/structure_methods.py | structure_methods.py | 0.853806 | 0.390331
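# --- Illustrative usage sketch (not part of the package source) ---
# Once the module above has been imported, pandas Series gain a ``.mol``
# accessor. This sketch assumes that importing ``skchem`` triggers the patching
# code above; otherwise import the ``pandas_ext.structure_methods`` module
# explicitly first.
if __name__ == '__main__':
    import pandas as pd
    import skchem
    ser = pd.Series(
        [skchem.Mol.from_smiles(s) for s in ('CCO', 'c1ccccc1', 'CC(=O)O')],
        name='structure',
    )
    print(ser.mol.add_hs())   # element-wise Mol.add_hs()
    print(ser.mol.atoms)      # per-molecule atom lists
    # ser.mol.visualize()     # 2D embedding of fingerprints (needs matplotlib)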
import functools
from abc import ABCMeta
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import Lipinski
from rdkit.Chem import rdMolDescriptors, rdPartialCharges
from rdkit.Chem.rdchem import HybridizationType
from ..core import Mol
from ..resource import PERIODIC_TABLE, ORGANIC
from ..base import AtomTransformer, Featurizer
from ..utils import nanarray
def element(a):
""" Return the element """
return a.GetSymbol()
def is_element(a, symbol='C'):
""" Is the atom of a given element """
return element(a) == symbol
element_features = {'is_{}'.format(e): functools.partial(is_element, symbol=e) for e in ORGANIC}
def is_h_acceptor(a):
""" Is an H acceptor? """
m = a.GetOwningMol()
idx = a.GetIdx()
return idx in [i[0] for i in Lipinski._HAcceptors(m)]
def is_h_donor(a):
""" Is an H donor? """
m = a.GetOwningMol()
idx = a.GetIdx()
return idx in [i[0] for i in Lipinski._HDonors(m)]
def is_hetero(a):
""" Is a heteroatom? """
m = a.GetOwningMol()
idx = a.GetIdx()
return idx in [i[0] for i in Lipinski._Heteroatoms(m)]
def atomic_number(a):
""" Atomic number of atom """
return a.GetAtomicNum()
def atomic_mass(a):
""" Atomic mass of atom """
return a.mass
def explicit_valence(a):
""" Explicit valence of atom """
return a.GetExplicitValence()
def implicit_valence(a):
""" Implicit valence of atom """
return a.GetImplicitValence()
def valence(a):
""" returns the valence of the atom """
return explicit_valence(a) + implicit_valence(a)
def formal_charge(a):
""" Formal charge of atom """
return a.GetFormalCharge()
def is_aromatic(a):
""" Boolean if atom is aromatic"""
return a.GetIsAromatic()
def num_implicit_hydrogens(a):
""" Number of implicit hydrogens """
return a.GetNumImplicitHs()
def num_explicit_hydrogens(a):
""" Number of explicit hydrodgens """
return a.GetNumExplicitHs()
def num_hydrogens(a):
""" Number of hydrogens """
return num_implicit_hydrogens(a) + num_explicit_hydrogens(a)
def is_in_ring(a):
""" Whether the atom is in a ring """
return a.IsInRing()
def crippen_log_p_contrib(a):
""" Hacky way of getting logP contribution. """
idx = a.GetIdx()
m = a.GetOwningMol()
return Crippen._GetAtomContribs(m)[idx][0]
def crippen_molar_refractivity_contrib(a):
""" Hacky way of getting molar refractivity contribution. """
idx = a.GetIdx()
m = a.GetOwningMol()
return Crippen._GetAtomContribs(m)[idx][1]
def tpsa_contrib(a):
""" Hacky way of getting total polar surface area contribution. """
idx = a.GetIdx()
m = a.GetOwningMol()
return rdMolDescriptors._CalcTPSAContribs(m)[idx]
def labute_asa_contrib(a):
""" Hacky way of getting accessible surface area contribution. """
idx = a.GetIdx()
m = a.GetOwningMol()
return rdMolDescriptors._CalcLabuteASAContribs(m)[0][idx]
def gasteiger_charge(a, force_calc=False):
""" Hacky way of getting gasteiger charge """
res = a.props.get('_GasteigerCharge', None)
if res and not force_calc:
return float(res)
else:
idx = a.GetIdx()
m = a.GetOwningMol()
rdPartialCharges.ComputeGasteigerCharges(m)
return float(a.props['_GasteigerCharge'])
def electronegativity(a):
return PERIODIC_TABLE.loc[a.atomic_number, 'pauling_electronegativity']
def first_ionization(a):
return PERIODIC_TABLE.loc[a.atomic_number, 'first_ionisation_energy']
def group(a):
return PERIODIC_TABLE.loc[a.atomic_number, 'group']
def period(a):
return PERIODIC_TABLE.loc[a.atomic_number, 'period']
def is_hybridized(a, hybrid_type=HybridizationType.SP3):
""" Hybridized as type hybrid_type, default SP3 """
    return str(a.GetHybridization()) == str(hybrid_type)
hybridization_features = {'is_' + n + '_hybridized': functools.partial(is_hybridized, hybrid_type=n) for n in HybridizationType.names}
ATOM_FEATURES = {
'atomic_number': atomic_number,
'atomic_mass': atomic_mass,
'formal_charge': formal_charge,
'gasteiger_charge': gasteiger_charge,
'electronegativity': electronegativity,
'first_ionisation': first_ionization,
'group': group,
'period': period,
'valence': valence,
'is_aromatic': is_aromatic,
'num_hydrogens': num_hydrogens,
'is_in_ring': is_in_ring,
'log_p_contrib': crippen_log_p_contrib,
'molar_refractivity_contrib': crippen_molar_refractivity_contrib,
'is_h_acceptor': is_h_acceptor,
'is_h_donor': is_h_donor,
'is_heteroatom': is_hetero,
'total_polar_surface_area_contrib': tpsa_contrib,
'total_labute_accessible_surface_area': labute_asa_contrib,
}
ATOM_FEATURES.update(element_features)
ATOM_FEATURES.update(hybridization_features)
class AtomFeaturizer(AtomTransformer, Featurizer):
def __init__(self, features='all', **kwargs):
self.features = features
super(AtomFeaturizer, self).__init__(**kwargs)
@property
def name(self):
return 'atom_feat'
@property
def features(self):
return self._features
@features.setter
def features(self, features):
if features == 'all':
features = ATOM_FEATURES
elif isinstance(features, str):
features = {features: ATOM_FEATURES[features]}
elif isinstance(features, list):
features = {feature: ATOM_FEATURES[feature] for feature in features}
elif isinstance(features, (dict, pd.Series)):
features = features
else:
raise NotImplementedError('Cannot use features {}'.format(features))
self._features = pd.Series(features)
self._features.index.name = 'atom_features'
@property
def minor_axis(self):
return self.features.index
def _transform_atom(self, atom):
return self.features.apply(lambda f: f(atom)).values
def _transform_mol(self, mol):
return np.array([self.transform(a) for a in mol.atoms])
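# Usage sketch (added for illustration, not part of the original module). The
# SMILES and feature names are assumed examples; the feature keys are those
# registered in ATOM_FEATURES above.
#
#     import skchem
#     mol = skchem.Mol.from_smiles('CCO')
#     af = AtomFeaturizer(features=['atomic_number', 'valence', 'is_aromatic'])
#     af.transform(mol)   # one row of feature values per atom of the molecule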
class DistanceTransformer(AtomTransformer, Featurizer):
""" Base class implementing Distance Matrix transformers.
Concrete classes inheriting from this should implement `_transform_mol`.
"""
__metaclass__ = ABCMeta
@property
def minor_axis(self):
return pd.RangeIndex(self.max_atoms, name='atom_idx')
def _transform_atom(self, atom):
return NotImplemented
def transform(self, mols):
res = super(DistanceTransformer, self).transform(mols)
if isinstance(mols, Mol):
res = res.iloc[:len(mols.atoms), :len(mols.atoms)]
return res
class SpacialDistanceTransformer(DistanceTransformer):
""" Transformer class for generating 3D distance matrices. """
# TODO: handle multiple conformers
    @property
    def name(self):
        return 'spacial_dist'
def _transform_mol(self, mol):
res = nanarray((len(mol.atoms), self.max_atoms))
res[:, :len(mol.atoms)] = Chem.Get3DDistanceMatrix(mol)
return res
class GraphDistanceTransformer(DistanceTransformer):
""" Transformer class for generating Graph distance matrices. """
# TODO: handle multiple conformers
    @property
    def name(self):
        return 'graph_dist'
def _transform_mol(self, mol):
res = nanarray((len(mol.atoms), self.max_atoms))
res[:len(mol.atoms), :len(mol.atoms)] = Chem.GetDistanceMatrix(mol)
        return res
| scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/descriptors/atom.py | atom.py |
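# Usage sketch for the distance transformers above (added for illustration, not
# part of the original module). The molecule is an assumed example, the keyword
# `max_atoms` is assumed to be handled by the AtomTransformer base class (it is
# referenced as `self.max_atoms` above), and the 3D variant assumes the molecule
# carries conformer coordinates.
#
#     import skchem
#     mol = skchem.Mol.from_smiles('CCO')
#     GraphDistanceTransformer(max_atoms=10).transform(mol)    # topological distances
#     SpacialDistanceTransformer(max_atoms=10).transform(mol)  # 3D distances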
import pandas as pd
from rdkit.Chem import GetDistanceMatrix
from rdkit.DataStructs import ConvertToNumpyArray
from rdkit.Chem.rdMolDescriptors import (GetMorganFingerprint,
GetHashedMorganFingerprint,
GetMorganFingerprintAsBitVect,
GetAtomPairFingerprint,
GetHashedAtomPairFingerprint,
GetHashedAtomPairFingerprintAsBitVect,
GetTopologicalTorsionFingerprint,
GetHashedTopologicalTorsionFingerprint,
GetHashedTopologicalTorsionFingerprintAsBitVect,
GetMACCSKeysFingerprint,
GetFeatureInvariants,
GetConnectivityInvariants)
from rdkit.Chem.rdReducedGraphs import GetErGFingerprint
from rdkit.Chem.rdmolops import RDKFingerprint
import numpy as np
from ..base import Transformer, Featurizer
class MorganFeaturizer(Transformer, Featurizer):
""" Morgan fingerprints, implemented by RDKit.
Notes:
Currently, folded bits are by far the fastest implementation.
Examples:
>>> import skchem
>>> import pandas as pd
>>> pd.options.display.max_rows = pd.options.display.max_columns = 5
>>> mf = skchem.descriptors.MorganFeaturizer()
>>> m = skchem.Mol.from_smiles('CCC')
Can transform an individual molecule to yield a Series:
>>> mf.transform(m)
morgan_fp_idx
0 0
1 0
..
2046 0
2047 0
Name: MorganFeaturizer, dtype: uint8
Can transform a list of molecules to yield a DataFrame:
>>> mf.transform([m])
morgan_fp_idx 0 1 ... 2046 2047
0 0 0 ... 0 0
<BLANKLINE>
[1 rows x 2048 columns]
Change the number of features the fingerprint is folded down to using `n_feats`.
>>> mf.n_feats = 1024
>>> mf.transform(m)
morgan_fp_idx
0 0
1 0
..
1022 0
1023 0
Name: MorganFeaturizer, dtype: uint8
Count fingerprints with `as_bits` = False
>>> mf.as_bits = False
>>> res = mf.transform(m); res[res > 0]
morgan_fp_idx
33 2
80 1
294 2
320 1
Name: MorganFeaturizer, dtype: int64
Pseudo-gradient with `grad` shows which atoms contributed to which feature.
>>> mf.grad(m)[res > 0]
atom_idx 0 1 2
features
33 1 0 1
80 0 1 0
294 1 2 1
320 1 1 1
"""
def __init__(self, radius=2, n_feats=2048, as_bits=True, use_features=False,
use_bond_types=True, use_chirality=False, **kwargs):
""" Initialize the fingerprinter object.
Args:
radius (int):
The maximum radius for atom environments.
Default is `2`.
n_feats (int):
The number of features to which to fold the fingerprint down.
For unfolded, use `-1`.
Default is `2048`.
as_bits (bool):
Whether to return bits (`True`) or counts (`False`).
Default is `True`.
            use_features (bool):
                Whether to map atom types to generic features (FCFP analog).
                Default is `False`.
            use_bond_types (bool):
                Whether to use bond types to differentiate environments.
                Default is `True`.
use_chirality (bool):
Whether to use chirality to differentiate environments.
Default is `False`.
"""
super(MorganFeaturizer, self).__init__(**kwargs)
self.radius = radius
self.n_feats = n_feats
self.sparse = self.n_feats < 0
self.as_bits = as_bits
self.use_features = use_features
self.use_bond_types = use_bond_types
self.use_chirality = use_chirality
def _transform_mol(self, mol):
"""Private method to transform a skchem molecule.
Use `transform` for the public method, which genericizes the argument to
iterables of mols.
Args:
mol (skchem.Mol): Molecule to calculate fingerprint for.
Returns:
np.array or dict:
Fingerprint as an array (or a dict if sparse).
"""
if self.as_bits and self.n_feats > 0:
fp = GetMorganFingerprintAsBitVect(mol, self.radius,
nBits=self.n_feats,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality)
res = np.array(0)
ConvertToNumpyArray(fp, res)
res = res.astype(np.uint8)
else:
if self.n_feats <= 0:
res = GetMorganFingerprint(mol, self.radius,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality)
res = res.GetNonzeroElements()
if self.as_bits:
res = {k: int(v > 0) for k, v in res.items()}
else:
res = GetHashedMorganFingerprint(mol, self.radius,
nBits=self.n_feats,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality)
res = np.array(list(res))
return res
@property
def name(self):
return 'morg'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='morgan_fp_idx')
def grad(self, mol):
""" Calculate the pseudo gradient with respect to the atoms.
        The pseudo gradient is the number of times each atom set that particular
        bit.
Args:
mol (skchem.Mol):
The molecule for which to calculate the pseudo gradient.
Returns:
pandas.DataFrame:
Dataframe of pseudogradients, with columns corresponding to
atoms, and rows corresponding to features of the fingerprint.
"""
cols = pd.Index(list(range(len(mol.atoms))), name='atom_idx')
dist = GetDistanceMatrix(mol)
info = {}
if self.n_feats < 0:
res = GetMorganFingerprint(mol, self.radius,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality,
bitInfo=info).GetNonzeroElements()
idx_list = list(res.keys())
idx = pd.Index(idx_list, name='features')
grad = np.zeros((len(idx), len(cols)))
for bit in info:
for atom_idx, radius in info[bit]:
grad[idx_list.index(bit)] += (dist <= radius)[atom_idx]
else:
res = list(GetHashedMorganFingerprint(mol, self.radius,
nBits=self.n_feats,
useFeatures=self.use_features,
useBondTypes=self.use_bond_types,
useChirality=self.use_chirality,
bitInfo=info))
idx = pd.Index(range(self.n_feats), name='features')
grad = np.zeros((len(idx), len(cols)))
for bit in info:
for atom_idx, radius in info[bit]:
grad[bit] += (dist <= radius)[atom_idx]
grad = pd.DataFrame(grad, index=idx, columns=cols)
if self.as_bits:
grad = (grad > 0)
return grad.astype(int)
class AtomPairFeaturizer(Transformer, Featurizer):
""" Atom Pair Fingerprints, implemented by RDKit. """
def __init__(self, min_length=1, max_length=30, n_feats=2048, as_bits=False,
use_chirality=False, **kwargs):
""" Instantiate an atom pair fingerprinter.
Args:
min_length (int):
The minimum length of paths between pairs.
Default is `1`, i.e. pairs can be bonded together.
max_length (int):
The maximum length of paths between pairs.
Default is `30`.
n_feats (int):
The number of features to which to fold the fingerprint down.
For unfolded, use `-1`.
Default is `2048`.
as_bits (bool):
Whether to return bits (`True`) or counts (`False`).
Default is `False`.
use_chirality (bool):
Whether to use chirality to differentiate environments.
Default is `False`.
"""
super(AtomPairFeaturizer, self).__init__(**kwargs)
self.min_length = min_length
self.max_length = max_length
self.n_feats = n_feats
self.sparse = self.n_feats < 0
self.as_bits = as_bits
self.use_chirality = use_chirality
def _transform_mol(self, mol):
"""Private method to transform a skchem molecule.
        Use `transform` for the public method, which genericizes the argument to
iterables of mols.
Args:
mol (skchem.Mol): Molecule to calculate fingerprint for.
Returns:
np.array or dict:
Fingerprint as an array (or a dict if sparse).
"""
if self.as_bits and self.n_feats > 0:
fp = GetHashedAtomPairFingerprintAsBitVect(mol, nBits=self.n_feats,
minLength=self.min_length,
maxLength=self.max_length,
includeChirality=self.use_chirality)
res = np.array(0)
ConvertToNumpyArray(fp, res)
res = res.astype(np.uint8)
else:
if self.n_feats <= 0:
                res = GetAtomPairFingerprint(mol,
                                             minLength=self.min_length,
                                             maxLength=self.max_length,
                                             includeChirality=self.use_chirality)
res = res.GetNonzeroElements()
if self.as_bits:
res = {k: int(v > 0) for k, v in res.items()}
else:
res = GetHashedAtomPairFingerprint(mol, nBits=self.n_feats,
minLength=self.min_length,
maxLength=self.max_length,
includeChirality=self.use_chirality)
res = np.array(list(res))
return res
@property
def name(self):
return 'atom_pair'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='ap_fp_idx')
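# Usage sketch (added for illustration, not part of the original module). The
# SMILES and parameter values are assumed examples.
#
#     import skchem
#     apf = AtomPairFeaturizer(n_feats=1024, as_bits=True)
#     apf.transform(skchem.Mol.from_smiles('CCC'))   # 1024-bit vector indexed by 'ap_fp_idx'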
class TopologicalTorsionFeaturizer(Transformer, Featurizer):
""" Topological Torsion fingerprints, implemented by RDKit. """
def __init__(self, target_size=4, n_feats=2048, as_bits=False,
use_chirality=False, **kwargs):
"""
Args:
target_size (int):
# TODO
n_feats (int):
The number of features to which to fold the fingerprint down.
For unfolded, use `-1`.
Default is `2048`.
as_bits (bool):
Whether to return bits (`True`) or counts (`False`).
Default is `False`.
use_chirality (bool):
Whether to use chirality to differentiate environments.
Default is `False`.
"""
self.target_size = target_size
self.n_feats = n_feats
self.sparse = self.n_feats < 0
self.as_bits = as_bits
self.use_chirality = use_chirality
super(TopologicalTorsionFeaturizer, self).__init__(**kwargs)
def _transform_mol(self, mol):
""" Private method to transform a skchem molecule.
Args:
mol (skchem.Mol): Molecule to calculate fingerprint for.
Returns:
np.array or dict:
Fingerprint as an array (or a dict if sparse).
"""
if self.as_bits and self.n_feats > 0:
fp = GetHashedTopologicalTorsionFingerprintAsBitVect(mol, nBits=self.n_feats,
targetSize=self.target_size,
includeChirality=self.use_chirality)
res = np.array(0)
ConvertToNumpyArray(fp, res)
res = res.astype(np.uint8)
else:
if self.n_feats <= 0:
                res = GetTopologicalTorsionFingerprint(mol,
                                                       targetSize=self.target_size,
                                                       includeChirality=self.use_chirality)
res = res.GetNonzeroElements()
if self.as_bits:
res = {k: int(v > 0) for k, v in res.items()}
else:
res = GetHashedTopologicalTorsionFingerprint(mol, nBits=self.n_feats,
targetSize=self.target_size,
includeChirality=self.use_chirality)
res = np.array(list(res))
return res
@property
    def name(self):
return 'top_tort'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='tt_fp_idx')
class MACCSFeaturizer(Transformer, Featurizer):
""" MACCS Keys Fingerprints """
def __init__(self, **kwargs):
super(MACCSFeaturizer, self).__init__(**kwargs)
self.n_feats = 166
def _transform_mol(self, mol):
return np.array(list(GetMACCSKeysFingerprint(mol)))[1:]
@property
def name(self):
return 'maccs'
@property
def columns(self):
return pd.Index(
['ISOTOPE', '103 < ATOMIC NO. < 256', 'GROUP IVA,VA,VIA PERIODS 4-6 (Ge...)', 'ACTINIDE',
'GROUP IIIB,IVB (Sc...)', 'LANTHANIDE', 'GROUP VB,VIB,VIIB (V...)', 'QAAA@1', 'GROUP VIII (Fe...)',
'GROUP IIA (ALKALINE EARTH)', '4M RING', 'GROUP IB,IIB (Cu...)', 'ON(C)C', 'S-S', 'OC(O)O', 'QAA@1', 'CTC',
'GROUP IIIA (B...)', '7M RING', 'SI', 'C=C(Q)Q', '3M RING', 'NC(O)O', 'N-O', 'NC(N)N', 'C$=C($A)$A', 'I',
'QCH2Q', 'P', 'CQ(C)(C)A', 'QX', 'CSN', 'NS', 'CH2=A', 'GROUP IA (ALKALI METAL)', 'S HETEROCYCLE',
'NC(O)N', 'NC(C)N', 'OS(O)O', 'S-O', 'CTN', 'F', 'QHAQH', 'OTHER', 'C=CN', 'BR', 'SAN', 'OQ(O)O', 'CHARGE',
'C=C(C)C', 'CSO', 'NN', 'QHAAAQH', 'QHAAQH', 'OSO', 'ON(O)C', 'O HETEROCYCLE', 'QSQ', 'Snot%A%A', 'S=O',
'AS(A)A', 'A$A!A$A', 'N=O', 'A$A!S', 'C%N', 'CC(C)(C)A', 'QS', 'QHQH (&...)', 'QQH', 'QNQ', 'NO', 'OAAO',
'S=A', 'CH3ACH3', 'A!N$A', 'C=C(A)A', 'NAN', 'C=N', 'NAAN', 'NAAAN', 'SA(A)A', 'ACH2QH', 'QAAAA@1', 'NH2',
'CN(C)C', 'CH2QCH2', 'X!A$A', 'S', 'OAAAO', 'QHAACH2A', 'QHAAACH2A', 'OC(N)C', 'QCH3', 'QN', 'NAAO',
'5M RING', 'NAAAO', 'QAAAAA@1', 'C=C', 'ACH2N', '8M RING', 'QO', 'CL', 'QHACH2A', 'A$A($A)$A', 'QA(Q)Q',
'XA(A)A', 'CH3AAACH2A', 'ACH2O', 'NCO', 'NACH2A', 'AA(A)(A)A', 'Onot%A%A', 'CH3CH2A', 'CH3ACH2A',
'CH3AACH2A', 'NAO', 'ACH2CH2A > 1', 'N=A', 'HETEROCYCLIC ATOM > 1 (&...)', 'N HETEROCYCLE', 'AN(A)A',
'OCO', 'QQ', 'AROMATIC RING > 1', 'A!O!A', 'A$A!O > 1 (&...)', 'ACH2AAACH2A', 'ACH2AACH2A',
'QQ > 1 (&...)', 'QH > 1', 'OACH2A', 'A$A!N', 'X (HALOGEN)', 'Nnot%A%A', 'O=A > 1', 'HETEROCYCLE',
'QCH2A > 1 (&...)', 'OH', 'O > 3 (&...)', 'CH3 > 2 (&...)', 'N > 1', 'A$A!O', 'Anot%A%Anot%A',
'6M RING > 1', 'O > 2', 'ACH2CH2A', 'AQ(A)A', 'CH3 > 1', 'A!A$A!A', 'NH', 'OC(C)C', 'QCH2A', 'C=O',
'A!CH2!A', 'NA(A)A', 'C-O', 'C-N', 'O > 1', 'CH3', 'N', 'AROMATIC', '6M RING', 'O', 'RING', 'FRAGMENTS'],
name='maccs_idx')
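# Usage sketch (added for illustration, not part of the original module). The
# SMILES is an assumed example.
#
#     import skchem
#     MACCSFeaturizer().transform(skchem.Mol.from_smiles('c1ccccc1O'))   # 166 named MACCS keys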
class ErGFeaturizer(Transformer, Featurizer):
""" Extended Reduced Graph Fingerprints.
Implemented in RDKit."""
def __init__(self, atom_types=0, fuzz_increment=0.3, min_path=1, max_path=15, **kwargs):
super(ErGFeaturizer, self).__init__(**kwargs)
self.atom_types = atom_types
self.fuzz_increment = fuzz_increment
self.min_path = min_path
self.max_path = max_path
self.n_feats = 315
def _transform_mol(self, mol):
return np.array(GetErGFingerprint(mol))
@property
def name(self):
return 'erg'
@property
def columns(self):
return pd.RangeIndex(self.n_feats, name='erg_fp_idx')
class FeatureInvariantsFeaturizer(Transformer, Featurizer):
""" Feature invariants fingerprints. """
def __init__(self, **kwargs):
super(FeatureInvariantsFeaturizer, self).__init__(**kwargs)
def _transform_mol(self, mol):
return np.array(GetFeatureInvariants(mol))
@property
def name(self):
return 'feat_inv'
@property
def columns(self):
return None
class ConnectivityInvariantsFeaturizer(Transformer, Featurizer):
""" Connectivity invariants fingerprints """
def __init__(self, include_ring_membership=True, **kwargs):
        super(ConnectivityInvariantsFeaturizer, self).__init__(**kwargs)
self.include_ring_membership = include_ring_membership
raise NotImplementedError # this is a sparse descriptor
def _transform_mol(self, mol):
return np.array(GetConnectivityInvariants(mol))
@property
def name(self):
return 'conn_inv'
@property
def columns(self):
return None
class RDKFeaturizer(Transformer, Featurizer):
""" RDKit fingerprint """
# TODO: finish docstring
def __init__(self, min_path=1, max_path=7, n_feats=2048, n_bits_per_hash=2,
use_hs=True, target_density=0.0, min_size=128,
branched_paths=True, use_bond_types=True, **kwargs):
""" RDK fingerprints
Args:
min_path (int):
minimum number of bonds to include in the subgraphs.
max_path (int):
maximum number of bonds to include in the subgraphs.
n_feats (int):
The number of features to which to fold the fingerprint down. For unfolded, use `-1`.
            n_bits_per_hash (int):
number of bits to set per path.
use_hs (bool):
include paths involving Hs in the fingerprint if the molecule has explicit Hs.
target_density (float):
fold the fingerprint until this minimum density has been reached.
min_size (int):
the minimum size the fingerprint will be folded to when trying to reach tgtDensity.
branched_paths (bool):
if set both branched and unbranched paths will be used in the fingerprint.
use_bond_types (bool):
if set both bond orders will be used in the path hashes.
"""
super(RDKFeaturizer, self).__init__(**kwargs)
self.min_path = min_path
self.max_path = max_path
self.n_feats = n_feats
self.n_bits_per_hash = n_bits_per_hash
self.use_hs = use_hs
self.target_density = target_density
self.min_size = min_size
self.branched_paths = branched_paths
self.use_bond_types = use_bond_types
def _transform_mol(self, mol):
return np.array(list(RDKFingerprint(mol, minPath=self.min_path,
maxPath=self.max_path,
fpSize=self.n_feats,
nBitsPerHash=self.n_bits_per_hash,
useHs=self.use_hs,
tgtDensity=self.target_density,
minSize=self.min_size,
branchedPaths=self.branched_paths,
useBondOrder=self.use_bond_types)))
@property
def name(self):
return 'rdkit'
@property
def columns(self):
        return pd.RangeIndex(self.n_feats, name='rdk_fp_idx')
| scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/descriptors/fingerprints.py | fingerprints.py |
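# Usage sketch for the RDKit path-based fingerprint above (added for
# illustration, not part of the original module). Parameter values and the
# SMILES are assumed examples.
#
#     import skchem
#     rdkf = RDKFeaturizer(min_path=1, max_path=5, n_feats=1024)
#     rdkf.transform(skchem.Mol.from_smiles('CCO'))   # vector indexed by 'rdk_fp_idx'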
import matplotlib.pyplot as plt
from .. import descriptors
from .. import core
from .. import vis
from ipywidgets import Dropdown, Text, VBox, HBox, Valid, HTML
from IPython import get_ipython
from IPython.display import clear_output, display
class Visualizer(object):
def __init__(self, fper='morgan', smiles='c1ccccc1O', dpi=200):
self.initialize_ipython()
if isinstance(fper, str):
self.fper = descriptors.get(fper)
else:
self.fper = fper
self.smiles_input = Text(smiles, description='smiles')
self.smiles_input.on_submit(self.update_smiles)
self.smiles_input.observe(self.typing)
self.valid = Valid(True)
self.dropdown = Dropdown(options=[], description='bit')
self.dropdown.observe(self.plot)
self.dpi_input = Text(str(dpi), description='dpi')
self.dpi_input.on_submit(self.plot)
self.ui = VBox([
HTML('<h2>Visualizer</h2>'),
HBox([self.smiles_input, self.valid]),
self.dropdown,
self.dpi_input])
self.update_smiles(None)
self.display()
def initialize_ipython(self):
ipython = get_ipython()
        try:
            ipython.magic('matplotlib inline')
        except Exception:
            # not running inside IPython, or the inline backend is unavailable
            pass
def typing(self, _):
self.valid.visible = False
@property
def dpi(self):
        try:
            return int(self.dpi_input.value)
        except ValueError:
            return 50
@dpi.setter
def dpi(self, value):
self.dpi_input.value = str(value)
def display(self):
display(self.ui)
def update_smiles(self, _):
try:
self._mol = core.Mol.from_smiles(self.smiles_input.value)
self.valid.value = True
except ValueError:
self.valid.value = False
return
finally:
self.valid.visible = True
return self.calculate()
def calculate(self):
fp = self.fper.transform(self.mol)
self.fp = fp[fp == 1].index
self.fpg = self.fper.grad(self.mol).ix[self.fp]
return self.update_dropdown()
def update_dropdown(self):
self.dropdown.options.append(self.fp[0])
self.dropdown.value = self.fp[0]
self.dropdown.options = self.fp.tolist()
return self.plot(self.dropdown.value)
@property
def mol(self):
return self._mol
@mol.setter
def mol(self, mol):
self._mol = mol
self.smiles_input.value = mol.to_smiles()
self.calculate()
@property
def current_smiles(self):
return self.smiles_input.value
@property
def current_bit(self):
return self.dropdown.value
def plot(self, _):
clear_output()
plt.clf()
plt.rcParams['savefig.dpi'] = self.dpi
        vis.plot_weights(self.mol, self.fpg.ix[self.current_bit], quality=4, ax=plt.gca())
| scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/interact/desc_vis.py | desc_vis.py |
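# Usage sketch (added for illustration, not part of the original module).
# Intended for a Jupyter/IPython notebook; the import path and SMILES are
# assumptions.
#
#     from skchem.interact.desc_vis import Visualizer
#     viz = Visualizer(fper='morgan', smiles='c1ccccc1O', dpi=150)
#     # edit the SMILES box and pick a bit from the dropdown; the atoms that
#     # set that bit are highlighted via skchem.vis.plot_weights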
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
import pandas as pd
class ChemicalObject(object):
""" A mixin for each chemical object in scikit-chem """
@classmethod
def from_super(cls, obj):
"""A method that converts the class of an object of parent class to that of the child. """
obj.__class__ = cls
return obj
class AtomView(object):
""" Atom interface wrapper """
def __init__(self, owner):
self.owner = owner
self.props = AtomPropertyView(self)
def __getitem__(self, index):
from .atom import Atom
return Atom.from_super(self.owner.GetAtomWithIdx(index))
def __len__(self):
return self.owner.GetNumAtoms()
def __iter__(self):
return AtomIterator(self.owner)
def __str__(self):
return str(list(str(atom) for atom in self))
@property
def elements(self):
return pd.Series((atom.element for atom in self), index=self.index)
@property
def atomic_number(self):
return pd.Series((atom.atomic_number for atom in self), index=self.index)
@property
def atomic_mass(self):
return pd.Series((atom.mass for atom in self), index=self.index)
@property
def index(self):
return pd.RangeIndex(len(self), name='atom_idx')
def __repr__(self):
return '<{class_} values="{values}" at {address}>'.format(
class_=self.__class__.__name__,
values=str(self),
address=hex(id(self)))
class AtomIterator(AtomView):
""" Atom iterator """
def __init__(self, owner):
super(AtomIterator, self).__init__(owner)
self._current = 0
self._high = self.owner.GetNumAtoms()
def __next__(self):
if self._current >= self._high:
raise StopIteration
else:
self._current += 1
return self[self._current - 1]
# py2 compat
next = __next__
class View(object):
""" View wrapper interface """
__metaclass__ = ABCMeta
@abstractmethod
def keys(self):
return []
def get(self, index, default=None):
if index in self.keys():
return self[index]
else:
return default
def pop(self, index, default=None):
if default:
val = self.get(index, default)
else:
val = self[index]
self.remove(index)
return val
def clear(self):
for idx in self.keys():
self.remove(idx)
def items(self):
return list((k, self[k]) for k in self.keys())
def remove(self, key):
self.__delitem__(key)
    def __getitem__(self, key):
        raise NotImplementedError
    def __setitem__(self, key, value):
        raise NotImplementedError
    def __delitem__(self, key):
        raise NotImplementedError
def __iter__(self):
return iter(self.keys())
def __str__(self):
return str(dict(self))
def __len__(self):
return len(self.keys())
def __repr__(self):
return '<{klass} values="{values}" at {address}>'.format(
klass=self.__class__.__name__,
values=str(self),
address=hex(id(self)))
class PropertyView(View):
""" Property object wrapper """
def __init__(self, owner):
self._owner = owner
def keys(self):
return list(k for k in self._owner.GetPropNames() if k[:1] != '_')
def __getitem__(self, key):
# we manually work out if it was a float that was stored, as GetProp
# returns floats and ints set by SetDoubleProp and SetIntProp as strings
value = self._owner.GetProp(str(key))
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
def __setitem__(self, key, value):
if not isinstance(key, str):
warnings.warn("RDKit property keys can only be of type `str`. Using `{key}` as a `str`.".format(key=key))
key = str(key)
        if key[0] == '_':
            warnings.warn("`{key}` is a private RDKit property key. "
                          "Using this may have unintended consequences.".format(key=key))
if isinstance(value, str):
self._owner.SetProp(key, value)
elif isinstance(value, (int, np.int64, np.int32)):
self._owner.SetIntProp(key, value)
elif isinstance(value, (float, np.float64, np.float32)):
self._owner.SetDoubleProp(key, value)
else:
warnings.warn("RDKit property keys can only be `str`, `int` or `float`."
"Using `{value}` as a `str`.".format(value=value))
self._owner.SetProp(key, str(value))
def __delitem__(self, index):
self._owner.ClearProp(index)
class AtomPropertyView(View):
""" Atom property wrapper """
def __init__(self, atom_view):
self._atom_view = atom_view
def keys(self):
res = set()
for atom in self._atom_view:
res = res.union(set(atom.props.keys()))
return list(res)
def get(self, key, default=None):
return [a.props.get(key, default) for a in self._atom_view]
def __getitem__(self, key):
if key not in self.keys():
raise KeyError('No atoms have the property set.')
return self.get(key, None)
def __setitem__(self, key, value):
assert len(self._atom_view) == len(value), "Must pass same number of values as atoms."
for atom, val in zip(self._atom_view, value):
atom.props[key] = val
def __delitem__(self, key):
for atom in self._atom_view:
            atom.props.remove(key)
| scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/core/base.py | base.py |
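# Usage sketch for the view classes above (added for illustration, not part of
# the original module). The molecule and the property name are assumed examples.
#
#     import skchem
#     mol = skchem.Mol.from_smiles('CCO')
#     mol.atoms.elements               # pandas Series of element symbols (AtomView)
#     mol.atoms[0].props['label'] = 1  # per-atom property via PropertyView
#     mol.atoms.props.keys()           # keys set on any atom (AtomPropertyView)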
import warnings
import numpy as np
import pandas as pd
class ChemicalObject(object):
""" A mixin for each chemical object in scikit-chem """
@classmethod
def from_super(cls, obj):
"""A method that converts the class of an object of parent class to that of the child. """
obj.__class__ = cls
return obj
class AtomView(object):
""" Atom interface wrapper """
def __init__(self, owner):
self.owner = owner
self.props = AtomPropertyView(self)
def __getitem__(self, index):
from .atom import Atom
return Atom.from_super(self.owner.GetAtomWithIdx(index))
def __len__(self):
return self.owner.GetNumAtoms()
def __iter__(self):
return AtomIterator(self.owner)
def __str__(self):
return str(list(str(atom) for atom in self))
@property
def elements(self):
return pd.Series((atom.element for atom in self), index=self.index)
@property
def atomic_number(self):
return pd.Series((atom.atomic_number for atom in self), index=self.index)
@property
def atomic_mass(self):
return pd.Series((atom.mass for atom in self), index=self.index)
@property
def index(self):
return pd.RangeIndex(len(self), name='atom_idx')
def __repr__(self):
return '<{class_} values="{values}" at {address}>'.format(
class_=self.__class__.__name__,
values=str(self),
address=hex(id(self)))
class AtomIterator(AtomView):
""" Atom iterator """
def __init__(self, owner):
super(AtomIterator, self).__init__(owner)
self._current = 0
self._high = self.owner.GetNumAtoms()
def __next__(self):
if self._current >= self._high:
raise StopIteration
else:
self._current += 1
return self[self._current - 1]
# py2 compat
next = __next__
class View(object):
""" View wrapper interface """
__metaclass__ = ABCMeta
@abstractmethod
def keys(self):
return []
def get(self, index, default=None):
if index in self.keys():
return self[index]
else:
return default
def pop(self, index, default=None):
if default:
val = self.get(index, default)
else:
val = self[index]
self.remove(index)
return val
def clear(self):
for idx in self.keys():
self.remove(idx)
def items(self):
return list((k, self[k]) for k in self.keys())
def remove(self, key):
self.__delitem__(key)
def __getitem__(self, key):
raise NotImplemented
def __setitem__(self, key, value):
raise NotImplemented
def __delitem__(self, key):
raise NotImplemented
def __iter__(self):
return iter(self.keys())
def __str__(self):
return str(dict(self))
def __len__(self):
return len(self.keys())
def __repr__(self):
return '<{klass} values="{values}" at {address}>'.format(
klass=self.__class__.__name__,
values=str(self),
address=hex(id(self)))
class PropertyView(View):
""" Property object wrapper """
def __init__(self, owner):
self._owner = owner
def keys(self):
return list(k for k in self._owner.GetPropNames() if k[:1] != '_')
def __getitem__(self, key):
# we manually work out if it was a float that was stored, as GetProp
# returns floats and ints set by SetDoubleProp and SetIntProp as strings
value = self._owner.GetProp(str(key))
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
def __setitem__(self, key, value):
if not isinstance(key, str):
warnings.warn("RDKit property keys can only be of type `str`. Using `{key}` as a `str`.".format(key=key))
key = str(key)
if key[0] == '_':
warnings.warn("`{value}` is a private RDKit property key. "
"Using this may have unintended consequences.".format(value=value))
if isinstance(value, str):
self._owner.SetProp(key, value)
elif isinstance(value, (int, np.int64, np.int32)):
self._owner.SetIntProp(key, value)
elif isinstance(value, (float, np.float64, np.float32)):
self._owner.SetDoubleProp(key, value)
else:
warnings.warn("RDKit property keys can only be `str`, `int` or `float`."
"Using `{value}` as a `str`.".format(value=value))
self._owner.SetProp(key, str(value))
def __delitem__(self, index):
self._owner.ClearProp(index)
class AtomPropertyView(View):
""" Atom property wrapper """
def __init__(self, atom_view):
self._atom_view = atom_view
def keys(self):
res = set()
for atom in self._atom_view:
res = res.union(set(atom.props.keys()))
return list(res)
def get(self, key, default=None):
return [a.props.get(key, default) for a in self._atom_view]
def __getitem__(self, key):
if key not in self.keys():
raise KeyError('No atoms have the property set.')
return self.get(key, None)
def __setitem__(self, key, value):
assert len(self._atom_view) == len(value), "Must pass same number of values as atoms."
for atom, val in zip(self._atom_view, value):
atom.props[key] = val
def __delitem__(self, key):
for atom in self._atom_view:
            atom.props.remove(key)
import warnings
import tempfile
import os
import pandas as pd
from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
from fuel import config
class Dataset(H5PYDataset):
""" Abstract base class providing an interface to the skchem data format."""
def __init__(self, **kwargs):
kwargs.setdefault('load_in_memory', True)
super(Dataset, self).__init__(
file_or_path=find_in_data_path(self.filename), **kwargs)
@classmethod
def load_set(cls, set_name, sources=()):
""" Load the sources for a single set.
Args:
set_name (str):
The set name.
sources (tuple[str]):
The sources to return data for.
Returns:
tuple[np.array]
The requested sources for the requested set.
"""
if set_name == 'all':
set_name = cls.set_names
else:
set_name = (set_name,)
if sources == 'all':
sources = cls.sources_names
return cls(which_sets=set_name, sources=sources, load_in_memory=True).data_sources
@classmethod
def load_data(cls, sets=(), sources=()):
""" Load a set of sources.
Args:
sets (tuple[str]):
The sets to return data for.
sources:
The sources to return data for.
Example:
(X_train, y_train), (X_test, y_test) = Dataset.load_data(sets=('train', 'test'), sources=('X', 'y'))
"""
for set_name in sets:
yield cls.load_set(set_name, sources)
@classmethod
def read_frame(cls, key, *args, **kwargs):
""" Load a set of features from the dataset as a pandas object.
Args:
key (str):
The HDF5 key for required data. Typically, this will be one of
- structure: for the raw molecules
- smiles: for the smiles
- features/{feat_name}: for the features
- targets/{targ_name}: for the targets
Returns:
pd.Series or pd.DataFrame or pd.Panel
The data as a dataframe.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
data = pd.read_hdf(find_in_data_path(cls.filename), key, *args, **kwargs)
if isinstance(data, pd.Panel):
data = data.transpose(2, 1, 0)
return data
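    # Illustrative read_frame calls (assuming a concrete subclass that defines
    # `filename`; the key names follow the listing in the docstring above):
    #
    #   mols = SomeDataset.read_frame('structure')
    #   y = SomeDataset.read_frame('targets/y')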
@classmethod
def download(cls, output_directory=None, download_directory=None):
""" Download the dataset and convert it.
Args:
output_directory (str):
The directory to save the data to. Defaults to the first
directory in the fuel data path.
download_directory (str):
The directory to save the raw files to. Defaults to a temporary
directory.
Returns:
str:
The path of the downloaded and processed dataset.
"""
if not output_directory:
output_directory = config.config['data_path']['yaml'].split(':')[0]
output_directory = os.path.expanduser(output_directory)
if not download_directory:
download_directory = tempfile.mkdtemp()
cls.downloader.download(directory=download_directory)
return cls.converter.convert(directory=download_directory,
                                     output_directory=output_directory)

scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/datasets/base.py | base.py

import warnings
import logging
import os
from collections import namedtuple
import numpy as np
import pandas as pd
import h5py
from fuel.datasets import H5PYDataset
from ... import forcefields
from ... import filters
from ... import descriptors
from ... import standardizers
from ... import pipeline
logger = logging.getLogger(__name__)
def default_pipeline():
""" Return a default pipeline to be used for general datasets. """
return pipeline.Pipeline([
standardizers.ChemAxonStandardizer(keep_failed=True, warn_on_fail=False),
forcefields.UFF(add_hs=True, warn_on_fail=False),
filters.OrganicFilter(),
filters.AtomNumberFilter(above=5, below=100, include_hydrogens=True),
filters.MassFilter(below=1000)
])
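# Sketch of how the converters below use this pipeline: `ms` is a pd.Series of
# skchem Mol objects and `y` the aligned targets; molecules failing a filter are
# dropped from both.
#
#   ms, y = default_pipeline().transform_filter(ms, y)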
DEFAULT_PYTABLES_KW = {
'complib': 'bzip2',
'complevel': 9
}
def contiguous_order(to_order, splits):
""" Determine a contiguous order from non-overlapping splits, and put data in that order.
Args:
to_order (iterable<pd.Series, pd.DataFrame, pd.Panel>):
The pandas objects to put in contiguous order.
splits (iterable<pd.Series>):
The non-overlapping splits, as boolean masks.
Returns:
iterable<pd.Series, pd.DataFrame, pd.Panel>: The data in contiguous order.
"""
member = pd.Series(0, index=splits[0].index)
for i, split in enumerate(splits):
member[split] = i
idx = member.sort_values().index
return (order.reindex(idx) for order in to_order)
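# Usage sketch, mirroring the converters below: given boolean train/valid/test
# masks over a shared index, reorder the data so that each split occupies a
# contiguous block of rows.
#
#   (ms, y, train, valid, test) = contiguous_order((ms, y, train, valid, test),
#                                                  (train, valid, test))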
Feature = namedtuple('Feature', ['fper', 'key', 'axis_names'])
def default_features():
return (
Feature(fper=descriptors.MorganFeaturizer(),
key='X_morg',
axis_names=['batch', 'features']),
Feature(fper=descriptors.PhysicochemicalFeaturizer(),
key='X_pc',
axis_names=['batch', 'features']),
Feature(fper=descriptors.AtomFeaturizer(max_atoms=100),
key='A',
axis_names=['batch', 'atom_idx', 'features']),
Feature(fper=descriptors.GraphDistanceTransformer(max_atoms=100),
key='G',
axis_names=['batch', 'atom_idx', 'atom_idx']),
Feature(fper=descriptors.SpacialDistanceTransformer(max_atoms=100),
key='G_d',
axis_names=['batch', 'atom_idx', 'atom_idx']),
Feature(fper=descriptors.ChemAxonFeaturizer(features='all'),
key='X_cx',
axis_names=['batch', 'features']),
Feature(fper=descriptors.ChemAxonAtomFeaturizer(features='all', max_atoms=100),
key='A_cx',
axis_names=['batch', 'atom_idx', 'features'])
)
class Split(object):
def __init__(self, mask, name, converter):
self.mask = mask
self.name = name
self.converter = converter
@property
def contiguous(self):
diff = np.ediff1d(self.mask.astype(int))
if self.mask.iloc[0] != 0:
diff[0] = 1
if self.mask.iloc[-1] != 0:
diff[-1] = -1
return sum(diff == -1) == 1 or sum(diff == 1) == 1
@property
def indices(self):
return np.nonzero(self.mask)[0]
def save(self):
self.converter.data_file[self.name + '_indices'] = self.indices
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.mask.to_hdf(self.converter.data_file.filename, '/indices/' + self.name)
@property
def ref(self):
return self.converter.data_file[self.name + '_indices'].ref
def to_dict(self):
idx = self.indices
if self.contiguous:
low, high = min(idx), max(idx)
return {source: (low, high) for source in self.converter.source_names}
else:
return {source: (-1, -1, self.ref) for source in self.converter.source_names}
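# Illustrative shape of Split.to_dict output (source names are examples): a
# contiguous split maps every source to a (start, stop) pair, otherwise to a
# (-1, -1, <h5py reference>) triple; these dicts are what save_splits below
# passes to H5PYDataset.create_split_array.
#
#   {'X_morg': (0, 3500), 'y': (0, 3500)}
#   {'X_morg': (-1, -1, <ref>), 'y': (-1, -1, <ref>)}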
class Converter(object):
""" Create a fuel dataset from molecules and targets. """
def __init__(self, directory, output_directory, output_filename='default.h5'):
        raise NotImplementedError
def run(self, ms, y, output_path, splits=None, features=None, pytables_kws=DEFAULT_PYTABLES_KW):
"""
Args:
ms (pd.Series):
The molecules of the dataset.
            y (pd.Series or pd.DataFrame):
The target labels of the dataset.
output_path (str):
The path to which the dataset should be saved.
features (list[Feature]):
The features to calculate. Defaults are used if `None`.
splits (iterable<(name, split)>):
An iterable of name, split tuples. Splits are provided as boolean arrays of the whole data.
"""
self.output_path = output_path
self.pytables_kws = pytables_kws
self.features = features if features is not None else default_features()
self.feature_names = [feat.key for feat in self.features]
self.task_names = ['y']
self.splits = [Split(split, name, self) for name, split in splits]
self.create_file(output_path)
self.save_splits()
self.save_molecules(ms)
self.save_targets(y)
self.save_features(ms)
@property
def source_names(self):
return self.feature_names + self.task_names
@property
def split_names(self):
return self.splits
def create_file(self, path):
logger.info('Creating h5 file at %s...', self.output_path)
self.data_file = h5py.File(path, 'w')
return self.data_file
def save_molecules(self, mols):
""" Save the molecules to the data file. """
logger.info('Writing molecules to file...')
logger.debug('Writing %s molecules to %s', len(mols), self.data_file.filename)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
mols.to_hdf(self.data_file.filename, 'structure', **self.pytables_kws)
mols.apply(lambda m: m.to_smiles().encode('utf-8')).to_hdf(self.data_file.filename, 'smiles')
def save_frame(self, data, name, prefix='targets'):
""" Save the a frame to the data file. """
logger.info('Writing %s', name)
logger.debug('Writing data of shape %s to %s', data.shape, self.data_file.filename)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if len(data.shape) > 2:
data = data.transpose(2, 1, 0) # panel serializes backwards for some reason...
data.to_hdf(self.data_file.filename,
key='/{prefix}/{name}'.format(prefix=prefix, name=name),
**self.pytables_kws)
if isinstance(data, pd.Series):
self.data_file[name] = h5py.SoftLink('/{prefix}/{name}/values'.format(prefix=prefix, name=name))
self.data_file[name].dims[0].label = data.index.name
elif isinstance(data, pd.DataFrame):
self.data_file[name] = h5py.SoftLink('/{prefix}/{name}/block0_values'.format(prefix=prefix, name=name))
self.data_file[name].dims[0].label = data.index.name
self.data_file[name].dims[1].label = data.columns.name
elif isinstance(data, pd.Panel):
self.data_file[name] = h5py.SoftLink('/{prefix}/{name}/block0_values'.format(prefix=prefix, name=name))
self.data_file[name].dims[0].label = data.minor_axis.name # as panel serializes backwards
self.data_file[name].dims[1].label = data.major_axis.name
self.data_file[name].dims[2].label = data.items.name
def save_targets(self, y):
self.save_frame(y, name='y', prefix='targets')
def save_features(self, ms):
""" Save all features for the dataset. """
logger.debug('Saving features')
for feat in self.features:
self._save_feature(ms, feat)
def _save_feature(self, ms, feat):
""" Calculate and save a feature to the data file. """
logger.info('Calculating %s', feat.key)
fps = feat.fper.transform(ms)
self.save_frame(fps, name=feat.key, prefix='feats')
def save_splits(self):
""" Save the splits to the data file. """
logger.info('Producing dataset splits...')
for split in self.splits:
split.save()
split_dict = {split.name: split.to_dict() for split in self.splits}
splits = H5PYDataset.create_split_array(split_dict)
logger.debug('split: %s', splits)
logger.info('Saving splits...')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.data_file.attrs['split'] = splits
@classmethod
def convert(cls, **kwargs):
kwargs.setdefault('directory', os.getcwd())
kwargs.setdefault('output_directory', os.getcwd())
return cls(**kwargs).output_path,
@classmethod
def fill_subparser(cls, subparser):
        return cls.convert

scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/converters/base.py | base.py

import zipfile
import os
import logging
LOGGER = logging.getLogger(__name__)
import numpy as np
import pandas as pd
from .base import Converter, default_pipeline
from ... import io
from ... import core
class Tox21Converter(Converter):
""" Class to build tox21 dataset.
"""
def __init__(self, directory, output_directory, output_filename='tox21.h5'):
output_path = os.path.join(output_directory, output_filename)
# extract data
train, valid, test = self.extract(directory)
# read data
train = self.read_train(train)
valid = self.read_valid(valid)
test = self.read_test(test, os.path.join(directory, 'test.txt'))
# combine into full dataset
data = pd.concat([train, valid, test], keys=['train', 'valid', 'test']).sort_index()
data.index.names = 'ds', 'id'
ms, y = data.structure, data.drop('structure', axis=1)
pipeline = default_pipeline()
ms, y = pipeline.transform_filter(ms, y)
# generate splits
ms, y = ms.reset_index(0), y.reset_index(0)
split_arr = ms.pop('ds')
y.pop('ds')
splits = [(split, split_arr == split) for split in ('train', 'valid', 'test')]
y.columns.name = 'tasks'
# call the Converter to make the final dataset
self.run(ms, y, output_path, splits=splits)
@staticmethod
def fix_id(s):
return s.split('-')[0]
@staticmethod
def fix_assay_name(s):
return s.replace('-', '_')
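    # e.g. fix_id('NCGC00357062-01') -> 'NCGC00357062',
    #      fix_assay_name('NR-AhR')  -> 'NR_AhR'
    # (the assay name is an illustrative Tox21-style label, not read from this file)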
@staticmethod
def patch_test(test):
test_1 = pd.Series({
'structure': core.Mol.from_smiles('FC(F)(F)c1[nH]c(c(C#N)c1Br)C1=CC=C(Cl)C=C1', name='NCGC00357062'),
'stochiometry': 0,
'Compound ID': 'NCGC00357062',
'Sample ID': 'NCGC00357062-01'}, name='NCGC00357062')
test['NCGC00357062'] = test_1
return test
def read_train(self, train):
train = io.read_sdf(train)
train.columns = train.columns.to_series().apply(self.fix_assay_name)
train.index = train.index.to_series().apply(self.fix_id)
self.assays = train.columns[-12:]
self.keep_cols = ['structure'] + self.assays.tolist()
train[self.assays] = train[self.assays].astype(float)
train = train[self.keep_cols]
train = train.sort_index()
ms = train.structure[~train.index.duplicated()]
train = train[self.assays].groupby(train.index).max()
train = ms.to_frame().join(train)
return train
def read_valid(self, valid):
valid = io.read_sdf(valid)
valid.columns = valid.columns.to_series().apply(self.fix_assay_name)
valid = valid[self.keep_cols]
valid[self.assays] = valid[self.assays].astype(float)
return valid
def read_test(self, test, test_data):
test = io.read_sdf(test)
test = self.patch_test(test)
test_data = pd.read_table(test_data)
test_data['Sample ID'] = test_data['Sample ID'].apply(self.fix_id)
test = test.join(test_data.set_index('Sample ID'))
test.columns = test.columns.to_series().apply(self.fix_assay_name)
test = test[self.keep_cols]
test[test == 'x'] = np.nan
test[self.assays] = test[self.assays].astype(float)
return test
def extract(self, directory):
with zipfile.ZipFile(os.path.join(directory, 'train.sdf.zip')) as f:
train = f.extract('tox21_10k_data_all.sdf')
with zipfile.ZipFile(os.path.join(directory, 'valid.sdf.zip')) as f:
valid = f.extract('tox21_10k_challenge_test.sdf')
with zipfile.ZipFile(os.path.join(directory, 'test.sdf.zip')) as f:
test = f.extract('tox21_10k_challenge_score.sdf')
return train, valid, test
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
LOGGER.info('Converting Tox21 Dataset...')
    Tox21Converter.convert()

scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/converters/tox21.py | tox21.py

import os
import logging
import itertools
from collections import defaultdict
import pandas as pd
import numpy as np
from sklearn import metrics
from .base import Converter, default_pipeline, contiguous_order
from ... import io
from ... import utils
from ...cross_validation import SimThresholdSplit
LOGGER = logging.getLogger(__file__)
class NMRShiftDB2Converter(Converter):
def __init__(self, directory, output_directory, output_filename='nmrshiftdb2.h5'):
output_path = os.path.join(output_directory, output_filename)
input_path = os.path.join(directory, 'nmrshiftdb2.sdf')
data = self.parse_data(input_path)
ys = self.get_spectra(data)
ys = self.process_spectra(ys)
ys = self.combine_duplicates(ys)
self.log_dists(ys)
self.log_duplicates(ys)
ys = self.squash_duplicates(ys)
c13s = self.to_frame(ys.loc[ys['13c'].notnull(), '13c'])
data = data[['structure']].join(c13s, how='right')
ms, y = data.structure, data.drop('structure', axis=1)
pipeline = default_pipeline()
ms, y = pipeline.transform_filter(ms, y)
y.columns.name = 'shifts'
cv = SimThresholdSplit(ms, min_threshold=0.6, block_width=4000, n_jobs=-1)
train, valid, test = cv.split((70, 15, 15))
(ms, y, train, valid, test) = contiguous_order((ms, y, train, valid, test), (train, valid, test))
splits = (('train', train), ('valid', valid), ('test', test))
self.run(ms, y, output_path=output_path, splits=splits)
@staticmethod
def parse_data(filepath):
""" Reads the raw datafile. """
LOGGER.info('Reading file: %s', filepath)
data = io.read_sdf(filepath, removeHs=False, warn_bad_mol=False)
data.index = data['nmrshiftdb2 ID'].astype(int)
data.index.name = 'nmrshiftdb2_id'
data.columns = data.columns.to_series().apply(utils.free_to_snail)
data = data.sort_index()
LOGGER.info('Read %s molecules.', len(data))
return data
@staticmethod
def get_spectra(data):
""" Retrieves spectra from raw data. """
LOGGER.info('Retrieving spectra from raw data...')
isotopes = [
'1h',
'11b',
'13c',
'15n',
'17o',
'19f',
'29si',
'31p',
'33s',
'73ge',
'195pt'
]
def is_spectrum(col_name, ele='c'):
return any(isotope in col_name for isotope in isotopes)
spectrum_cols = [c for c in data if is_spectrum(c)]
data = data[spectrum_cols]
def index_pair(s):
return s[0], int(s[1])
data.columns = pd.MultiIndex.from_tuples([index_pair(i.split('_')[1:]) for i in data.columns])
return data
@staticmethod
def process_spectra(data):
""" Turn the string representations found in sdf file into a dictionary. """
def spectrum_dict(spectrum_string):
if not isinstance(spectrum_string, str):
return np.nan # no spectra are still nan
if spectrum_string == '':
return np.nan # empty spectra are nan
sigs = spectrum_string.strip().strip('|').strip().split('|') # extract signals
sig_tup = [tuple(s.split(';')) for s in sigs] # take tuples as (signal, coupling, atom)
return {int(s[2]): float(s[0]) for s in sig_tup} # make spectrum a dictionary of atom to signal
return data.applymap(spectrum_dict)
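    # Hypothetical raw value consistent with the parser above ('signal;coupling;atom'
    # triples separated by '|'):
    #
    #   '18.3;0.0;0|45.8;0.0;1|'  ->  {0: 18.3, 1: 45.8}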
@staticmethod
def combine_duplicates(data):
""" Collect duplicate spectra into one dictionary. All shifts are collected into lists. """
def aggregate_dicts(ds):
res = defaultdict(list)
for d in ds:
if not isinstance(d, dict): continue
for k, v in d.items():
res[k].append(v)
return dict(res) if len(res) else np.nan
return data.groupby(level=0, axis=1).apply(lambda s: s.apply(aggregate_dicts, axis=1))
@staticmethod
def squash_duplicates(data):
""" Take the mean of all the duplicates. This is where we could do a bit more checking. """
def squash(d):
if not isinstance(d, dict):
return np.nan
else:
return {k: np.mean(v) for k, v in d.items()}
return data.applymap(squash)
@staticmethod
def to_frame(data):
""" Convert a series of dictionaries to a dataframe. """
res = pd.DataFrame(data.tolist(), index=data.index)
res.columns.name = 'atom_idx'
return res
@staticmethod
def extract_duplicates(data, kind='13c'):
""" Get all 13c duplicates. """
def is_duplicate(ele):
if not isinstance(ele, dict):
return False
else:
return len(list(ele.values())[0]) > 1
return data.loc[data[kind].apply(is_duplicate), kind]
@staticmethod
def log_dists(data):
def n_spect(ele):
return isinstance(ele, dict)
def n_shifts(ele):
return len(ele) if isinstance(ele, dict) else 0
def log_message(func):
return ' '.join('{k}: {v}'.format(k=k, v=v) for k, v in data.applymap(func).sum().to_dict().items())
LOGGER.info('Number of spectra: %s', log_message(n_spect))
LOGGER.info('Extracted shifts: %s', log_message(n_shifts))
def log_duplicates(self, data):
for kind in '1h', '13c':
dups = self.extract_duplicates(data, kind)
LOGGER.info('Number of duplicate %s spectra: %s', kind, len(dups))
res = pd.DataFrame(sum((list(itertools.combinations(l, 2)) for s in dups for k, l in s.items()), []))
LOGGER.info('Number of duplicate %s pairs: %f', kind, len(res))
LOGGER.info('MAE for duplicate %s: %.4f', kind, metrics.mean_absolute_error(res[0], res[1]))
LOGGER.info('MSE for duplicate %s: %.4f', kind, metrics.mean_squared_error(res[0], res[1]))
LOGGER.info('r2 for duplicate %s: %.4f', kind, metrics.r2_score(res[0], res[1]))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
LOGGER.info('Converting NMRShiftDB2 Dataset...')
    NMRShiftDB2Converter.convert()

scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/converters/nmrshiftdb2.py | nmrshiftdb2.py

import os
import zipfile
import logging
LOGGER = logging.getLogger(__name__)
import pandas as pd
import numpy as np
import skchem
from .base import Converter
from ... import standardizers
PATCHES = {
'820-75-7': r'NNC(=O)CNC(=O)C=[N+]=[N-]',
'2435-76-9': r'[N-]=[N+]=C1C=NC(=O)NC1=O',
'817-99-2': r'NC(=O)CNC(=O)\C=[N+]=[N-]',
'116539-70-9': r'CCCCN(CC(O)C1=C\C(=[N+]=[N-])\C(=O)C=C1)N=O',
'115-02-6': r'NC(COC(=O)\C=[N+]=[N-])C(=O)O',
'122341-55-3': r'NC(COC(=O)\C=[N+]=[N-])C(=O)O'
}
class MullerAmesConverter(Converter):
def __init__(self, directory, output_directory, output_filename='muller_ames.h5'):
"""
Args:
directory (str):
Directory in which input files reside.
output_directory (str):
Directory in which to save the converted dataset.
output_filename (str):
Name of the saved dataset. Defaults to `muller_ames.h5`.
Returns:
tuple of str:
Single-element tuple containing the path to the converted dataset.
"""
zip_path = os.path.join(directory, 'ci900161g_si_001.zip')
output_path = os.path.join(output_directory, output_filename)
with zipfile.ZipFile(zip_path) as f:
            f.extractall(directory)
# create dataframe
data = pd.read_csv(os.path.join(directory, 'smiles_cas_N6512.smi'),
delimiter='\t', index_col=1,
converters={1: lambda s: s.strip()},
header=None, names=['structure', 'id', 'is_mutagen'])
data = self.patch_data(data, PATCHES)
data['structure'] = data.structure.apply(skchem.Mol.from_smiles)
data = self.standardize(data)
data = self.optimize(data)
keep = self.filter(data)
ms, ys = keep.structure, keep.is_mutagen
indices = data.reset_index().index.difference(keep.reset_index().index)
        train = self.parse_splits(os.path.join(directory, 'splits_train_N6512.csv'))
train = self.drop_indices(train, indices)
splits = self.create_split_dict(train, 'train')
test = self.parse_splits(os.path.join(directory, 'splits_test_N6512.csv'))
test = self.drop_indices(test, indices)
splits.update(self.create_split_dict(test, 'test'))
self.run(ms, ys, output_path, splits=splits)
def patch_data(self, data, patches):
""" Patch smiles in a DataFrame with rewritten ones that specify diazo
groups in rdkit friendly way. """
LOGGER.info('Patching data...')
for cas, smiles in patches.items():
data.loc[cas, 'structure'] = smiles
return data
def parse_splits(self, f_path):
LOGGER.info('Parsing splits...')
with open(f_path) as f:
splits = [split for split in f.read().strip().splitlines()]
splits = [[n for n in split.strip().split(',')] for split in splits]
splits = [sorted(int(n) for n in split) for split in splits] # sorted ints
return [np.array(split) - 1 for split in splits] # zero based indexing
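    # Each line of the splits file is one fold of comma-separated 1-based row
    # numbers, e.g. a (hypothetical) line '1,4,9' becomes np.array([0, 3, 8]).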
def drop_indices(self, splits, indices):
LOGGER.info('Dropping failed compounds from split indices...')
for i, split in enumerate(splits):
split = split - sum(split > ix for ix in indices)
splits[i] = np.delete(split, indices)
return splits
def create_split_dict(self, splits, name):
return {'{}_{}'.format(name, i + 1): split \
for i, split in enumerate(splits)}
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
LOGGER.info('Converting Muller Ames Dataset...')
    MullerAmesConverter.convert()

scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/converters/muller_ames.py | muller_ames.py

import os
import zipfile
import logging
LOGGER = logging.getLogger(__name__)
import pandas as pd
import numpy as np
from ... import io
from .base import Converter, contiguous_order
from ...cross_validation import SimThresholdSplit
TXT_COLUMNS = [l.lower() for l in """CAS
Formula
Mol_Weight
Chemical_Name
WS
WS_temp
WS_type
WS_reference
LogP
LogP_temp
LogP_type
LogP_reference
VP
VP_temp
VP_type
VP_reference
DC_pKa
DC_temp
DC_type
DC_reference
henry_law Constant
HL_temp
HL_type
HL_reference
OH
OH_temp
OH_type
OH_reference
BP_pressure
MP
BP
FP""".split('\n')]
class PhysPropConverter(Converter):
def __init__(self, directory, output_directory, output_filename='physprop.h5'):
output_path = os.path.join(output_directory, output_filename)
sdf, txt = self.extract(directory)
mols, data = self.process_sdf(sdf), self.process_txt(txt)
LOGGER.debug('Compounds with data extracted: %s', len(data))
data = mols.to_frame().join(data)
data = self.drop_inconsistencies(data)
y = self.process_targets(data)
LOGGER.debug('Compounds with experimental: %s', len(y))
data = data.ix[y.index]
data.columns.name = 'targets'
ms, y = data.structure, data.drop('structure', axis=1)
cv = SimThresholdSplit(ms, min_threshold=0.6, block_width=4000, n_jobs=-1)
train, valid, test = cv.split((70, 15, 15))
(ms, y, train, valid, test) = contiguous_order((ms, y, train, valid, test), (train, valid, test))
splits = (('train', train), ('valid', valid), ('test', test))
self.run(ms, y, output_path=output_path, splits=splits)
def extract(self, directory):
LOGGER.info('Extracting from %s', directory)
with zipfile.ZipFile(os.path.join(directory, 'phys_sdf.zip')) as f:
sdf = f.extract('PhysProp.sdf')
with zipfile.ZipFile(os.path.join(directory, 'phys_txt.zip')) as f:
txt = f.extract('PhysProp.txt')
return sdf, txt
def process_sdf(self, path):
LOGGER.info('Processing sdf at %s', path)
mols = io.read_sdf(path, read_props=False).structure
mols.index = mols.apply(lambda m: m.GetProp('CAS'))
mols.index.name = 'cas'
LOGGER.debug('Structures extracted: %s', len(mols))
return mols
def process_txt(self, path):
LOGGER.info('Processing txt at %s', path)
data = pd.read_table(path, header=None, engine='python').iloc[:, :32]
data.columns = TXT_COLUMNS
data_types = data.columns[[s.endswith('_type') for s in data.columns]]
data[data_types] = data[data_types].fillna('NAN')
data = data.set_index('cas')
return data
def drop_inconsistencies(self, data):
LOGGER.info('Dropping inconsistent data...')
formula = data.structure.apply(lambda m: m.to_formula())
LOGGER.info('Inconsistent compounds: %s', (formula != data.formula).sum())
data = data[formula == data.formula]
return data
def process_targets(self, data):
LOGGER.info('Dropping estimated data...')
data = pd.concat([self.process_logS(data),
self.process_logP(data),
self.process_mp(data),
self.process_bp(data)], axis=1)
LOGGER.info('Dropped compounds: %s', data.isnull().all(axis=1).sum())
data = data[data.notnull().any(axis=1)]
LOGGER.debug('Compounds with experimental activities: %s', len(data))
return data
def process_logS(self, data):
cleaned = pd.DataFrame(index=data.index)
S = 0.001 * data.ws / data.mol_weight
logS = np.log10(S)
return logS[data.ws_type == 'EXP']
def process_logP(self, data):
logP = data.logp[data.logp_type == 'EXP']
return logP[logP > -10]
def process_mp(self, data):
return data.mp.apply(self.fix_temp)
def process_bp(self, data):
return data.bp.apply(self.fix_temp)
@staticmethod
def fix_temp(s, mean_range=5):
try:
return float(s)
except ValueError:
if '<' in s or '>' in s:
return np.nan
            # remove the ' dec'/' sub' suffixes explicitly; str.strip would strip
            # any of the characters ' ', 'd', 'e', 'c' from both ends instead
            for suffix in (' dec', ' sub'):
                if s.endswith(suffix):
                    s = s[:-len(suffix)]
if '-' in s and mean_range:
rng = [float(n) for n in s.split('-')]
if len(rng) > 2:
return np.nan
if np.abs(rng[1] - rng[0]) < mean_range:
return (rng[0] + rng[1])/2
try:
return float(s)
except ValueError:
return np.nan
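    # Illustrative inputs for fix_temp and the values it returns under the rules
    # above (example strings, not taken from the data file):
    #
    #   '217.5'   -> 217.5
    #   '> 300'   -> nan          (censored value)
    #   '122 dec' -> 122.0        (decomposition note removed)
    #   '86-88'   -> 87.0         (narrow range averaged)
    #   '50-100'  -> nan          (range wider than mean_range)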
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
LOGGER.info('Converting PhysProp Dataset...')
    PhysPropConverter.convert()

scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/converters/physprop.py | physprop.py

import os
import logging
logger = logging.getLogger(__name__)
import pandas as pd
from .base import Converter, default_pipeline, contiguous_order
from ...core import Mol
from ...cross_validation import SimThresholdSplit
class BradleyOpenMPConverter(Converter):
def __init__(self, directory, output_directory, output_filename='bradley_open_mp.h5'):
output_path = os.path.join(output_directory, output_filename)
data = self.parse_data(os.path.join(directory, 'bradley_melting_point_dataset.xlsx'))
data = self.filter_bad(data)
def parse_smiles(smi):
try:
return Mol.from_smiles(smi)
except ValueError:
return None
data['structure'] = data.smiles.apply(parse_smiles)
data = data[data.structure.notnull()]
ms, y = data.structure, self.fix_mp(data)
pipeline = default_pipeline()
ms, y = pipeline.transform_filter(ms, y)
cv = SimThresholdSplit(ms, min_threshold=0.6, n_jobs=-1)
train, valid, test = cv.split((70, 15, 15))
(ms, y, train, valid, test) = contiguous_order((ms, y, train, valid, test), (train, valid, test))
splits = (('train', train), ('valid', valid), ('test', test))
self.run(ms, y, output_path=output_path, splits=splits)
@staticmethod
def parse_data(path):
logger.info('Parsing data at %s...', path)
return pd.read_excel(path, index_col=0)
@staticmethod
def filter_bad(data):
logger.info('Removing manually annotated errors...')
bad_data = data.donotuse.notnull()
logger.debug('Removed %s', bad_data.sum())
return data[~bad_data]
@staticmethod
def fix_mp(data):
logger.info('Converting temperature to Kelvin...')
        return data.mpC + 273.15  # 0 degC = 273.15 K
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
    logger.info('Converting Bradley Open Melting Point Dataset...')
    BradleyOpenMPConverter.convert()

scikit-chem | /scikit-chem-0.0.6.tar.gz/scikit-chem-0.0.6/skchem/data/converters/bradley_open_mp.py | bradley_open_mp.py

from functools import wraps
import warnings
from rdkit import Chem
import pandas as pd
from ..core import Mol
from ..utils import Suppressor, squeeze
def _drop_props(row):
for prop in row.structure.props.keys():
row.structure.ClearProp(prop)
def _set_props(row, cols):
for i in cols:
row.structure.SetProp(str(i), str(row[i])) # rdkit props can only be str
def _set_name(row):
row.structure.name = str(row.name) # rdkit props can only be strs
def read_sdf(sdf, error_bad_mol=False, warn_bad_mol=True, nmols=None,
skipmols=None, skipfooter=None, read_props=True, mol_props=False,
*args, **kwargs):
"""Read an sdf file into a `pd.DataFrame`.
The function wraps the RDKit `ForwardSDMolSupplier` object.
Args:
sdf (str or file-like):
The location of data to load, as a file path, or a file-like object.
error_bad_mol (bool):
Whether an error should be raised if a molecule fails to parse.
Default is False.
warn_bad_mol (bool):
Whether a warning should be output if a molecule fails to parse.
Default is True.
nmols (int):
The number of molecules to read. If `None`, read all molecules.
Default is `None`.
skipmols (int):
The number of molecules to skip at start.
Default is `0`.
skipfooter (int):
The number of molecules to skip from the end.
Default is `0`.
read_props (bool):
Whether to read the properties into the data frame.
Default is `True`.
mol_props (bool):
Whether to keep properties in the molecule dictionary after they are
extracted to the dataframe.
Default is `False`.
args, kwargs:
Arguments will be passed to rdkit's ForwardSDMolSupplier.
Returns:
pandas.DataFrame:
The loaded data frame, with Mols supplied in the `structure` field.
See also:
rdkit.Chem.SDForwardMolSupplier
skchem.read_smiles
"""
# nmols is actually the index to cutoff. If we skip some at start, we need
# to add this number
if skipmols and nmols is not None:
nmols += skipmols
if isinstance(sdf, str):
sdf = open(sdf, 'rb') # use read bytes for python 3 compatibility
# use the suppression context manager to not pollute our stdout with rdkit
# errors and warnings.
# perhaps this should be captured better by Mol etc.
with Suppressor():
mol_supp = Chem.ForwardSDMolSupplier(sdf, *args, **kwargs)
mols = []
# single loop through sdf
for i, mol in enumerate(mol_supp):
if skipmols and i < skipmols:
continue
if nmols and i >= nmols:
break
# rdkit returns None if it fails to parse a molecule. We will raise
# errors unless force is used.
if mol is None:
msg = 'Molecule {} could not be decoded.'.format(i + 1)
if error_bad_mol:
raise ValueError(msg)
elif warn_bad_mol:
warnings.warn(msg)
continue
mols.append(Mol(mol))
if skipfooter:
mols = mols[:-skipfooter]
idx = pd.Index((m.name for m in mols), name='name')
data = pd.DataFrame(mols, columns=['structure'])
if read_props:
props = pd.DataFrame([{k: v for (k, v) in mol.props.items()} for mol in mols])
data = pd.concat([data, props], axis=1)
# now we have extracted the props, we can delete if required
if not mol_props:
data.apply(_drop_props, axis=1)
data.index = idx
return squeeze(data, axis=1)
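# Illustrative usage sketch (added for clarity; not part of the original module).
# 'compounds.sdf' is a hypothetical placeholder path, not a file shipped with skchem.
def _example_read_sdf(path='compounds.sdf'):
    """Load an SDF into a pandas structure and count the molecules that parsed."""
    loaded = read_sdf(path, error_bad_mol=False, warn_bad_mol=True)
    # read_sdf squeezes a single-column frame to a Series, so handle both shapes.
    structures = loaded if isinstance(loaded, pd.Series) else loaded.structure
    return len(structures)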
def write_sdf(data, sdf, write_cols=True, index_as_name=True, mol_props=False,
*args, **kwargs):
""" Write an sdf file from a dataframe.
Args:
data (pandas.Series or pandas.DataFrame):
Pandas data structure with a `structure` column containing compounds
to serialize.
sdf (str or file-like):
A file path or file-like object specifying where to write the
compound data.
write_cols (bool):
Whether columns should be written as props. Default `True`.
index_as_name (bool):
Whether to use index as the header, or the molecule's name.
Default is `True`.
mol_props (bool):
Whether to write properties in the Mol dictionary in addition to
fields in the frame.
Warn:
This function will change the names of the compounds if the
`index_as_name` argument is `True`, and will delete all properties in
the molecule dictionary if `mol_props` is `False`.
"""
if isinstance(data, pd.Series):
data = data.to_frame(name='structure')
names = [m.name for m in data.structure]
writer = Chem.SDWriter(sdf, *args, **kwargs)
cols = list(data.columns.drop('structure'))
if not mol_props:
data.apply(_drop_props, axis=1)
if write_cols:
data.apply(_set_props, cols=cols, axis=1)
if index_as_name:
data.apply(_set_name, axis=1)
data.structure.apply(writer.write)
# rdkit writer changes names sometimes
for mol, name in zip(data.structure, names):
mol.name = name
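# Illustrative write sketch (added for clarity; not part of the original module).
# 'out.sdf' is a hypothetical placeholder for any writable path.
def _example_write_sdf(frame, path='out.sdf'):
    """Serialize a DataFrame with a 'structure' column back to an SDF file."""
    # With the defaults used here, the remaining columns are written as SDF
    # properties and the index supplies each molecule's name.
    write_sdf(frame, path, write_cols=True, index_as_name=True)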
@wraps(write_sdf)
def _to_sdf_series(self, *args, **kwargs):
return write_sdf(self, write_cols=False, *args, **kwargs)
@wraps(write_sdf)
def _to_sdf_df(self, *args, **kwargs):
return write_sdf(self, *args, **kwargs)
pd.Series.to_sdf = _to_sdf_series
pd.DataFrame.to_sdf = _to_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_df(_, *args, **kwargs):
return read_sdf(*args, **kwargs)
pd.DataFrame.from_sdf = _from_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_series(_, *args, **kwargs):
return read_sdf(*args, **kwargs).structure
pd.Series.from_sdf = _from_sdf_series

# File: scikit-chem-0.0.6/skchem/filters/simple.py
from collections import Counter
import numpy as np
import pandas as pd
from ..resource import ORGANIC, PERIODIC_TABLE
from .base import Filter
class ElementFilter(Filter):
""" Filter by elements.
Args:
elements (list[str]):
A list of elements to filter with. If an element not in the list is
found in a molecule, return False, else return True.
as_bits (bool):
Whether to return boolean bits (`as_bits=True`) or integer counts for each element.
Examples:
Basic usage on molecules:
>>> import skchem
>>> has_halogen = skchem.filters.ElementFilter(['F', 'Cl', 'Br', 'I'], agg='any')
Molecules with one of the atoms transform to `True`.
>>> m1 = skchem.Mol.from_smiles('ClC(Cl)Cl', name='chloroform')
>>> has_halogen.transform(m1)
True
Molecules with none of the atoms transform to `False`.
>>> m2 = skchem.Mol.from_smiles('CC', name='ethane')
>>> has_halogen.transform(m2)
False
Can see the atom breakdown by passing `agg` == `False`:
>>> has_halogen.transform(m1, agg=False)
has_element
F 0
Cl 3
Br 0
I 0
Name: ElementFilter, dtype: int64
Can transform series.
>>> ms = [m1, m2]
>>> has_halogen.transform(ms)
chloroform True
ethane False
dtype: bool
>>> has_halogen.transform(ms, agg=False)
has_element F Cl Br I
chloroform 0 3 0 0
ethane 0 0 0 0
Can also filter series:
>>> has_halogen.filter(ms)
chloroform <Mol: ClC(Cl)Cl>
Name: structure, dtype: object
>>> has_halogen.filter(ms, neg=True)
ethane <Mol: CC>
Name: structure, dtype: object
"""
def __init__(self, elements=None, as_bits=False, **kwargs):
self.elements = elements
self.as_bits = as_bits
super(ElementFilter, self).__init__(**kwargs)
@property
def elements(self):
return self._elements
@elements.setter
def elements(self, val):
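# None means "no restriction": fall back to every symbol in the periodic table.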
if val is None:
self._elements = PERIODIC_TABLE.symbol.tolist()
else:
self._elements = val
@property
def columns(self):
return pd.Index(self.elements, name='has_element')
def _transform_mol(self, mol):
counter = Counter(atom.element for atom in mol.atoms)
res = pd.Series(counter)
res = res[self.elements].fillna(0).astype(int)
if self.as_bits:
res = (res > 0).astype(np.uint8)
return res
class OrganicFilter(ElementFilter):
""" Whether a molecule is organic.
For the purpose of this function, an organic molecule is defined as having
atoms with elements only in the set H, B, C, N, O, F, P, S, Cl, Br, I.
Args:
mol (skchem.Mol):
The molecule to be tested.
Returns:
bool:
Whether the molecule is organic.
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> of = skchem.filters.OrganicFilter()
>>> benzene = skchem.Mol.from_smiles('c1ccccc1', name='benzene')
>>> of.transform(benzene)
True
>>> ferrocene = skchem.Mol.from_smiles('[cH-]1cccc1.[cH-]1cccc1.[Fe+2]',
... name='ferrocene')
>>> of.transform(ferrocene)
False
More useful on collections:
>>> sa = skchem.Mol.from_smiles('CC(=O)[O-].[Na+]', name='sodium acetate')
>>> norbornane = skchem.Mol.from_smiles('C12CCC(C2)CC1', name='norbornane')
>>> data = [benzene, ferrocene, norbornane, sa]
>>> of.transform(data)
benzene True
ferrocene False
norbornane True
sodium acetate False
dtype: bool
>>> of.filter(data)
benzene <Mol: c1ccccc1>
norbornane <Mol: C1CC2CCC1C2>
Name: structure, dtype: object
>>> of.filter(data, neg=True)
ferrocene <Mol: [Fe+2].c1cc[cH-]c1.c1cc[cH-]c1>
sodium acetate <Mol: CC(=O)[O-].[Na+]>
Name: structure, dtype: object
"""
def __init__(self):
super(OrganicFilter, self).__init__(elements=None, agg='not any')
self.elements = [element for element in self.elements if element not in ORGANIC]
def n_atoms(mol, above=2, below=75, include_hydrogens=False):
""" Whether the number of atoms in a molecule falls in a defined interval.
``above <= n_atoms < below``
Args:
mol: (skchem.Mol):
The molecule to be tested.
above (int):
The lower threshold number of atoms (inclusive).
Defaults to 2.
below (int):
The higher threshold number of atoms (exclusive).
Defaults to 75.
Returns:
bool:
Whether the number of atoms in the molecule falls in the interval.
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> m = skchem.Mol.from_smiles('c1ccccc1') # benzene has 6 atoms.
Lower threshold:
>>> skchem.filters.n_atoms(m, above=3)
True
>>> skchem.filters.n_atoms(m, above=8)
False
Higher threshold:
>>> skchem.filters.n_atoms(m, below=8)
True
>>> skchem.filters.n_atoms(m, below=3)
False
Bounds work like Python slices - inclusive lower, exclusive upper:
>>> skchem.filters.n_atoms(m, above=6)
True
>>> skchem.filters.n_atoms(m, below=6)
False
Both can be used at once:
>>> skchem.filters.n_atoms(m, above=3, below=8)
True
Can include hydrogens:
>>> skchem.filters.n_atoms(m, above=3, below=8, include_hydrogens=True)
False
>>> skchem.filters.n_atoms(m, above=9, below=14, include_hydrogens=True)
True
"""
assert above < below, 'Interval {} < a < {} undefined.'.format(above, below)
n_a = len(mol.atoms)
if include_hydrogens:
n_a += sum(atom.GetNumImplicitHs() + atom.GetNumExplicitHs() for atom in mol.atoms)
return above <= n_a < below
class AtomNumberFilter(Filter):
"""Filter for whether the number of atoms in a molecule falls in a defined interval.
``above <= n_atoms < below``
Args:
above (int):
The lower threshold number of atoms (inclusive).
Defaults to 3.
below (int):
The higher threshold number of atoms (exclusive).
Defaults to 60.
Examples:
>>> import skchem
>>> data = [
... skchem.Mol.from_smiles('CC', name='ethane'),
... skchem.Mol.from_smiles('CCCC', name='butane'),
... skchem.Mol.from_smiles('NC(C)C(=O)O', name='alanine'),
... skchem.Mol.from_smiles('C12C=CC(C=C2)C=C1', name='barrelene')
... ]
>>> af = skchem.filters.AtomNumberFilter(above=3, below=7)
>>> af.transform(data)
ethane False
butane True
alanine True
barrelene False
Name: num_atoms_in_range, dtype: bool
>>> af.filter(data)
butane <Mol: CCCC>
alanine <Mol: CC(N)C(=O)O>
Name: structure, dtype: object
>>> af = skchem.filters.AtomNumberFilter(above=5, below=15, include_hydrogens=True)
>>> af.transform(data)
ethane True
butane True
alanine True
barrelene False
Name: num_atoms_in_range, dtype: bool
"""
def __init__(self, above=3, below=60, include_hydrogens=False, **kwargs):
assert above < below, 'Interval {} < a < {} undefined.'.format(above, below)
self.above = above
self.below = below
self.include_hydrogens = include_hydrogens
super(AtomNumberFilter, self).__init__(**kwargs)
def _transform_mol(self, mol):
return n_atoms(mol, above=self.above, below=self.below, include_hydrogens=self.include_hydrogens)
@property
def columns(self):
return pd.Index(['num_atoms_in_range'])
def mass(mol, above=10, below=900):
""" Whether a the molecular weight of a molecule is lower than a threshold.
``above <= mass < below``
Args:
mol: (skchem.Mol):
The molecule to be tested.
above (float):
The lower threshold on the mass (inclusive).
Defaults to 10.
below (float):
The higher threshold on the mass (exclusive).
Defaults to 900.
Returns:
bool:
Whether the mass of the molecule falls in the interval.
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> m = skchem.Mol.from_smiles('c1ccccc1') # benzene has M_r = 78.
>>> skchem.filters.mass(m, above=70)
True
>>> skchem.filters.mass(m, above=80)
False
>>> skchem.filters.mass(m, below=80)
True
>>> skchem.filters.mass(m, below=70)
False
>>> skchem.filters.mass(m, above=70, below=80)
True
"""
return above <= mol.mass < below
class MassFilter(Filter):
""" Filter whether a the molecular weight of a molecule is lower than a threshold.
``above <= mass < below``
Args:
above (float):
The lower threshold on the mass (inclusive).
Defaults to 3.
below (float):
The higher threshold on the mass (exclusive).
Defaults to 900.
Examples:
>>> import skchem
>>> data = [
... skchem.Mol.from_smiles('CC', name='ethane'),
... skchem.Mol.from_smiles('CCCC', name='butane'),
... skchem.Mol.from_smiles('NC(C)C(=O)O', name='alanine'),
... skchem.Mol.from_smiles('C12C=CC(C=C2)C=C1', name='barrelene')
... ]
>>> mf = skchem.filters.MassFilter(above=31, below=100)
>>> mf.transform(data)
ethane False
butane True
alanine True
barrelene False
Name: mass_in_range, dtype: bool
>>> mf.filter(data)
butane <Mol: CCCC>
alanine <Mol: CC(N)C(=O)O>
Name: structure, dtype: object
"""
def __init__(self, above=3, below=900, **kwargs):
assert above < below, 'Interval {} < a < {} undefined.'.format(above, below)
self.above = above
self.below = below
super(MassFilter, self).__init__( **kwargs)
def _transform_mol(self, mol):
return mass(mol, above=self.above, below=self.below)
@property
def columns(self):
return pd.Index(['mass_in_range'])

# File: scikit-chem-0.0.6/skchem/filters/smarts.py
from rdkit import RDConfig
import os
import pandas as pd
from .base import Filter
from ..core import Mol
class SMARTSFilter(Filter):
""" Filter a molecule based on smarts.
Args:
smarts (pd.Series):
A series of SMARTS to use in the filter.
agg (function):
Option specifying the mode of the filter.
- None : No filtering takes place.
- any: If any of the substructures are in the molecule, return True.
- all: If all of the substructures are in the molecule, return True.
Examples:
>>> import skchem
>>> data = [
... skchem.Mol.from_smiles('CC', name='ethane'),
... skchem.Mol.from_smiles('c1ccccc1', name='benzene'),
... skchem.Mol.from_smiles('c1ccccc1-c2c(C=O)ccnc2', name='big')
... ]
>>> f = skchem.filters.SMARTSFilter({'benzene': 'c1ccccc1', 'pyridine': 'c1ccccn1', 'acetyl': 'C=O'}, agg='any')
>>> f.transform(data, agg=False)
acetyl benzene pyridine
ethane False False False
benzene False True False
big True True True
>>> f.transform(data)
ethane False
benzene True
big True
dtype: bool
>>> f.filter(data)
benzene <Mol: c1ccccc1>
big <Mol: O=Cc1ccncc1-c1ccccc1>
Name: structure, dtype: object
>>> f.agg = all
>>> f.filter(data)
big <Mol: O=Cc1ccncc1-c1ccccc1>
Name: structure, dtype: object
"""
def __init__(self, smarts, **kwargs):
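# The filter accepts either SMARTS strings or already-built query Mol objects;
# strings are compiled with Mol.from_smarts below.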
def read_smarts(s):
if isinstance(s, str):
return Mol.from_smarts(s, mergeHs=True)
else:
return s
self.smarts = pd.Series(smarts).apply(read_smarts)
super(SMARTSFilter, self).__init__(**kwargs)
def _transform_mol(self, mol):
return self.smarts.apply(lambda smarts: smarts in mol).values
@property
def columns(self):
return self.smarts.index
class PAINSFilter(SMARTSFilter):
""" Whether a molecule passes the Pan Assay INterference (PAINS) filters.
These are supplied with RDKit, and were originally proposed by Baell et al.
References:
[The original paper](http://dx.doi.org/10.1021/jm901137j)
Examples:
Basic usage as a function on molecules:
>>> import skchem
>>> benzene = skchem.Mol.from_smiles('c1ccccc1', name='benzene')
>>> pf = skchem.filters.PAINSFilter()
>>> pf.transform(benzene)
True
>>> catechol = skchem.Mol.from_smiles('Oc1c(O)cccc1', name='catechol')
>>> pf.transform(catechol)
False
>>> res = pf.transform(catechol, agg=False)
>>> res[res]
names
catechol_A(92) True
Name: PAINSFilter, dtype: bool
More useful in combination with pandas DataFrames:
>>> data = [benzene, catechol]
>>> pf.transform(data)
benzene True
catechol False
dtype: bool
>>> pf.filter(data)
benzene <Mol: c1ccccc1>
Name: structure, dtype: object
"""
def __init__(self):
super(PAINSFilter, self).__init__(self._load_pains(), agg='not any')
@classmethod
def _load_pains(cls):
""" Load PAINS included in rdkit into a pandas dataframe and cache as class attribute. """
if not hasattr(cls, '_pains'):
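# The PAINS definitions ship with RDKit as SMARTS in wehi_pains.csv under RDConfig.RDDataDir.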
path = os.path.join(RDConfig.RDDataDir, 'Pains', 'wehi_pains.csv')
pains = pd.read_csv(path, names=['pains', 'names'])
pains['names'] = pains.names.str.lstrip('<regId=').str.rstrip('>')
pains = pains.set_index('names').pains.apply(Mol.from_smarts, mergeHs=True)
cls._pains = pains
return cls._pains

# File: scikit-chem-0.0.6/skchem/vis/atom.py
from rdkit.Chem.Draw import MolToImage, DrawingOptions
import numpy as np
from matplotlib import pyplot as plt
def plot_weights(mol, weights, quality=1, l=0.4, step=50, levels=20, contour_opacity=0.5, cmap='RdBu', ax=None, **kwargs):
""" Plot weights as a sum of gaussians across a structure image.
Args:
mol (skchem.Mol):
Molecule to visualize weights for.
weights (iterable<float>):
Array of weights in atom index order.
l (float):
Lengthscale of gaussians to visualize as a multiple of bond length.
step (int):
Size of grid edge to calculate the gaussians.
levels (int):
Number of contours to plot.
contour_opacity (float):
Alpha applied to the contour layer.
ax (plt.axis):
Axis to apply the plot to. Defaults to current axis.
cmap (plt.cm):
Colormap to use for the contour.
**kwargs:
Passed to contourf function.
Returns:
matplotlib.AxesSubplot: The plot.
"""
if not ax:
ax = plt.gca()
ax.grid('off')
ax.axis('off')
opts = DrawingOptions()
opts.dotsPerAngstrom *= quality
opts.atomLabelFontSize *= quality
opts.bondLineWidth *= quality
size = 300 * quality
img, canvas, drawer = MolToImage(mol, size=(size, size), options=opts, returnCanvas=True)
canvas.flush()
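# Recover each atom's drawing position from the canvas and normalise it to the
# unit square, flipping y so the origin matches matplotlib's convention.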
coords = np.array([[i / size, 1 - j / size] for k, (i, j) in list(drawer.atomPs.values())[0].items()])
b = mol.bonds[0]
begin, end = b.GetBeginAtom().GetIdx(), b.GetEndAtom().GetIdx()
length = np.linalg.norm(coords[end] - coords[begin])
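# Evaluate the gaussians on a fixed 500 x 500 grid spanning the unit square.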
x = np.linspace(0, 1, 500)
y = np.linspace(0, 1, 500)
x, y = np.meshgrid(x, y)
def gaussian(x, y, mu=np.zeros(2), sigma=np.identity(2), size=50):
return (1 / (2 * np.pi * sigma[0, 0] * sigma[1, 1]) * np.exp(-((x - mu[0]) ** 2 / (2 * sigma[0, 0] ** 2)
+ (y - mu[1]) ** 2 / (2 * sigma[1, 1] ** 2))))
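# Sum one gaussian per atom, weighted by its score, and use a symmetric colour
# range (+/- the largest magnitude) so zero maps to the centre of the colormap.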
if not np.max(weights) == np.min(weights) == 0:
z = sum([w * gaussian(x, y, mu, sigma=l * length * np.identity(2)) for mu, w in zip(coords, weights)])
v = np.max((np.abs(z.min()), np.abs(z.max())))
else:
z = np.zeros(x.shape)
v = 1
if z.min() >= 0:
levels = int(levels/2)
cf = ax.contourf(x, y, z, levels, alpha=contour_opacity, extent=(0, 1, 0, 1), vmin=-v, vmax=v, cmap=cmap, **kwargs)
ax.imshow(img, extent=(0, 1, 0, 1))
return ax

# File: scikit-ci-addons-0.25.0/HISTORY.rst
.. :changelog:
History
-------
scikit-ci-addons was initially developed in May 2016 by Omar Padron to facilitate
the continuous integration of the scikit-build project.
At that time, it consisted of code directly embedded in the CI script used in
scikit-build project.
Then, in early September 2016, with the desire to set up cross-platform continuous
integration for other projects and avoid duplication or maintenance hell, the code
was factored out by Jean-Christophe Fillion-Robin into a set of reusable scripts
available in the scikit-ci project. By simply cloning the repository, it was
possible to more easily enable CI for other projects.
While this was an improvement, it prevented the distribution of a standalone
and simple scikit-ci package. To better separate concerns and facilitate
testing and maintenance, in late September 2016, the scripts were moved into
their own project and scikit-ci-addons was born.
Finally, in late October 2016, Jean-Christophe came up with the concept of the
scikit-ci-addons command line tool, which allows executing the scripts (or add-ons)
distributed within the scikit-ci-addons package.
# File: scikit-ci-addons-0.25.0/README.rst
===============================
scikit-ci-addons
===============================
.. image:: https://readthedocs.org/projects/scikit-ci-addons/badge/?version=latest
:target: http://scikit-ci-addons.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
scikit-ci-addons is a command line tool and a set of scripts useful to help
drive the CI of projects leveraging services like Appveyor, CircleCI, or TravisCI.
Originally developed to help install prerequisites for building Python
extensions, it is now useful for supporting other types of projects.
Latest Release
--------------
.. table::
+------------------------------------------------------------------------------+----------------------------------------------------------------------------+
| Versions | Downloads |
+==============================================================================+============================================================================+
| .. image:: https://img.shields.io/pypi/v/scikit-ci-addons.svg?maxAge=2592000 | .. image:: https://img.shields.io/badge/downloads-92k%20total-green.svg |
| :target: https://pypi.python.org/pypi/scikit-ci-addons | :target: https://pypi.python.org/pypi/scikit-ci-addons |
+------------------------------------------------------------------------------+----------------------------------------------------------------------------+
Build Status
------------
.. table::
+---------------+------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------+
| | Linux | MacOSX | Windows |
+===============+==========================================================================================+=============================================================================================+========================================================================================================+
| PyPI | .. image:: https://circleci.com/gh/scikit-build/scikit-ci-addons.svg?style=shield | .. image:: https://img.shields.io/travis/scikit-build/scikit-ci-addons.svg?maxAge=2592000 | .. image:: https://ci.appveyor.com/api/projects/status/gr60jc9hkjlqoo4a?svg=true |
| | :target: https://circleci.com/gh/scikit-build/scikit-ci-addons | :target: https://travis-ci.org/scikit-build/scikit-ci-addons | :target: https://ci.appveyor.com/project/scikit-build/scikit-ci-addons/branch/master |
+---------------+------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------+
Overall Health
--------------
.. image:: https://codecov.io/gh/scikit-build/scikit-ci-addons/branch/master/graph/badge.svg
:target: https://codecov.io/gh/scikit-build/scikit-ci-addons
Miscellaneous
-------------
* Free software: Apache Software license
* Documentation: http://scikit-ci-addons.readthedocs.org
* Source code: https://github.com/scikit-build/scikit-ci-addons
* Mailing list: https://groups.google.com/forum/#!forum/scikit-build
# File: scikit-ci-addons-0.25.0/CONTRIBUTING.rst
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
Types of Contributions
----------------------
You can contribute in many ways:
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/scikit-build/scikit-ci-addons/issues.
If you are reporting a bug, please include:
* Any details about your CI setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
The scikit-ci-addons project could always use more documentation. We welcome help
with the official scikit-ci-addons docs, in docstrings, or even on blog posts and
articles for the web.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at
https://github.com/scikit-build/scikit-ci-addons/issues.
If you are proposing a new feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started
-----------
Ready to contribute? Here's how to set up `scikit-ci-addons` for local development.
1. Fork the `scikit-ci-addons` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/scikit-ci-addons.git
3. Install your local copy into a virtualenv. Assuming you have
virtualenvwrapper installed (`pip install virtualenvwrapper`), this is how
you set up your cloned fork for local development::
$ mkvirtualenv scikit-ci-addons
$ cd scikit-ci-addons/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and
the tests, including testing other Python versions with tox::
$ flake8
$ python setup.py test
$ tox
If needed, you can get flake8 and tox by using `pip install` to install
them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in `README.rst`.
3. The pull request should work for Python 2.7, 3.4, 3.5, 3.6 and 3.7.
Check https://travis-ci.org/scikit-build/scikit-ci-addons/pull_requests
and make sure that the tests pass for all supported Python versions.
# File: scikit-ci-addons-0.25.0/docs/make_a_release.rst
.. _making_a_release:
================
Making a release
================
A core developer should use the following steps to create a release `X.Y.Z` of
**scikit-ci-addons** on `PyPI`_.
-------------
Prerequisites
-------------
* All CI tests are passing on `AppVeyor`_, `CircleCI`_ and `Travis CI`_.
* You have a `GPG signing key <https://help.github.com/articles/generating-a-new-gpg-key/>`_.
-------------------------
Documentation conventions
-------------------------
The commands reported below should be evaluated in the same terminal session.
Commands to evaluate start with a dollar sign. For example::
$ echo "Hello"
Hello
means that ``echo "Hello"`` should be copied and evaluated in the terminal.
----------------------
Setting up environment
----------------------
1. First, `register for an account on PyPI <https://pypi.org>`_.
2. If not already the case, ask to be added as a ``Package Index Maintainer``.
3. Create a ``~/.pypirc`` file with your login credentials::
[distutils]
index-servers =
pypi
pypitest
[pypi]
username=<your-username>
password=<your-password>
[pypitest]
repository=https://test.pypi.org/legacy/
username=<your-username>
password=<your-password>
where ``<your-username>`` and ``<your-password>`` correspond to your PyPI account.
---------------------
`PyPI`_: Step-by-step
---------------------
1. Make sure that all CI tests are passing on `AppVeyor`_, `CircleCI`_ and `Travis CI`_.
2. Download the latest sources
.. code::
$ cd /tmp && \
git clone [email protected]:scikit-build/scikit-ci-addons && \
cd scikit-ci-addons
3. List all tags sorted by version
.. code::
$ git fetch --tags && \
git tag -l | sort -V
4. Choose the next release version number
.. code::
$ release=X.Y.Z
.. warning::
To ensure the packages are uploaded on `PyPI`_, tags must match this regular
expression: ``^[0-9]+(\.[0-9]+)*(\.post[0-9]+)?$``.
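For example, ``0.25.0`` or ``0.25.0.post1`` would be uploaded, while ``v0.25.0`` or ``0.25.0rc1`` would not.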
5. In `README.rst`, update `PyPI`_ download count after running `this big table query`_
and commit the changes.
.. code::
$ git add README.rst && \
git commit -m "README: Update download stats [ci skip]"
.. note::
To learn more about `pypi-stats`, see `How to get PyPI download statistics <https://kirankoduru.github.io/python/pypi-stats.html>`_.
6. Tag the release
.. code::
$ git tag --sign -m "scikit-ci-addons ${release}" ${release} master
.. warning::
We recommend using a `GPG signing key <https://help.github.com/articles/generating-a-new-gpg-key/>`_
to sign the tag.
7. Create the source distribution and wheel
.. code::
$ python setup.py sdist bdist_wheel
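.. note::
The source archive and the wheel are written to the ``dist`` directory; these are the files uploaded in step 9.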
8. Publish both the release tag and the master branch
.. code::
$ git push origin ${release} && \
git push origin master
9. Upload the distributions on `PyPI`_
.. code::
$ twine upload dist/*
.. note::
To first upload on `TestPyPI`_ , do the following::
$ twine upload -r pypitest dist/*
10. Create a clean testing environment to test the installation
.. code::
$ mkvirtualenv scikit-ci-addons-${release}-install-test && \
pip install scikit-ci-addons && \
ci_addons --list && \
ci_addons --version
.. note::
If the ``mkvirtualenv`` command is not available, this means you do not have `virtualenvwrapper`_
installed, in that case, you could either install it or directly use `virtualenv`_ or `venv`_.
To install from `TestPyPI`_, do the following::
$ pip install -i https://test.pypi.org/simple scikit-ci-addons
11. Cleanup
.. code::
$ deactivate && \
rm -rf dist/* && \
rmvirtualenv scikit-ci-addons-${release}-install-test
12. Add a ``Next Release`` section back in `CHANGES.rst`, commit and push local changes.
.. code::
$ git add CHANGES.rst && \
git commit -m "CHANGES.rst: Add \"Next Release\" section [ci skip]" && \
git push origin master
.. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/
.. _virtualenv: http://virtualenv.readthedocs.io
.. _venv: https://docs.python.org/3/library/venv.html
.. _AppVeyor: https://ci.appveyor.com/project/scikit-build/scikit-ci-addons/history
.. _CircleCI: https://circleci.com/gh/scikit-build/scikit-ci-addons
.. _Travis CI: https://travis-ci.org/scikit-build/scikit-ci-addons/builds
.. _PyPI: https://pypi.org/project/scikit-ci-addons
.. _TestPyPI: https://test.pypi.org/project/scikit-ci-addons
.. _this big table query: https://bigquery.cloud.google.com/savedquery/280188050539:ce2c8d333d7d455aae8b76a7c0de7dae
=======
Add-ons
=======
Each category is named after a CI worker (e.g AppVeyor) or operating system (e.g Windows)
and references add-ons designed to be used on the associated continuous integration service
or system.
An add-on is a file that can either be executed directly or used as a
parameter for another tool.
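For instance (a sketch; both commands are described in detail in the :doc:`usage </usage>` section):

.. code-block:: bash

    $ ci_addons anyci/noop                          # execute an add-on directly
    $ ci_addons --path appveyor/rolling-build.ps1   # get its path for use by another tool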
Anyci
-----
This is a special category containing scripts that can be executed on a broad
range of CI services.
.. include:: anyci/ctest_junit_formatter.rst
.. include:: anyci/docker_py.rst
.. include:: anyci/noop_py.rst
.. include:: anyci/publish_github_release_py.rst
.. include:: anyci/run_sh.rst
Appveyor
--------
These scripts are designed to work on workers from http://appveyor.com/
.. include:: appveyor/enable-worker-remote-access_ps1.rst
.. include:: appveyor/install_cmake_py.rst
.. include:: appveyor/run-with-visual-studio_cmd.rst
.. include:: appveyor/patch_vs2008_py.rst
.. include:: appveyor/rolling-build_ps1.rst
.. include:: appveyor/tweak_environment_py.rst
Circle
------
These scripts are designed to work on workers from http://circleci.com/
.. include:: circle/install_cmake_py.rst
Travis
------
These scripts are designed to work on workers from http://travis-ci.org/
.. include:: travis/install_cmake_py.rst
.. include:: travis/pyenv.rst
.. include:: travis/enable-worker-remote-access_sh.rst
Windows
-------
These scripts are designed to work on any Windows workstation running Windows 7 and above, and can
be used directly from a PowerShell terminal (or command line terminal) using a simple one-liner.
Content of the scripts can easily be inspected in the `associated source repository <https://github.com/scikit-build/scikit-ci-addons/tree/master/windows>`_.
.. include:: windows/install-scripts.rst
=====
Usage
=====
The scikit-ci-addons command line executable allows you to discover, execute, and
get the path of any of the distributed :doc:`add-ons </addons>`.
Executing an add-on
-------------------
::
ci_addons ADDON_NAME
where ``ADDON_NAME`` can be any of the names displayed using ``ci_addons --list``.
For example:
.. code-block:: bash
$ ci_addons appveyor/patch_vs2008
Listing available add-ons
-------------------------
::
ci_addons --list
For example:
.. code-block:: bash
$ ci_addons --list
anyci/ctest_junit_formatter.py
anyci/publish_github_release.py
anyci/run.sh
anyci/ctest_junit_formatter.xsl
anyci/noop.py
anyci/docker.py
appveyor/enable-worker-remote-access.ps1
appveyor/install_cmake.py
appveyor/apply_mingw_path_fix.py
appveyor/run.cmd
appveyor/patch_vs2008.py
appveyor/run-with-mingw.cmd
appveyor/cancel-queued-build.ps1
appveyor/rolling-build.ps1
appveyor/tweak_environment.py
appveyor/run-with-visual-studio.cmd
circle/install_cmake.py
travis/install_cmake.py
travis/enable-worker-remote-access.sh
travis/run-with-pyenv.sh
travis/install_pyenv.py
windows/install-miniconda3.ps1
windows/install-utils.ps1
windows/install-cmake.ps1
windows/install-python-27-x64.ps1
windows/install-nsis.ps1
windows/install-svn.ps1
windows/install-ninja.ps1
windows/install-python.ps1
windows/install-python-36-x64.ps1
windows/install-git.ps1
windows/install-flang.ps1
.. note::
To learn more about each add-on, consider reading the
:doc:`add-ons </addons>` section.
Getting directory containing all add-ons
----------------------------------------
::
ci_addons --home
For example:
.. code-block:: bash
$ ci_addons --home
/home/jcfr/.virtualenvs/test/local/lib/python2.7/site-packages
Installing add-ons into selected directory
------------------------------------------
::
ci_addons --install DIR
where ``DIR`` is a valid path to an existing directory.
For example:
.. code-block:: bash
$ ci_addons --install /tmp
/tmp/anyci/ctest_junit_formatter.py
/tmp/anyci/publish_github_release.py
/tmp/anyci/run.sh
/tmp/anyci/ctest_junit_formatter.xsl
/tmp/anyci/noop.py
/tmp/anyci/docker.py
/tmp/appveyor/enable-worker-remote-access.ps1
/tmp/appveyor/install_cmake.py
/tmp/appveyor/apply_mingw_path_fix.py
/tmp/appveyor/run.cmd
/tmp/appveyor/patch_vs2008.py
/tmp/appveyor/run-with-mingw.cmd
/tmp/appveyor/cancel-queued-build.ps1
/tmp/appveyor/rolling-build.ps1
/tmp/appveyor/tweak_environment.py
/tmp/appveyor/run-with-visual-studio.cmd
/tmp/circle/install_cmake.py
/tmp/travis/install_cmake.py
/tmp/travis/enable-worker-remote-access.sh
/tmp/travis/run-with-pyenv.sh
/tmp/travis/install_pyenv.py
/tmp/windows/install-miniconda3.ps1
/tmp/windows/install-utils.ps1
/tmp/windows/install-cmake.ps1
/tmp/windows/install-python-27-x64.ps1
/tmp/windows/install-nsis.ps1
/tmp/windows/install-svn.ps1
/tmp/windows/install-ninja.ps1
/tmp/windows/install-python.ps1
/tmp/windows/install-python-36-x64.ps1
/tmp/windows/install-git.ps1
/tmp/windows/install-flang.ps1
Getting full path of an add-on
------------------------------
::
ci_addons --path PATH
where ``PATH`` can be any of these:
- relative path with or without extension (e.g ``appveyor/patch_vs2008.py``
or ``appveyor/patch_vs2008``)
- full path (e.g ``/path/to/appveyor/patch_vs2008.py``)
- script name with or without extension (e.g ``patch_vs2008.py``
or ``patch_vs2008``). If there are multiple add-ons with the same name,
``ci_addons`` reports an error message listing the add-ons to choose from.
For example:
.. code-block:: bash
$ ci_addons --path appveyor/patch_vs2008.py
/home/jcfr/.virtualenvs/test/local/lib/python2.7/site-packages/appveyor/patch_vs2008.py
.. note::
This function is particularly useful when the selected add-on is not a
python script and is expected to be used as an input to another tool.
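For example, the path of the ``anyci/ctest_junit_formatter.xsl`` add-on could be fed
to an external XSLT processor (a hypothetical invocation: ``xsltproc`` availability and
the input/output file names are assumptions):

.. code-block:: bash

    $ xsltproc "$(ci_addons --path anyci/ctest_junit_formatter.xsl)" Test.xml > JUnit.xml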
Calling scikit-ci-addons through ``python -m ci_addons``
--------------------------------------------------------
You can invoke scikit-ci-addons through the Python interpreter from the command
line::
python -m ci_addons [...]
This is equivalent to invoking the command line script ``ci_addons [...]``
directly.
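For example, the commands shown earlier can equivalently be written as:

.. code-block:: bash

    $ python -m ci_addons --list
    $ python -m ci_addons appveyor/patch_vs2008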
Getting help on version, option names
-------------------------------------
::
ci_addons --version # shows where ci_addons was imported from
ci_addons -h | --help # show help on command line
.. scikit-ci-addons documentation master file, created by
sphinx-quickstart on Thu Oct 27 04:37:15 2016.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to scikit-ci-addons's documentation!
============================================
scikit-ci-addons is a command line tool and a set of scripts useful to help
drive the CI of projects leveraging services like `AppVeyor`_, `CircleCI`_, or `Travis CI`_.
Originally developed to help install prerequisites for building Python
extensions, it is now useful to support other types of projects.
.. toctree::
:maxdepth: 2
:caption: User guide
installation
usage
addons
contributing
authors
history
.. toctree::
:maxdepth: 2
:caption: For maintainers
make_a_release
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
Resources
=========
* Free software: Apache Software license
* Documentation: http://scikit-ci-addons.readthedocs.org
* Source code: https://github.com/scikit-build/scikit-ci-addons
* Mailing list: https://groups.google.com/forum/#!forum/scikit-build
.. _AppVeyor: https://ci.appveyor.com
.. _CircleCI: https://circleci.com
.. _Travis CI: https://travis-ci.com
============
Installation
============
Install package with pip
------------------------
To install with pip::
$ pip install scikit-ci-addons
Install from source
-------------------
To install scikit-ci-addons from the latest source, first obtain the source code::
$ git clone https://github.com/scikit-build/scikit-ci-addons
$ cd scikit-ci-addons
then install with::
$ pip install .
or::
$ pip install -e .
for development.
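A quick way to check the installation (the exact output will vary with the installed version)::

    $ ci_addons --version
    $ ci_addons --list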
Dependencies
------------
Python Packages
^^^^^^^^^^^^^^^
The project has a few common Python package dependencies. The runtime
dependencies are:
.. include:: ../requirements.txt
:literal:
The development dependencies (for testing and coverage) are:
.. include:: ../requirements-dev.txt
:literal:
For example, on a new system without python or git installed, they can be installed from a powershell terminal
open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-python-36-x64.ps1'))
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-git.ps1'))
Read `here <https://technet.microsoft.com/en-us/library/ee176961.aspx>`_ to learn about the
powershell execution policy.
Details for each ``install-*.ps1`` scripts are reported below.
``install-cmake.ps1``
^^^^^^^^^^^^^^^^^^^^^
Install selected CMake version in ``C:\cmake-X.Y.Z``.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
$cmakeVersion="3.8.1"
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-cmake.ps1'))
.. note::
- CMake is **NOT** added to the ``PATH``
- setting ``$cmakeVersion`` to "X.Y.Z" before executing the script allows selecting a specific CMake version.
- on AppVeyor, the download and install can be skipped by adding directory ``C:\cmake-X.Y.Z`` to the ``cache``. For more details, see https://www.appveyor.com/docs/build-cache/#configuring-cache-items
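For example, a minimal ``appveyor.yml`` cache entry matching the ``$cmakeVersion`` used
above could look like this (the version number is illustrative)::

    cache:
      - C:\cmake-3.8.1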
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-flang.ps1``
^^^^^^^^^^^^^^^^^^^^^
Install latest ``flang`` in a new conda environment named `flang-env`.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-flang.ps1'))
Flang is a Fortran compiler targeting LLVM; it was `announced <https://www.llnl.gov/news/nnsa-national-labs-team-nvidia-develop-open-source-fortran-compiler-technology>`_
in 2015.
Source code is hosted on GitHub at https://github.com/flang-compiler/flang; the Windows fork is hosted at https://github.com/isuruf/flang
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-git.ps1``
^^^^^^^^^^^^^^^^^^^
Install Git 2.11.0 (including Git Bash) on the system.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-git.ps1'))
.. note::
- Git executables are added to the ``PATH``
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-miniconda3.ps1``
^^^^^^^^^^^^^^^^^^^^^^^^^^
Install latest miniconda3 environment into ``C:\Miniconda3``.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-miniconda3.ps1'))
.. note::
- miniconda environment is **NOT** added to the ``PATH`` and registry.
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-ninja.ps1``
^^^^^^^^^^^^^^^^^^^^^
Install ninja executable v1.7.2 into ``C:\ninja-1.7.2``.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-ninja.ps1'))
.. note::
- ninja executable is **NOT** added to the ``PATH``
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-nsis.ps1``
^^^^^^^^^^^^^^^^^^^^
Install NSIS 3.01 on the system.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-nsis.ps1'))
.. note::
- nsis executable is added to the ``PATH``
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-python.ps1``
^^^^^^^^^^^^^^^^^^^^^^
Install Python 2.7.15, 3.4.4, 3.5.4, 3.6.8, 3.7.2 and 3.8.0a2 (32 and 64-bit) along with pip and virtualenv
in the following directories: ::
C:\Python27-x64
C:\Python27-x86
C:\Python34-x64
C:\Python34-x86
C:\Python35-x64
C:\Python35-x86
C:\Python36-x64
C:\Python36-x86
C:\Python37-x64
C:\Python37-x86
C:\Python38-x64
C:\Python38-x86
.. note::
- python interpreter is **NOT** added to the ``PATH``
- setting ``$pythonVersion`` to either "2.7", "3.4", "3.5", "3.6", "3.7" or "3.8" before executing the script allows
installing a specific version. By default, all are installed.
- setting ``$pythonArch`` to either "86", "32" or "64" before executing the script allows
installing Python for a specific architecture. By default, both are installed.
Values "86" and "32" correspond to the same architecture.
- setting ``$pythonPrependPath`` to 1 will add the install and Scripts directories to the ``PATH`` and ``.PY`` to ``PATHEXT``. This
variable should be set only if ``$pythonVersion`` and ``$pythonArch`` are set. By default, the value is 0.
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
.. warning::
- The downloaded versions of python may **NOT** be the latest version including security patches.
If running in a production environment (e.g webserver), these versions should be built from source.
``install-python-27-x64.ps1``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Install Python 2.7 64-bit and update the PATH.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-python-27-x64.ps1'))
This is equivalent to: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
$pythonVersion = "2.7"
$pythonArch = "64"
$pythonPrependPath = "1"
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-python.ps1'))
.. note::
- ``C:\Python27-x64`` and ``C:\Python27-x64\Scripts`` are prepended to the ``PATH``
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-python-36-x64.ps1``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Install Python 3.6 64-bit and update the PATH.
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-python-36-x64.ps1'))
This is equivalent to: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
$pythonVersion = "3.6"
$pythonArch = "64"
$pythonPrependPath = "1"
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-python.ps1'))
.. note::
- ``C:\Python36-x64`` and ``C:\Python36-x64\Scripts`` are prepended to the ``PATH``
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-svn.ps1``
^^^^^^^^^^^^^^^^^^^^
Install `Slik SVN <https://sliksvn.com/download/>`_ 1.9.5 in the following directory: ::
C:\SlikSvn
From a powershell terminal open as administrator: ::
Set-ExecutionPolicy Unrestricted -Force
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-svn.ps1'))
.. note::
- svn executable is added to the ``PATH``
.. note::
- to understand why ``SecurityProtocol`` is set, see :ref:`addressing_underlying_connection_closed`
``install-utils.ps1``
^^^^^^^^^^^^^^^^^^^^^
This script is automatically included (and downloaded if needed) by the other add-ons; it
provides convenience functions useful to download and install programs:
``Always-Download-File($url, $file)``:
Systematically download `$url` into `$file`.
``Download-File($url, $file)``:
If file is not found, download `$url` into `$file`.
``Download-URL($url, $downloadDir)``:
Download `$url` into `$downloadDir`. The filename is extracted from `$url`.
``Install-MSI($fileName, $downloadDir, $targetDir)``:
Programmatically install the MSI installer `$downloadDir\$fileName`
into `$targetDir`. The package is installed for all users.
``Which($progName)``
Search for `$progName` in the ``PATH`` and return its full path.
``Download-7zip($downloadDir)``:
If not found, download 7zip executable ``7za.exe`` into `$downloadDir`. The function
returns the full path to the executable.
``Always-Extract-Zip($filePath, $destDir)``:
Systematically extract zip file `$filePath` into `$destDir` using
7zip. If 7zip executable ``7za.exe`` is not found in `$downloadDir`, it is downloaded
using function ``Download-7zip``.
``Extract-Zip($filePath, $destDir)``:
Extract zip file into `$destDir` only if `$destDir` does not exist.
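For illustration, a minimal PowerShell sketch using some of these helpers is shown below;
the dot-sourcing step, the URL, and the paths are assumptions and not taken from any add-on: ::

    # Load the helper functions (assumes install-utils.ps1 is in the current directory)
    . .\install-utils.ps1
    $downloadDir = "C:\Downloads"
    # Download the archive into $downloadDir, then extract it with 7zip
    Download-URL 'https://example.com/tool-1.0.zip' $downloadDir
    Extract-Zip (Join-Path $downloadDir 'tool-1.0.zip') 'C:\tool-1.0'
    # Locate an executable available in the PATH
    $gitPath = Which 'git.exe'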
Frequently Asked Questions
^^^^^^^^^^^^^^^^^^^^^^^^^^
Installing add-on from a Windows command line terminal
""""""""""""""""""""""""""""""""""""""""""""""""""""""
This can be done using the following syntax::
@powershell -ExecutionPolicy Unrestricted "iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-ninja.ps1'))"
.. _addressing_underlying_connection_closed:
Addressing "The underlying connection was closed" error
"""""""""""""""""""""""""""""""""""""""""""""""""""""""
::
PS C:\Users\dashboard> iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/windows/install-python.ps1'))
Error: 0
Description: The underlying connection was closed: An unexpected error occurred on a receive.
As explained in the `chocolatey documentation <https://github.com/chocolatey/choco/wiki/Installation#installing-with-restricted-tls>`_,
this most likely happens because the build script is attempting to download from a server that needs to use TLS 1.1 or
TLS 1.2 and has restricted the use of TLS 1.0 and SSL v3.
The first thing to try is to use the following snippet, replacing ``https://file/to/download`` with
the appropriate value::
$securityProtocolSettingsOriginal = [System.Net.ServicePointManager]::SecurityProtocol
try {
# Set TLS 1.2 (3072), then TLS 1.1 (768), then TLS 1.0 (192), finally SSL 3.0 (48)
# Use integers because the enumeration values for TLS 1.2 and TLS 1.1 won't
# exist in .NET 4.0, even though they are addressable if .NET 4.5+ is
# installed (.NET 4.5 is an in-place upgrade).
[System.Net.ServicePointManager]::SecurityProtocol = 3072 -bor 768 -bor 192 -bor 48
} catch {
Write-Warning 'Unable to set PowerShell to use TLS 1.2 and TLS 1.1 due to old .NET Framework installed. If you see underlying connection closed or trust errors, you may need to upgrade to .NET Framework 4.5 and PowerShell v3'
}
iex ((new-object net.webclient).DownloadString('https://file/to/download'))
[System.Net.ServicePointManager]::SecurityProtocol = $securityProtocolSettingsOriginal
If that does not address the problem, you should update the version of `.NET` installed and install
a newer version of PowerShell:
* https://en.wikipedia.org/wiki/.NET_Framework_version_history#Overview
* https://social.technet.microsoft.com/wiki/contents/articles/21016.how-to-install-windows-powershell-4-0.aspx
``patch_vs2008.py``
^^^^^^^^^^^^^^^^^^^
This script patches the installation of `Visual C++ 2008 Express <https://www.appveyor.com/docs/installed-software/#visual-studio-2008>`_
so that it can be used to build 64-bit projects.
Usage::
ci_addons appveyor/patch_vs2008.py
Credits:
- Xia Wei, sunmast#gmail.com
Links:
- http://www.cppblog.com/xcpp/archive/2009/09/09/vc2008express_64bit_win7sdk.html
.. note::
The add-on downloads `vs2008_patch.zip <https://github.com/menpo/condaci/raw/master/vs2008_patch.zip>`_
and executes ``setup_x64.bat``.
``run-with-visual-studio.cmd``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This is a wrapper script setting the Visual Studio environment
matching the selected version of Python. This is particularly
important when building Python C Extensions.
Usage::
ci_addons --install ../
../appveyor/run-with-visual-studio.cmd \\path\\to\\command [arg1 [...]]
Example::
SET PYTHON_DIR="C:\\Python35"
SET PYTHON_VERSION="3.5.x"
SET PYTHON_ARCH="64"
SET PATH=%PYTHON_DIR%;%PYTHON_DIR%\\Scripts;%PATH%
ci_addons --install ../
../appveyor/run-with-visual-studio.cmd python setup.py bdist_wheel
Author:
- Olivier Grisel
License:
- `CC0 1.0 Universal <http://creativecommons.org/publicdomain/zero/1.0/>`_
.. note::
- Python version selection is done by setting the ``PYTHON_VERSION`` and
``PYTHON_ARCH`` environment variables.
- Possible values for ``PYTHON_VERSION`` are:
- ``"2.7.x"``
- ``"3.4.x"``
- ``"3.5.x"``
- Possible values for ``PYTHON_ARCH`` are:
- ``"32"``
- ``"64"``
``enable-worker-remote-access.ps1``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Enable access to the build worker via Remote Desktop.
Usage::
- ci_addons --install ../
- ps: ../appveyor/enable-worker-remote-access.ps1 [-block|-check_for_block]
Example::
- ci_addons --install ../
- ps: ../appveyor/enable-worker-remote-access.ps1 -block
.. note::
- Calling this script will enable and display the Remote Desktop
connection details. By default, the connection will be available
for the length of the build.
- Specifying ``-block`` option will ensure the connection remains
open for at least 60 mins.
- Specifying ``-check_for_block`` option will keep the connection
open only if the environment variable ``BLOCK`` has been set to ``1``.
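For example, one way to set the variable for a debugging build is through the
``environment`` section of ``appveyor.yml`` (it can also be set from the AppVeyor UI
for a one-off build)::

    environment:
      BLOCK: "1"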
``rolling-build.ps1``
^^^^^^^^^^^^^^^^^^^^^
Cancel on-going build if there is a newer build queued for the same PR
Usage:
.. code-block:: yaml
- ps: rolling-build.ps1
.. note::
- If there is a newer build queued for the same PR, cancel this one.
The AppVeyor 'rollout builds' option is supposed to serve the same
purpose but it is problematic because it tends to cancel builds pushed
directly to master instead of just PR builds (or the converse).
credits: JuliaLang developers.
``install_cmake.py``
^^^^^^^^^^^^^^^^^^^^
Download and install in the PATH the specified version of CMake binaries.
Usage::
ci_addons appveyor/install_cmake.py X.Y.Z
Example::
$ ci_addons appveyor/install_cmake.py 3.6.2
.. note::
- CMake archive is downloaded and extracted into ``C:\\cmake-X.Y.Z``. That
same directory can then be added to the cache. See `Build Cache <https://www.appveyor.com/docs/build-cache/>`_
documentation for more details.
- ``C:\\cmake-X.Y.Z`` is prepended to the ``PATH``.
TODO: Is the env global on AppVeyor ? Or does this work only with scikit-ci ?
``publish_github_release.py``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Add-on automating the creation of GitHub releases.
Based on the git branch found in the current working directory, it can
automatically create a GitHub ``prerelease`` and/or ``release`` and upload
the associated packages.
Getting Started
"""""""""""""""
To create a pre-release named ``latest``::
ci_addons publish_github_release --prerelease-packages "dist/*"
To create a release named after the current tag::
ci_addons publish_github_release --release-packages "dist/*"
In both cases, packages found in the *dist* directory are uploaded.
.. note::
Pre-releases are created only if the current commit is *NOT* a tag (``latest`` tag is automatically
ignored). Similarly, releases are created *ONLY* if current commit is a tag (different from ``latest``).
Terminology
"""""""""""
**Prerelease**: By default, this corresponds to a GitHub prerelease associated with a tag named
``latest`` and named ``Latest (updated on YYYY-MM-DD HH:MM UTC)``. The prerelease is automatically
updated each time the ``publish_github_release`` script is executed. Updating the ``latest``
prerelease means that (1) the latest tag is updated to point to the current HEAD, (2) the name is
updated and (3) the latest packages are uploaded to replace the previous ones. GitHub prereleases are
basically releases with the *draft* option set to False and the *prerelease* option set to True.
**Release**: This corresponds to a GitHub release automatically created by the ``publish_github_release``
script only if it finds that HEAD is associated with a tag different from ``latest``. It has both
*draft* and *prerelease* options set to False. Once packages have been associated with such a release,
they are not expected to be removed.
Usage
"""""
::
ci_addons publish_github_release [-h]
[--release-packages [PATTERN [PATTERN ...]]]
[--prerelease-packages [PATTERN [PATTERN ...]]]
[--prerelease-packages-clear-pattern PATTERN]
[--prerelease-packages-keep-pattern PATTERN]
[--prerelease-tag PRERELEASE_TAG]
[--prerelease-name PRERELEASE_NAME]
[--prerelease-sha PRERELEASE_SHA]
[--token GITHUB_TOKEN]
[--exit-success-if-missing-token]
[--re-upload]
[--display-python-wheel-platform]
[--dry-run]
ORG/PROJECT
.. note::
- Packages to upload can be a list of paths or a list of globbing patterns.
Mini-language for packages selection
""""""""""""""""""""""""""""""""""""
To facilitate selection of specific packages, if any of the strings described below are
found in arguments associated with either ``--prerelease-packages``
or ``--release-packages``, they will be replaced.
**<PYTHON_WHEEL_PLATFORM>**: This string is replaced by the current
platform as found in python wheel package names (e.g manylinux1, macosx, or win).
Executing ``ci_addons publish_github_release --display-python-wheel-platform``
returns the same string.
**<COMMIT_DATE>**: This string is replaced by the YYYYMMDD date
as returned by ``git show -s --format="%ci"``.
**<COMMIT_SHORT_SHA>**: This string is replaced by the sha
as returned by ``git rev-parse --short=7 HEAD``.
**<COMMIT_DISTANCE>**: This string is replaced by the distance
to the tag specified using ``--prerelease-tag``. If the tag does not exist,
it corresponds to the number of commits. This is particularly useful when
selecting prerelease packages generated using `pep440-pre style <https://github.com/warner/python-versioneer/blob/master/details.md#how-do-i-select-a-version-style>`_
implemented in `python-versioneer`.
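For instance, a hypothetical invocation combining these placeholders (the package name
pattern is illustrative) allows the same command line to be reused on every platform::

    $ ci_addons publish_github_release ORG/PROJECT \
        --prerelease-packages "dist/*.dev<COMMIT_DATE>*<PYTHON_WHEEL_PLATFORM>*.whl" \
        --prerelease-packages-clear-pattern "*<PYTHON_WHEEL_PLATFORM>*.whl" \
        --prerelease-packages-keep-pattern "*.dev<COMMIT_DATE>*.whl"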
Use case: Automatic upload of release packages associated with a tag
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
In this example, the script automatically detects that the current branch
HEAD is associated with the tag **1.0.0** and automatically uploads all
packages found in the ``dist`` directory.
::
$ cd PROJECT
$ git describe
1.0.0
$ ci_addons publish_github_release ORG/PROJECT \
--release-packages "dist/*"
Checking if HEAD is a release tag
Checking if HEAD is a release tag - yes (found 1.0.0: creating release)
created '1.0.0' release
Tag name : 1.0.0
ID : 5436107
Created : 2017-02-13T06:36:29Z
URL : https://github.com/ORG/PROJECT/releases/tag/1.0.0
Author : USERNAME
Is published : True
Is prerelease : False
uploading '1.0.0' release asset(s) (found 2):
uploading dist/sandbox-1.0.0-cp27-cp27m-manylinux1.whl
download_url: https://github.com/ORG/PROJECT/releases/download/1.0.0/sandbox-1.0.0-cp27-cp27m-manylinux1.whl
uploading dist/sandbox-1.0.0-cp35-cp35m-manylinux1.whl
download_url: https://github.com/ORG/PROJECT/releases/download/1.0.0/sandbox-1.0.0-cp35-cp35m-manylinux1.whl
Use case: Automatic creation of "nightly" prerelease from different build machines
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
When building projects using continuous integration services (e.g Appveyor,
TravisCI, or CircleCI), the *publish_github_release* script has the following
responsibilities:
* update the nightly tag reference
* update the release name
* keep only the most recent packages. This means that after successfully
uploading packages generated on a given platform, the older ones will be
removed.
To fulfill these requirements, *publish_github_release* provides two
convenient options ``--prerelease-packages-clear-pattern`` and ``--prerelease-packages-keep-pattern``.
**prerelease-packages-clear-pattern**: This option selects all packages
that should be removed from the prerelease. For example, on a machine responsible
for generating Windows python wheels, the following pattern can be used: ``"*win*.whl"``.
**prerelease-packages-keep-pattern**: This option keeps packages
that would otherwise be removed by the clear pattern. For example, assuming
development package names contain the date of the commit they are built from,
specifying a globbing pattern with that date deletes older packages while
keeping only the new ones built from that commit.
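For instance, on a machine producing macOS wheels, a matching pair of options could look like
this (illustrative, mirroring the Windows pattern mentioned above and assuming ``commit_date``
is set as in the example below)::
    $ ci_addons publish_github_release ORG/PROJECT \
        --prerelease-packages dist/*.dev${commit_date}*macosx*.whl \
        --prerelease-packages-clear-pattern "*macosx*.whl" \
        --prerelease-packages-keep-pattern "*.dev${commit_date}*.whl"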
In the following example, we assume a prerelease done on 2017-02-12 with
16 packages (4 linux, 4 macosx, and 8 windows) already exists. The command
reported below corresponds to the execution of the script on a linux machine,
after one additional commit has been done the next day.
::
$ cd PROJECT
$ git describe
1.0.0-2-g9d40177
$ commit_date=$(git log -1 --format="%ad" --date=local | date +%Y%m%d)
$ echo $commit_date
20170213
$ ci_addons publish_github_release ORG/PROJECT \
--prerelease-packages dist/*.dev${commit_date}*manylinux1*.whl \
--prerelease-packages-clear-pattern "*manylinux1*.whl" \
--prerelease-packages-keep-pattern "*.dev${commit_date}*.whl"
Checking if HEAD is a release tag
Checking if HEAD is a release tag - no (creating prerelease)
release nightly: already exists
uploading 'nightly' release asset(s) (found 4):
uploading dist/sandbox-1.0.0.dev20170213-cp27-cp27m-manylinux1_x86_64.whl
download_url: https://github.com/ORG/PROJECT/releases/download/nightly/sandbox-1.0.0.dev20170213-cp27-cp27m-manylinux1_x86_64.whl
uploading dist/sandbox-1.0.0.dev20170213-cp34-cp34m-manylinux1_x86_64.whl
download_url: https://github.com/ORG/PROJECT/releases/download/nightly/sandbox-1.0.0.dev20170213-cp34-cp34m-manylinux1_x86_64.whl
uploading dist/sandbox-1.0.0.dev20170213-cp35-cp35m-manylinux1_x86_64.whl
download_url: https://github.com/ORG/PROJECT/releases/download/nightly/sandbox-1.0.0.dev20170213-cp35-cp35m-manylinux1_x86_64.whl
uploading dist/sandbox-1.0.0.dev20170213-cp36-cp36m-manylinux1_x86_64.whl
download_url: https://github.com/ORG/PROJECT/releases/download/nightly/sandbox-1.0.0.dev20170213-cp36-cp36m-manylinux1_x86_64.whl
deleting 'nightly' release asset(s) (matched: 8, matched-but-keep: 4, not-matched: 12):
deleting sandbox-1.0.0.dev20170212-cp27-cp27m-manylinux1_x86_64.whl
deleting sandbox-1.0.0.dev20170212-cp34-cp34m-manylinux1_x86_64.whl
deleting sandbox-1.0.0.dev20170212-cp35-cp35m-manylinux1_x86_64.whl
deleting sandbox-1.0.0.dev20170212-cp36-cp36m-manylinux1_x86_64.whl
nothing to delete
resolved 'master' to '9d40177e6d3a69890de8ea359de2d02a943d2e10'
updating 'nightly' release:
target_commitish: '62fe605938ff252e4ddee05b5209299a1aa9a39e' -> '9d40177e6d3a69890de8ea359de2d02a943d2e10'
tag_name: 'nightly' -> 'nightly-tmp'
deleting reference refs/tags/nightly
updating 'nightly-tmp' release:
tag_name: 'nightly-tmp' -> 'nightly'
deleting reference refs/tags/nightly-tmp
updating 'nightly' release:
target_commitish: '62fe605938ff252e4ddee05b5209299a1aa9a39e' -> '9d40177e6d3a69890de8ea359de2d02a943d2e10'
Use case: Automatic creation of GitHub releases and prereleases
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
This can be done by combining the options ``--release-packages``
and ``--prerelease-packages``.
Note also the use of ``--display-python-wheel-platform`` to automatically
get the current python platform.
For example::
$ commit_date=$(git log -1 --format="%ad" --date=local | date +%Y%m%d)
$ platform=$(ci_addons publish_github_release ORG/PROJECT --display-python-wheel-platform)
$ echo $platform
manylinux1
$ ci_addons publish_github_release ORG/PROJECT \
--release-packages "dist/*" \
--prerelease-packages dist/*.dev${commit_date}*${platform}*.whl \
--prerelease-packages-clear-pattern "*${platform}*.whl" \
--prerelease-packages-keep-pattern "*.dev${commit_date}*.whl"
The same can also be achieved across platforms using the convenient mini-language for package
selection::
$ ci_addons publish_github_release ORG/PROJECT \
--release-packages "dist/*" \
--prerelease-packages "dist/*.dev<COMMIT_DATE>*<PYTHON_WHEEL_PLATFORM>*.whl" \
--prerelease-packages-clear-pattern "*<PYTHON_WHEEL_PLATFORM>*.whl" \
--prerelease-packages-keep-pattern "*.dev<COMMIT_DATE>*.whl"
Testing
"""""""
Since the add-on tests interact with the GitHub API, they are not included in the
regular scikit-ci-addons collection of tests executed using pytest. Instead,
they need to be executed manually following these steps:
(1) Generate a `personal access token <https://github.com/settings/tokens/new>`_
with at least ``public_repo`` scope enabled.
(2) Create a *test* project on GitHub with at least one commit.
(3) Check out sources of your *test* project.
(4) Create a virtual environment, download scikit-ci-addons source code, and install its requirements.
(5) Execute the test script.
For example::
export GITHUB_TOKEN=... # Change this with the token generated above in step (1)
TEST_PROJECT=jcfr/sandbox # Change this with the project name created above in step (2)
cd /tmp
git clone https://github.com/scikit-build/scikit-ci-addons
cd scikit-ci-addons/
mkvirtualenv scikit-ci-addons-test
pip install -r requirements.txt
SRC_DIR=$(pwd)
cd /tmp
git clone https://github.com/$TEST_PROJECT test-project
cd test-project
python $SRC_DIR/anyci/tests/test_publish_github_release.py $TEST_PROJECT --no-interactive
| scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/docs/anyci/publish_github_release_py.rst | publish_github_release_py.rst | 0.890348 | 0.504944 |
``docker.py``
^^^^^^^^^^^^^
Add-on facilitating docker use on CI services.
It allows loading an image from the local cache, pulling, and saving it back using
a convenient one-liner.
Usage::
ci_addons docker load-pull-save [-h] [--cache-dir CACHE_DIR] [--verbose]
NAME[:TAG|@DIGEST]
Example::
$ ci_addons docker load-pull-save hello-world:latest
[anyci:docker.py] Loading cached image from /home/jcfr/docker/hello-world-latest.tar
[anyci:docker.py] -> cached image not found
[anyci:docker.py] Pulling image: hello-world:latest
[anyci:docker.py] -> done
[anyci:docker.py] Reading image ID from current image
[anyci:docker.py] -> image ID: sha256:c54a2cc56cbb2f04003c1cd4507e118af7c0d340fe7e2720f70976c4b75237dc
[anyci:docker.py] Caching image
[anyci:docker.py] -> image cached: /home/jcfr/docker/hello-world-latest.tar
[anyci:docker.py] Saving image ID into /home/jcfr/docker/hello-world-latest.image_id
[anyci:docker.py] -> done
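After a successful run, the cache directory contains both the image archive and the image ID
file; a listing would look similar to the following (illustrative, assuming the default
``~/docker`` cache directory)::
    $ ls ~/docker
    hello-world-latest.image_id  hello-world-latest.tar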
.. note::
- Image is saved into the cache only if needed.
In addition to the image archive (e.g `image-name.tar`), a file containing
the image ID is also saved into the cache directory (e.g `image-name.image_id`).
This allows quickly reading back the image ID of the cached image and determining whether
the current image should be saved into the cache. | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/docs/anyci/docker_py.rst | docker_py.rst | 0.784443 | 0.337558 |
``install_cmake.py``
^^^^^^^^^^^^^^^^^^^^
Download the specified version of the CMake binaries and install them in the PATH.
Usage::
ci_addons circle/install_cmake.py X.Y.Z
Example::
$ ci_addons circle/install_cmake.py 3.6.2
.. note::
- The script will skip the download in two cases:
- if the current version matches the selected one.
- if the archive already exists in the ``$HOME/downloads`` directory.
- Adding directory ``$HOME/downloads`` to the CircleCI cache can speed up
the build. For more details, see `Caching Dependencies <https://circleci.com/docs/2.0/caching/>`_. | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/docs/circle/install_cmake_py.rst | install_cmake_py.rst | 0.828315 | 0.271651 |
``install_pyenv.py``
^^^^^^^^^^^^^^^^^^^^
Usage::
export PYTHON_VERSION=X.Y.Z
ci_addons travis/install_pyenv.py
.. note::
- Update the version of ``pyenv`` using ``brew``.
- Install the version of Python selected by setting the ``PYTHON_VERSION``
environment variable.
``run-with-pyenv.sh``
^^^^^^^^^^^^^^^^^^^^^
This is a wrapper script that sets up the environment corresponding to the
version selected by setting the ``PYTHON_VERSION`` environment variable.
Usage::
export PYTHON_VERSION=X.Y.Z
ci_addons --install ../
../travis/run-with-pyenv.sh python --version
| scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/docs/travis/pyenv.rst | pyenv.rst | 0.74158 | 0.228146 |
``enable-worker-remote-access.sh``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Enable access to the Travis build worker via netcat.
Prerequisites:
- To make use of this add-on, you first need to:
1. create an account on https://dashboard.ngrok.com
2. get the associated token (e.g ``xxxxxxxxxxxxxxxxxxxx``)
Usage:
- encrypt the environment variable and associated value using the travis client::
travis-cli encrypt NGROK_TOKEN=xxxxxxxxxxxxxxxxxxxx -r org/repo
- update ``travis.yml``::
[...]
env:
global:
- secure: "xyz...abc...dev="
[...]
install:
- [...]
- wget https://raw.githubusercontent.com/scikit-build/scikit-ci-addons/master/travis/enable-worker-remote-access.sh -O ../enable-worker-remote-access.sh
- chmod u+x ../enable-worker-remote-access.sh
script:
- [...]
after_success:
- ../enable-worker-remote-access.sh
after_failure:
- ../enable-worker-remote-access.sh
- the next time Travis builds the project, it will download ngrok and set up the tunnel. The output should
be similar to this one::
Executing ngrok
Executing nc
Authtoken saved to configuration file: /Users/travis/.ngrok2/ngrok.yml
INFO[06-05|07:11:10] no configuration paths supplied
INFO[06-05|07:11:10] using configuration at default config path path=/Users/travis/.ngrok2/ngrok.yml
INFO[06-05|07:11:10] open config file path=/Users/travis/.ngrok2/ngrok.yml err=nil
DBUG[06-05|07:11:10] init storage obj=controller mem size=52428800 err=nil
DBUG[06-05|07:11:10] Dialing direct, no proxy obj=tunSess
[...]
DBUG[06-05|07:11:10] decoded response obj=csess id=7d08567ce4a5 clientid=169864eb02eb6fba5f585bb6d27445cf sid=7
resp="&{ClientId:... URL:tcp://0.tcp.ngrok.io:18499 Proto:tcp Opts:map[Addr:0.tcp.ngrok.io:18499] Error: Extra:map[Token:xxxxxxxxxxxxxx]}" err=nil
where the URL and port allowing remote connection are ``0.tcp.ngrok.io`` and ``18499``.
- a connection with the worker can be established using netcat. In the example
below, the commands ``pwd`` and ``ls`` are executed::
$ nc 0.tcp.ngrok.io 18499
pwd
/Users/travis/build/jcfr/ci-sandbox
ls
LICENSE
README.md
appveyor.yml
circle.yml
images
ngrok
pipe
scripts
.. note::
To easily install the travis client, you could use the dockerized version
from `jcfr/docker-travis-cli <https://github.com/jcfr/docker-travis-cli>`_.
It can easily be installed using::
curl https://raw.githubusercontent.com/jcfr/docker-travis-cli/master/travis-cli.sh \
-o ~/bin/travis-cli && \
chmod +x ~/bin/travis-cli
Credits:
- Initial implementation copied from `fniephaus/travis-remote-shell <https://github.com/fniephaus/travis-remote-shell>`_
- Support for working with recent version of ``netcat`` adapted from `colesbury/travis-remote-shell <https://github.com/colesbury/travis-remote-shell>`_
and `emulating-netcat-e@stackoverflow <https://stackoverflow.com/questions/6269311/emulating-netcat-e/8161475#8161475>`_.
| scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/docs/travis/enable-worker-remote-access_sh.rst | enable-worker-remote-access_sh.rst | 0.77081 | 0.363732 |
``install_cmake.py``
^^^^^^^^^^^^^^^^^^^^
Download the specified version of the CMake binaries and install them in the PATH.
Usage::
ci_addons travis/install_cmake.py X.Y.Z
Example::
$ ci_addons travis/install_cmake.py 3.6.2
.. note::
- The script automatically detects the operating system (``Linux`` or ``macOS``)
and installs CMake in a valid location.
- The archives are downloaded in ``$HOME/downloads`` to allow
caching. See `Caching Dependencies and Directories <https://docs.travis-ci.com/user/caching/>`_
The script only performs the download if the CMake archive is not already present in ``$HOME/downloads``.
- Linux:
- Download directory is ``/home/travis/downloads``.
- To support workers with and without ``sudo`` enabled, CMake is installed
in ``HOME`` (i.e /home/travis). Since ``~/bin`` is already in the ``PATH``,
CMake executables will be available in the PATH after running this script.
- macOS:
- Download directory is ``/Users/travis/downloads``.
- Consider using this script only if the available version does **NOT**
work for you. See the `Compilers-and-Build-toolchain <https://docs.travis-ci.com/user/osx-ci-environment/#Compilers-and-Build-toolchain>`_
in Travis documentation.
- What does this script do? First, it removes the older version of the CMake
executable installed in ``/usr/local/bin``. Then, it installs the selected
version of CMake using ``sudo cmake-gui --install``. | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/docs/travis/install_cmake_py.rst | install_cmake_py.rst | 0.847084 | 0.28169 |
import argparse
import ci_addons
import os
import sys
def main():
"""The main entry point to ``ci_addons``.
This is installed as the script entry point.
"""
version_str = ("This is scikit-ci-addons version %s, imported from %s\n" %
(ci_addons.__version__, os.path.abspath(ci_addons.__file__)))
parser = argparse.ArgumentParser(description=ci_addons.__doc__)
parser.add_argument(
'addon', metavar='ADDON', type=str, nargs='?',
help='name of add-on to execute'
)
parser.add_argument(
'arguments', metavar='ARG', type=str, nargs='*',
help='add-on arguments'
)
parser.add_argument(
"--home", action="store_true",
help="display directory where all add-ons can be found"
)
parser.add_argument(
"--list", action="store_true",
help="list all available add-ons"
)
parser.add_argument(
"--path", type=str,
help="display add-on path"
)
parser.add_argument(
"--install", type=str, metavar="DIR",
help="install add-ons in the selected directory"
)
parser.add_argument(
"--version", action="version",
version=version_str,
help="display scikit-ci-addons version and import information"
)
# If an add-on is selected, let's extract its arguments now. This will
# prevent ci_addons parser from complaining about unknown parameters.
addon_arguments = []
if len(sys.argv) > 1:
try:
ci_addons.path(sys.argv[1])
addon_arguments = sys.argv[2:]
if len(addon_arguments) > 0 and addon_arguments[0] == '--':
addon_arguments.pop(0)
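            # Truncate sys.argv so argparse only sees the program name and the
            # add-on name; the add-on's own arguments are forwarded separately.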
sys.argv = sys.argv[:2]
except ci_addons.SKAddonsError:
pass
args = parser.parse_args()
args.arguments = addon_arguments
try:
if args.home: # pragma: no cover
print(ci_addons.home())
exit()
if args.list:
previous_collection = ""
for addon in ci_addons.addons():
current_collection = addon.split(os.path.sep)[0]
if previous_collection != current_collection:
print("")
print(addon)
previous_collection = current_collection
exit()
if args.path is not None: # pragma: no cover
print(ci_addons.path(args.path))
exit()
if args.install is not None: # pragma: no cover
ci_addons.install(args.install)
exit()
if all([not getattr(args, arg)
for arg in ['addon', 'home', 'install', 'list', 'path']]):
parser.print_usage()
exit()
ci_addons.execute(args.addon, args.arguments)
except ci_addons.SKAddonsError as error:
exit(error)
if __name__ == '__main__': # pragma: no cover
main() | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/ci_addons/__main__.py | __main__.py | 0.221519 | 0.065247 |
import os
import shutil
import sys
from subprocess import CalledProcessError, check_call
from ._version import get_versions
__author__ = 'The scikit-build team'
__email__ = '[email protected]'
__version__ = get_versions()['version']
del get_versions
DIR_NAMES = ['anyci', 'appveyor', 'circle', 'travis', 'windows']
class SKAddonsError(RuntimeError):
"""Exception raised when a user error occurs.
"""
pass
def addons():
"""Return all available add-ons."""
addons = []
for dirname, dirnames, filenames in os.walk(home()):
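        # Restrict the walk to the known add-on collections: drop whatever
        # os.walk discovered and descend only into the directories listed in
        # DIR_NAMES (directories that do not exist are simply skipped).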
for v in list(dirnames):
dirnames.remove(v)
dirnames += DIR_NAMES
if dirname == home():
continue
for filename in filenames:
if filename in ['__init__.py'] or filename.endswith(".pyc"):
continue
addon_path = os.path.join(dirname, filename)
addons.append(os.path.relpath(addon_path, home()))
return addons
def home():
"""Return directory where all add-ons can be found."""
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def path(addon_name):
"""Return path of ``addon_name``.
Supported values for ``addon_name`` are listed below:
- relative path with or without extension (e.g ``appveyor/patch_vs2008.py``
  or ``appveyor/patch_vs2008``)
- full path (e.g ``/path/to/appveyor/patch_vs2008.py``)
- script name with or without extension (e.g ``patch_vs2008.py``
or ``patch_vs2008``). If there are multiple matching scripts, a
``SKAddonsError`` exception is raised.
"""
def _path(_addon_name):
_addon_path = os.path.join(dir_name, home(), _addon_name)
if (not os.path.exists(_addon_path)
and not _addon_path.endswith(".py")):
_addon_path += '.py'
return _addon_path if os.path.exists(_addon_path) else ""
candidates = []
for dir_name in DIR_NAMES + [""]:
addon_path = _path(os.path.join(dir_name, addon_name))
if addon_path and addon_path not in candidates:
candidates.append(addon_path)
if len(candidates) > 1:
raise SKAddonsError(
"Failed to return a single path because it found %d matching "
"paths. You must select one of these:\n %s" % (
len(candidates), "\n ".join(candidates)))
elif len(candidates) == 1:
return candidates[0]
else:
raise SKAddonsError("Could not find addon: %s" % addon_name)
def install(dst_path, force=False):
"""Copy add-ons into ``dst_path``.
By default, existing add-ons are *NOT* overwritten. Specifying ``force``
allows overwriting them.
"""
dst_path = os.path.normpath(os.path.abspath(dst_path))
if dst_path == os.path.normpath(home()):
raise SKAddonsError(
"skipping install: target directory already contains add-ons")
for addon in addons():
dst_addon_path = os.path.join(dst_path, addon)
dst_addon_dir = os.path.split(dst_addon_path)[0]
if not os.path.exists(dst_addon_dir):
os.makedirs(dst_addon_dir)
src_addon_path = os.path.join(home(), addon)
extra = ""
do_copy = True
if os.path.exists(dst_addon_path):
extra = " (skipped)"
do_copy = False
if force:
extra = " (overwritten)"
do_copy = True
if do_copy:
shutil.copy(src_addon_path, dst_addon_path)
print(dst_addon_path + extra)
def execute(addon_name, arguments=[]):
"""Execute ``addon_name`` with ``arguments``.
Executable add-ons are python scripts.
"""
cmd = [sys.executable, path(addon_name)] + arguments
try:
check_call(cmd)
except CalledProcessError as error:
sys.exit(error.returncode) | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/ci_addons/__init__.py | __init__.py | 0.411229 | 0.102305 |
import errno
import os
import shutil
import sys
import zipfile
from subprocess import CalledProcessError, check_output
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
DEFAULT_CMAKE_VERSION = "3.5.2"
def _log(*args):
script_name = os.path.basename(__file__)
print("[appveyor:%s] " % script_name + " ".join(args))
sys.stdout.flush()
def install(cmake_version=DEFAULT_CMAKE_VERSION):
"""Download and install CMake into ``C:\\cmake``.
The function also make sure to prepend ``C:\\cmake\\bin``
to the ``PATH``."""
cmake_version_major = cmake_version.split(".")[0]
cmake_version_minor = cmake_version.split(".")[1]
cmake_directory = "C:\\cmake-{}".format(cmake_version)
cmake_package = "cmake-{}-win32-x86.zip".format(cmake_version)
_log("Looking for cmake", cmake_version, "in PATH")
try:
output = check_output(
"cmake --version", shell=True).decode("utf-8")
current_cmake_version = output.splitlines()[0]
if cmake_version in current_cmake_version:
_log(" ->", "found %s:" % current_cmake_version,
"skipping download: version matches expected one")
return
else:
_log(" ->", "found %s:" % current_cmake_version,
"not the expected version")
except (OSError, CalledProcessError):
_log(" ->", "not found")
pass
_log("Downloading", cmake_package)
if not os.path.exists(cmake_directory):
remote_file = urlopen(
"https://cmake.org/files/v{}.{}/{}".format(
cmake_version_major, cmake_version_minor, cmake_package))
with open("C:\\%s" % cmake_package, "wb") as local_file:
shutil.copyfileobj(remote_file, local_file)
_log(" ->", "done")
_log("Unpacking", cmake_package, "into", cmake_directory)
with zipfile.ZipFile("C:\\%s" % cmake_package) as local_zip:
local_zip.extractall(cmake_directory)
_log(" ->", "done")
cmake_system_install_dir = "C:\\Program Files (x86)\\CMake"
_log("Removing", cmake_system_install_dir)
shutil.rmtree(cmake_system_install_dir)
_log(" ->", "done")
# C:\\cmake-3.6.2\\cmake-3.6.2-win32-x86
cmake_package_no_ext = os.path.splitext(cmake_package)[0]
inner_directory = cmake_directory + "\\" + cmake_package_no_ext
_log("Moving", inner_directory, "to", cmake_system_install_dir)
shutil.move(inner_directory, cmake_system_install_dir)
shutil.rmtree(cmake_directory)
_log(" ->", "done")
# C:\\Program Files (x86)\\CMake\\bin\\cmake.exe
cmake_exe = "%s\\bin\\cmake.exe" % cmake_system_install_dir
_log("Checking if", cmake_exe, "exists")
if os.path.exists(cmake_exe):
_log(" ->", "found")
else:
# FileNotFoundError exception available only in python 3
raise OSError(errno.ENOENT, "File not found", cmake_exe)
else:
_log(" ->", "skipping download: directory %s exists" % cmake_package)
_log("Looking for cmake %s in PATH" % cmake_version)
output = check_output(
"cmake --version", shell=True).decode("utf-8")
current_cmake_version = output.splitlines()[0]
_log(" ->", "found %s" % current_cmake_version)
if __name__ == '__main__':
install(sys.argv[1] if len(sys.argv) > 1 else DEFAULT_CMAKE_VERSION) | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/appveyor/install_cmake.py | install_cmake.py | 0.293303 | 0.061876 |
import argparse
import os
import re
import subprocess
import sys
def _log(*args):
script_name = os.path.basename(__file__)
print("[anyci:%s] " % script_name + " ".join(args))
sys.stdout.flush()
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; slashes and colons are converted to
dashes; and anything that is not a unicode alphanumeric, dash, underscore,
or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
>>> get_valid_filename("library/hello-world:latest")
'library-hello-world-latest'
Copied from https://github.com/django/django/blob/20be1918e77414837178d6bf1657068c8306d50c/django/utils/encoding.py
Distributed under BSD-3 License
""" # noqa: E501
s = s.strip().replace(' ', '_').replace('/', '-').replace(':', '-')
return re.sub(r'(?u)[^-\w.]', '', s)
def main():
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(help='sub-command help')
# create the parser for the "load-pull-save" command
parser_pull = subparsers.add_parser("load-pull-save",
help="load-pull-save help")
parser_pull.add_argument(
"image", type=str, metavar="NAME[:TAG|@DIGEST]",
help="Load an image from local cache, pull and save back"
)
parser_pull.add_argument(
"--cache-dir", type=str, metavar="CACHE_DIR", default="~/docker",
help="Image cache directory (default: ~/docker)"
)
parser_pull.add_argument(
"--verbose", action="store_true",
help="Display pulling progress"
)
args = parser.parse_args()
if hasattr(args, 'image'):
# If needed, create cache directory
cache_dir = os.path.expanduser(args.cache_dir)
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
# Convert image to valid filename
filename = os.path.join(cache_dir, get_valid_filename(args.image))
image_filename = filename + '.tar'
image_id_filename = filename + '.image_id'
# If it exists, load cache image
cached_image_id = ""
_log("Loading cached image", "from", image_filename)
if os.path.exists(image_filename):
cmd = ["docker", "load", "-i", image_filename]
subprocess.check_output(cmd)
_log(" ->", "done")
# Read image id
if os.path.exists(image_id_filename):
_log("Reading cached image ID", "from", image_id_filename)
with open(image_id_filename) as content:
cached_image_id = content.readline()
_log(" ->", "cached image ID:", cached_image_id)
else:
_log(" ->", "cached image not found")
# Pull latest image if any
_log("Pulling image:", args.image)
cmd = ["docker", "pull", args.image]
(subprocess.check_call
if args.verbose else subprocess.check_output)(cmd)
_log(" ->", "done")
# Get ID of current image
_log("Reading image ID from current image")
cmd = ["docker", "inspect", "--format='{{.Config.Image}}'", args.image]
output = subprocess.check_output(cmd).decode("utf-8")
current_image_id = output.strip()
_log(" ->", "image ID:", current_image_id)
# Cache image only if updated
if cached_image_id != current_image_id:
_log("Caching image")
cmd = ["docker", "save", "-o", image_filename, args.image]
subprocess.check_output(cmd)
_log(" ->", "image cached:", image_filename)
_log("Saving image ID into", image_id_filename)
with open(image_id_filename, "w") as content:
content.write(current_image_id)
_log(" ->", "done")
else:
_log("Caching image")
_log(" ->", "Skipped because pulled image did not change")
else:
parser.print_usage()
if __name__ == '__main__':
main() | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/anyci/docker.py | docker.py | 0.517327 | 0.120879 |
import os
import subprocess
import sys
import textwrap
from subprocess import CalledProcessError, check_output
DEFAULT_CMAKE_VERSION = "3.5.0"
def _log(*args):
script_name = os.path.basename(__file__)
print("[circle:%s] " % script_name + " ".join(args))
sys.stdout.flush()
def _check_executables_availability(executables):
"""Try to run each executable with the `--version` argument. If at least
one could not be executed, it raises :exception:`RuntimeError` suggesting
approaches to mitigate the problem.
"""
missing_executables = []
for executable_name in executables:
try:
subprocess.check_output([executable_name, "--version"])
except (OSError, CalledProcessError):
missing_executables.append(executable_name)
if missing_executables:
raise RuntimeError(textwrap.dedent(
"""
The following executables are required to install CMake:
{missing_executables}
Few options to address this:
(1) install the missing executables using the system package manager. For example:
sudo apt-get install {missing_executables}
(2) install CMake wheel using pip. For example:
pip install cmake
""".format(
missing_executables=" ".join(missing_executables),
)
))
def install(cmake_version=DEFAULT_CMAKE_VERSION):
"""Download and install CMake into ``/usr/local``."""
_check_executables_availability(["rsync", "tar", "wget"])
cmake_directory = "/usr/local"
cmake_exe = os.path.join(cmake_directory, 'bin/cmake')
if os.path.exists(cmake_exe):
output = check_output([cmake_exe, '--version']).decode("utf-8")
if output.strip() == cmake_version:
_log("Skipping download: Found %s (v%s)" % (
cmake_exe, cmake_version))
return
_log("Looking for cmake", cmake_version, "in PATH")
try:
output = check_output(
["cmake", "--version"]).decode("utf-8")
current_cmake_version = output.splitlines()[0]
if cmake_version in current_cmake_version:
_log(" ->", "found %s:" % current_cmake_version,
"skipping download: version matches expected one")
return
else:
_log(" ->", "found %s:" % current_cmake_version,
"not the expected version")
except (OSError, CalledProcessError):
_log(" ->", "not found")
pass
cmake_arch = "x86_64"
name = "cmake-{}-Linux-{}".format(cmake_version, cmake_arch)
cmake_package = "{}.tar.gz".format(name)
_log("Downloading", cmake_package)
download_dir = os.environ["HOME"] + "/downloads"
downloaded_package = os.path.join(download_dir, cmake_package)
if not os.path.exists(downloaded_package):
if not os.path.exists(download_dir):
os.makedirs(download_dir)
cmake_version_major = cmake_version.split(".")[0]
cmake_version_minor = cmake_version.split(".")[1]
try:
check_output([
"wget", "--no-check-certificate", "--progress=dot",
"https://cmake.org/files/v{}.{}/{}".format(cmake_version_major, cmake_version_minor, cmake_package),
"-O", downloaded_package
], stderr=subprocess.STDOUT)
except (OSError, CalledProcessError):
_check_executables_availability(['curl'])
check_output([
"curl", "--progress-bar", "-L",
"https://cmake.org/files/v{}.{}/{}".format(cmake_version_major, cmake_version_minor, cmake_package),
"-o", downloaded_package
], stderr=subprocess.STDOUT)
_log(" ->", "done")
else:
_log(" ->", "skipping download: found", downloaded_package)
_log("Extracting", downloaded_package)
check_output(["tar", "xzf", downloaded_package])
_log(" ->", "done")
_log("Installing", name, "into", cmake_directory)
check_output([
"sudo", "rsync", "-avz", name + "/", cmake_directory
])
_log(" ->", "done")
if __name__ == '__main__':
    install(sys.argv[1] if len(sys.argv) > 1 else DEFAULT_CMAKE_VERSION)
 | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/circle/install_cmake.py | install_cmake.py |
import os
import platform
import subprocess
import sys
from subprocess import CalledProcessError, check_output
DEFAULT_CMAKE_VERSION = "3.5.0"
def _log(*args):
script_name = os.path.basename(__file__)
print("[travis:%s] " % script_name + " ".join(args))
sys.stdout.flush()
def install(cmake_version=DEFAULT_CMAKE_VERSION, is_darwin=False):
"""Download and install CMake into ``/usr/local``."""
cmake_os = "Darwin" if is_darwin else "Linux"
cmake_name = "cmake-{}-{}-x86_64".format(cmake_version, cmake_os)
cmake_package = ".".join((cmake_name, "tar", "gz"))
cmake_version_major = cmake_version.split(".")[0]
cmake_version_minor = cmake_version.split(".")[1]
_log("Looking for cmake", cmake_version, "in PATH")
try:
output = check_output(
"cmake --version", shell=True).decode("utf-8")
current_cmake_version = output.splitlines()[0]
if cmake_version in current_cmake_version:
_log(" ->", "found %s:" % current_cmake_version,
"skipping download: version matches expected one")
return
else:
_log(" ->", "found %s:" % current_cmake_version,
"not the expected version")
except (OSError, CalledProcessError):
_log(" ->", "not found")
pass
download_dir = os.environ["HOME"] + "/downloads"
downloaded_package = os.path.join(download_dir, cmake_package)
if not os.path.exists(downloaded_package):
_log("Making directory: ", download_dir)
try:
os.mkdir(download_dir)
except OSError:
pass
_log(" ->", "done")
_log("Downloading", cmake_package)
try:
check_output([
"wget", "--no-check-certificate", "--progress=dot",
"https://cmake.org/files/v{}.{}/{}".format(
cmake_version_major, cmake_version_minor, cmake_package),
"-P", download_dir
], stderr=subprocess.STDOUT)
except (OSError, CalledProcessError):
check_output([
"curl", "--progress-bar", "-L",
"https://cmake.org/files/v{}.{}/{}".format(
cmake_version_major, cmake_version_minor, cmake_package),
"-o", downloaded_package
], stderr=subprocess.STDOUT)
_log(" ->", "done")
else:
_log("Downloading", cmake_package)
_log(" ->", "skipping download: Found ", downloaded_package)
_log("Extracting", downloaded_package, "into", download_dir)
check_output(["tar", "xzf", downloaded_package, '-C', download_dir])
_log(" ->", "done")
if is_darwin:
prefix = "/usr/local/bin"
_log("Removing CMake executables from", prefix)
check_output(
["sudo", "rm", "-f"] + [
"/".join((prefix, subdir)) for subdir in
("cmake", "cpack", "cmake-gui", "ccmake", "ctest")
]
)
_log(" ->", "done")
_log("Installing CMake in", prefix)
check_output([
"sudo",
download_dir + "/" + cmake_name
+ "/CMake.app/Contents/bin/cmake-gui",
"--install"
])
_log(" ->", "done")
else:
home = os.environ["HOME"]
assert os.path.exists(home)
_log("Copying", download_dir + "/" + cmake_name, "to", home)
check_output([
"rsync", "-avz",
download_dir + "/" + cmake_name + "/", home])
_log(" ->", "done")
if __name__ == '__main__':
install(sys.argv[1] if len(sys.argv) > 1 else DEFAULT_CMAKE_VERSION,
            is_darwin=platform.system().lower() == "darwin")
 | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/travis/install_cmake.py | install_cmake.py |
import os
import sys
import tempfile
import textwrap
from subprocess import check_output
def _log_prefix():
script_name = os.path.basename(__file__)
return "[travis:%s] " % script_name
def _log(*args):
print(_log_prefix() + " ".join(args))
sys.stdout.flush()
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
Copied from textwrap.py available in python 3 (cpython/cpython@a2d2bef)
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
def _execute_script(script):
def _write(output_stream, txt):
output_stream.write(bytearray("%s\n" % txt, "utf-8"))
with tempfile.NamedTemporaryFile(delete=True) as script_file:
_write(script_file, script)
script_file.file.flush()
# _log("Executing:", "bash", script_file.name)
return check_output(
["bash", script_file.name]).decode("utf-8").strip()
def is_pyenv_installed(py_version):
"""Return True if ``py_version`` pyenv is installed.
"""
script = textwrap.dedent(
r"""
#eval "$( pyenv init - )"
(pyenv versions \
| sed -Ee "s/\(.+\)//" \
| tr -d "* " \
| grep "^{py_version}$") \
|| echo ""
""".format(py_version=py_version)
)
return _execute_script(script) == py_version
def pyenv_executable_path(py_version, executable="python"):
return os.path.expanduser(
"~/.pyenv/versions/%s/bin/%s" % (py_version, executable))
def pyenv_executable_exists(py_version, executable="python"):
return os.path.exists(pyenv_executable_path(py_version, executable))
def install(py_version):
"""Update and install ``pyenv``."""
_log("Looking for", pyenv_executable_path(py_version))
python_found = pyenv_executable_exists(py_version)
if python_found:
_log(" ->", "found")
return
else:
_log(" ->", "not found")
cmd = "brew update"
_log("Executing:", cmd)
check_output(cmd, shell=True)
_log(" -> done")
cmd = "brew outdated pyenv || brew upgrade pyenv"
_log("Executing:", cmd)
check_output(cmd, shell=True)
_log(" -> done")
_log("Looking for pyenv", py_version)
if is_pyenv_installed(py_version) and pyenv_executable_exists(py_version):
_log(" ->", "found")
return
else:
_log(" ->", "not found")
_log("Installing pyenv", py_version)
cmd = textwrap.dedent(
"""
eval "$( pyenv init - )"
pyenv install {py_version}
""".format(py_version=py_version)
).strip()
_log("Executing:")
for line in indent(cmd, " " * 11).splitlines():
_log(line)
check_output(cmd, shell=True)
_log(" -> done")
_log("Looking for pyenv", py_version)
if not is_pyenv_installed(py_version):
exit(_log_prefix() +
" -> ERROR: Failed to install pyenv %s" % py_version)
_log(" ->", "found")
if __name__ == '__main__':
    install(os.environ['PYTHON_VERSION'])
 | scikit-ci-addons | /scikit-ci-addons-0.25.0.tar.gz/scikit-ci-addons-0.25.0/travis/install_pyenv.py | install_pyenv.py |
.. :changelog:
History
-------
scikit-ci was initially developed in May 2016 by Omar Padron to facilitate the
continuous integration of the scikit-build project.
At that time, it already consisted of a driver script calling methods specific
to each continuous integration service. By having each CI service call the
same driver script, there was no need to implement the install/test/build
steps over and over in different scripting languages (PowerShell, shell or
Windows batch). Instead, all the logic was implemented in Python, leveraging the
subprocess module.
Later in early September 2016, with the desire to set up cross-platform continuous
integration for other projects and avoid duplication or maintenance hell, a
dedicated repository was created by Jean-Christophe Fillion-Robin. By simply
cloning the repository, it was possible to more easily enable CI for other projects.
While this was an improvement, all the steps were still hardcoded in the driver
scripts and the project was not easily customizable. More could be done to improve
the user experience.
Finally, in late September 2016, all the hardcoded logic was moved into standalone
executable Python scripts. Then, Jean-Christophe came up with the concept of the
scikit-ci.yml configuration file. This configuration file allows describing the
commands and environment for each step (install, test and build) specific to a
project and associated continuous integration services.
 | scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/HISTORY.rst | HISTORY.rst |
=========
scikit-ci
=========
scikit-ci enables a centralized and simpler CI configuration for Python
extensions.
By having ``appveyor.yml``, ``azure-pipelines.yml``, ``circle.yml`` and ``.travis.yml`` calling
the same scikit-ci command-line executable, all the CI steps for all
services can be fully described in one ``scikit-ci.yml`` configuration file.
Latest Release
--------------
.. table::
+--------------------------------------------------------------------------+----------------------------------------------------------------------------+
| Versions | Downloads |
+==========================================================================+============================================================================+
| .. image:: https://img.shields.io/pypi/v/scikit-ci.svg?maxAge=2592000 | .. image:: https://img.shields.io/badge/downloads-72k%20total-green.svg |
| :target: https://pypi.python.org/pypi/scikit-ci | :target: https://pypi.python.org/pypi/scikit-ci |
+--------------------------------------------------------------------------+----------------------------------------------------------------------------+
Build Status
------------
.. table::
+---------------+--------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------+
| | Linux | macOS | Windows |
+===============+======================================================================================+======================================================================================+============================================================================================+
| PyPI | .. image:: https://circleci.com/gh/scikit-build/scikit-ci.svg?style=shield | .. image:: https://img.shields.io/travis/scikit-build/scikit-ci.svg?maxAge=2592000 | .. image:: https://ci.appveyor.com/api/projects/status/5to6lvgaqcrck675?svg=true |
| | :target: https://circleci.com/gh/scikit-build/scikit-ci | :target: https://travis-ci.org/scikit-build/scikit-ci | :target: https://ci.appveyor.com/project/scikit-build/scikit-ci/branch/master |
+---------------+--------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------+
Overall Health
--------------
.. image:: https://readthedocs.org/projects/scikit-ci/badge/?version=latest
:target: http://scikit-ci.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://codecov.io/gh/scikit-build/scikit-ci/branch/master/graph/badge.svg
:target: https://codecov.io/gh/scikit-build/scikit-ci
Miscellaneous
-------------
* Free software: Apache Software license
* Documentation: http://scikit-ci.readthedocs.org
* Source code: https://github.com/scikit-build/scikit-ci
* Mailing list: https://groups.google.com/forum/#!forum/scikit-build
 | scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/README.rst | README.rst |
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
Types of Contributions
----------------------
You can contribute in many ways:
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/scikit-build/scikit-ci/issues.
If you are reporting a bug, please include:
* Any details about your CI setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
The scikit-ci project could always use more documentation. We welcome help
with the official scikit-ci docs, in docstrings, or even on blog posts and
articles for the web.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at
https://github.com/scikit-build/scikit-ci/issues.
If you are proposing a new feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started
-----------
Ready to contribute? Here's how to set up `scikit-ci` for local development.
1. Fork the `scikit-ci` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/scikit-ci.git
3. Install your local copy into a virtualenv. Assuming you have
virtualenvwrapper installed (`pip install virtualenvwrapper`), this is how
you set up your cloned fork for local development::
$ mkvirtualenv scikit-ci
$ cd scikit-ci/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and
the tests, including testing other Python versions with tox::
$ flake8
$ python setup.py test
$ tox
If needed, you can get flake8 and tox by using `pip install` to install
them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in `README.rst`.
3. The pull request should work for Python 2.7, 3.3, 3.4, 3.5 and PyPy.
Check https://travis-ci.org/scikit-build/scikit-ci/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ pytest tests/test_scikit_ci.py::test_expand_environment
 | scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
.. _making_a_release:
================
Making a release
================
A core developer should use the following steps to create a release `X.Y.Z` of
**scikit-ci** on `PyPI`_.
-------------
Prerequisites
-------------
* All CI tests are passing on `AppVeyor`_, `CircleCI`_ and `Travis CI`_.
* You have a `GPG signing key <https://help.github.com/articles/generating-a-new-gpg-key/>`_.
-------------------------
Documentation conventions
-------------------------
The commands reported below should be evaluated in the same terminal session.
Commands to evaluate start with a dollar sign. For example::
$ echo "Hello"
Hello
means that ``echo "Hello"`` should be copied and evaluated in the terminal.
----------------------
Setting up environment
----------------------
1. First, `register for an account on PyPI <https://pypi.org>`_.
2. If not already the case, ask to be added as a ``Package Index Maintainer``.
3. Create a ``~/.pypirc`` file with your login credentials::
[distutils]
index-servers =
pypi
pypitest
[pypi]
username=<your-username>
password=<your-password>
[pypitest]
repository=https://test.pypi.org/legacy/
username=<your-username>
password=<your-password>
where ``<your-username>`` and ``<your-password>`` correspond to your PyPI account.
---------------------
`PyPI`_: Step-by-step
---------------------
1. Make sure that all CI tests are passing on `AppVeyor`_, `CircleCI`_ and `Travis CI`_.
2. Download the latest sources
.. code::
$ cd /tmp && \
git clone [email protected]:scikit-build/scikit-ci && \
cd scikit-ci
3. List all tags sorted by version
.. code::
$ git fetch --tags && \
git tag -l | sort -V
4. Choose the next release version number
.. code::
$ release=X.Y.Z
.. warning::
To ensure the packages are uploaded on `PyPI`_, tags must match this regular
expression: ``^[0-9]+(\.[0-9]+)*(\.post[0-9]+)?$``.
5. In `README.rst`, update `PyPI`_ download count after running `this big table query`_
and commit the changes.
.. code::
$ git add README.rst && \
git commit -m "README: Update download stats [ci skip]"
.. note::
To learn more about `pypi-stats`, see `How to get PyPI download statistics <https://kirankoduru.github.io/python/pypi-stats.html>`_.
6. In `CHANGES.rst` replace ``Next Release`` section header with
``Scikit-ci X.Y.Z`` and commit the changes.
.. code::
$ git add CHANGES.rst && \
git commit -m "Scikit-ci ${release}"
7. Tag the release
.. code::
$ git tag --sign -m "Scikit-ci ${release}" ${release} master
.. warning::
We recommend using a `GPG signing key <https://help.github.com/articles/generating-a-new-gpg-key/>`_
to sign the tag.
8. Create the source distribution and wheel
.. code::
$ python setup.py sdist bdist_wheel
9. Publish both the release tag and the master branch
.. code::
$ git push origin ${release} && \
git push origin master
10. Upload the distributions on `PyPI`_
.. code::
twine upload dist/*
.. note::
To first upload on `TestPyPI`_ , do the following::
$ twine upload -r pypitest dist/*
11. Create a clean testing environment to test the installation
.. code::
$ pushd $(mktemp -d) && \
mkvirtualenv scikit-ci-${release}-install-test && \
pip install scikit-ci && \
ci --help
.. note::
If the ``mkvirtualenv`` command is not available, this means you do not have `virtualenvwrapper`_
    installed; in that case, you could either install it or directly use `virtualenv`_ or `venv`_.
To install from `TestPyPI`_, do the following::
$ pip install -i https://test.pypi.org/simple scikit-ci
12. Cleanup
.. code::
$ popd && \
deactivate && \
rm -rf dist/* && \
rmvirtualenv scikit-ci-${release}-install-test
13. Add a ``Next Release`` section back in `CHANGES.rst`, commit and push local changes.
.. code::
$ git add CHANGES.rst && \
git commit -m "CHANGES.rst: Add \"Next Release\" section [ci skip]" && \
git push origin master
.. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/
.. _virtualenv: http://virtualenv.readthedocs.io
.. _venv: https://docs.python.org/3/library/venv.html
.. _AppVeyor: https://ci.appveyor.com/project/scikit-build/scikit-ci/history
.. _CircleCI: https://circleci.com/gh/scikit-build/scikit-ci
.. _Travis CI: https://travis-ci.org/scikit-build/scikit-ci/builds
.. _PyPI: https://pypi.org/project/scikit-ci
.. _TestPyPI: https://test.pypi.org/project/scikit-ci
.. _this big table query: https://bigquery.cloud.google.com/savedquery/280188050539:ef89d872d6784e379d7153872901b00d
 | scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/docs/make_a_release.rst | make_a_release.rst |
=====
Usage
=====
The scikit-ci command line executable allows executing the commands associated
with steps described in a scikit-ci
:doc:`configuration file </scikit-ci-yml>`.
Executing scikit-ci steps
-------------------------
Invoking scikit-ci will execute all steps listed in
a scikit-ci :doc:`configuration file </scikit-ci-yml>`::
ci
This command executes in order the steps listed below:
- before_install
- install
- before_build
- build
- test
- after_test
It is also possible to execute a given step and its dependent steps::
ci build
In that case, the executed steps will be:
- before_install
- install
- before_build
- build
.. note::
Remember that:
- steps are executed following a specific :ref:`ordering <step_order>`
- scikit-ci :ref:`keeps track <keeping_track_executed_steps>` of previously
executed steps.
- environment variables set in ``step(n)`` will be available in ``step(n+1)``.
For more details, see :ref:`environment_variable_persistence`
Calling scikit-ci through ``python -m ci``
------------------------------------------
You can invoke scikit-ci through the Python interpreter from the command line::
python -m ci [...]
This is equivalent to invoking the command line script ``ci [...]``
directly.
Getting help on version, option names
-------------------------------------
::
ci --version # shows where ci was imported from
ci -h | --help # show help on command line
 | scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/docs/usage.rst | usage.rst |
.. scikit-ci documentation master file, created by
sphinx-quickstart on Sat Oct 8 01:28:33 2016.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to scikit-ci's documentation!
=====================================
scikit-ci enables a centralized and simpler CI configuration for Python
extensions.
By having ``appveyor.yml``, ``azure-pipelines.yml``, ``circle.yml`` and ``.travis.yml`` calling
the scikit-ci command-line executable, all the CI steps for all
services can be fully described in one ``scikit-ci.yml`` configuration file.
.. toctree::
:maxdepth: 2
:caption: User guide
installation
usage
scikit-ci-yml.rst
contributing
authors
history
changes
.. toctree::
:maxdepth: 2
:caption: For maintainers
make_a_release
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
Resources
=========
* Free software: Apache Software license
* Documentation: http://scikit-ci.readthedocs.org
* Source code: https://github.com/scikit-build/scikit-ci
* Mailing list: https://groups.google.com/forum/#!forum/scikit-build
 | scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/docs/index.rst | index.rst |
============
Installation
============
Install package with pip
------------------------
To install with pip::
$ pip install scikit-ci
Install from source
-------------------
To install scikit-ci from the latest source, first obtain the source code::
$ git clone https://github.com/scikit-build/scikit-ci
$ cd scikit-ci
then install with::
$ pip install .
or::
$ pip install -e .
for development.
Dependencies
------------
Python Packages
^^^^^^^^^^^^^^^
The project has a few common Python package dependencies. The runtime
dependencies are:
.. include:: ../requirements.txt
:literal:
The development dependencies (for testing and coverage) are:
.. include:: ../requirements-dev.txt
:literal:
 | scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/docs/installation.rst | installation.rst |
==================
Configuration file
==================
The configuration file is read by the scikit-ci executable to find out which
commands to execute for a given step.
The configuration file should be named ``scikit-ci.yml`` and is usually added
to the root of a project.
It is a `YAML <http://www.yaml.org/spec/1.2/spec.html>`_ file that
can be validated against `scikit-ci-schema.yml <https://github.com/scikit-build/scikit-ci-schema>`_.
Concept of Step
---------------
A step consists of a list of ``commands`` and optional key/value pairs
describing the ``environment``.
More specifically, a step can be described using the following
structure:
.. code-block:: yaml
before_install:
environment:
FOO: bar
commands:
- echo "Hello world"
where ``before_install`` can be replaced by any of these:
- ``before_install``
- ``install``
- ``before_build``
- ``build``
- ``test``
- ``after_test``
.. _step_mapping:
Mapping with Appveyor, Azure Pipelines, CircleCI and TravisCI steps
-------------------------------------------------------------------
scikit-ci does not impose any particular mapping.
Documentation specific to each service is available here:
- `Appveyor build pipeline <https://www.appveyor.com/docs/build-configuration/#build-pipeline>`_
- `Azure pipelines <https://docs.microsoft.com/en-us/azure/devops/pipelines/>`_
- `CircleCI configuration 2.0 <https://circleci.com/docs/2.0/configuration-reference/>`_
- `CircleCI configuration 1.0 <https://circleci.com/docs/configuration/>`_ (deprecated)
- `TravisCI build lifecycle <https://docs.travis-ci.com/user/customizing-the-build/#The-Build-Lifecycle>`_
Reported below are some recommended associations that
are known to work.
- ``appveyor.yml``:
.. literalinclude:: ../appveyor.yml
:language: yaml
:start-after: scikit-ci-yml.rst: start
:end-before: scikit-ci-yml.rst: end
:emphasize-lines: 2, 5, 8, 11
.. note:: Since on Windows the ``ci`` executable is installed in the ``Scripts``
directory (e.g `C:\\Python27\\Scripts\\ci.exe`) which is not in the
``PATH`` by default, the ``python -m ci`` syntax is used.
- ``azure-pipelines.yml``:
.. literalinclude:: ../azure-pipelines.yml
:language: yaml
:start-after: scikit-ci-yml.rst: start
:end-before: scikit-ci-yml.rst: end
:emphasize-lines: 1, 4, 7, 12
- ``.circleci/config.yml`` (CircleCI 2.0):
.. literalinclude:: ../.circleci/config.yml
:language: yaml
:start-after: scikit-ci-yml.rst: start
:end-before: scikit-ci-yml.rst: end
:emphasize-lines: 23, 28, 33, 38, 43
- ``circle.yml`` (CircleCI 1.0):
.. literalinclude:: circle-v1-yml.txt
:language: yaml
:start-after: scikit-ci-yml.rst: start
:end-before: scikit-ci-yml.rst: end
:emphasize-lines: 15, 19, 25
- ``.travis.yml``
.. literalinclude:: ../.travis.yml
:language: yaml
:start-after: scikit-ci-yml.rst: start
:end-before: scikit-ci-yml.rst: end
:emphasize-lines: 2, 5, 8
.. _step_order:
Order of steps
--------------
scikit-ci executes steps in the following order:
#. ``before_install``
#. ``install``
#. ``before_build``
#. ``build``
#. ``test``
#. ``after_test``
This means that the :ref:`mapping specified <step_mapping>` in the continuous
integration file has to be done accordingly.
Automatic execution of dependent steps
--------------------------------------
Considering the :ref:`step ordering <step_order>`, executing any ``step(n)``
ensures that ``step(n-1)`` has been executed before.
.. _keeping_track_executed_steps:
Keeping track of executed steps
-------------------------------
scikit-ci keeps track of executed steps by setting environment variables of the
form ``SCIKIT_CI_<STEP_NAME>``, where ``<STEP_NAME>`` is any of the step names
in upper-case.
.. note::
    Specifying the command line option ``--force`` forces
    the execution of the steps, ignoring the values of the ``SCIKIT_CI_<STEP_NAME>``
environment variables.
.. _environment_variable_persistence:
Environment variable persistence
--------------------------------
Environment variables defined in any given step are always guaranteed to be
set in steps executed afterward.
This is made possible by serializing the environment on the filesystem.
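For example, a variable defined in the ``environment`` of the ``install`` step can be
referenced by a ``build`` command using the expansion syntax described in
:ref:`environment_variable_usage`. This is only a minimal illustration and
``MY_VARIABLE`` is an arbitrary name:
.. code-block:: yaml
    install:
        environment:
            MY_VARIABLE: hello
    build:
        commands:
            - echo $<MY_VARIABLE>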
.. note::
After executing steps, a file named ``env.json`` is created in the current
directory along side ``scikit-ci.yml``. This is where the environment is
cached for re-use in subsequent steps.
    Specifying the command line option ``--clear-cached-env`` allows executing
steps after removing the ``env.json`` file.
Step specialization
-------------------
For any given step, it is possible to specify ``commands`` and ``environment``
variables specific to each continuous integration service.
Recognized services are:
- ``appveyor``
- ``azure``
- ``circle``
- ``travis``
Commands
^^^^^^^^
``commands`` common to all services are executed first, then ``commands`` specific
to each service are executed.
For example, considering this configuration used on CircleCI and TravisCI:
.. code-block:: yaml
before_install:
commands:
- echo "Hello Everywhere"
circle:
commands:
- echo "Hello on CircleCI"
travis:
linux:
commands:
- echo "Hello on TravisCI"
The output on the different service will be the following:
- CircleCI:
::
Hello Everywhere
Hello on CircleCI
- TravisCI:
::
Hello Everywhere
Hello on TravisCI
.. note:: Sections :ref:`command_specification` and :ref:`python_command_specification`
describe the different types of command.
Environment
^^^^^^^^^^^
Similarly, ``environment`` can be overridden for each service.
For example, considering this configuration used on CircleCI and TravisCI:
.. code-block:: yaml
before_install:
circle:
environment:
CATEGORY_2: 42
travis:
linux:
environment:
CATEGORY_1: 99
environment:
CATEGORY_1: 1
CATEGORY_2: 2
commands:
- echo "CATEGORY_1 is ${CATEGORY_1}"
- echo "CATEGORY_2 is ${CATEGORY_2}"
The output on the different service will be the following:
- on CircleCI:
::
CATEGORY_1 is 1
CATEGORY_2 is 42
- on TravisCI:
::
CATEGORY_1 is 99
CATEGORY_2 is 2
Reserved Environment Variables
------------------------------
- ``CI_NAME``: This variable is automatically set by scikit-ci and will
contain the name of the continuous integration service currently executing
the step.
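For example, since ``CI_NAME`` is set like any other environment variable, it should
be possible to expand it in a command using the syntax described in
:ref:`environment_variable_usage` below. This is only a minimal illustration:
.. code-block:: yaml
    test:
        commands:
            - echo "Running on $<CI_NAME>"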
.. _environment_variable_usage:
Environment variable usage
--------------------------
To facilitate the `use <https://en.wikipedia.org/wiki/Environment_variable#Use_and_display>`_
of environment variables across interpreters, scikit-ci uses a specific syntax.
Environment variables specified using ``$<NAME_OF_VARIABLE>`` in both commands
and environment variables will be expanded.
For example, considering this configuration used on Appveyor, CircleCI
and TravisCI:
.. code-block:: yaml
before_install:
appveyor:
environment:
TEXT: Windows$<TEXT>
travis:
linux:
environment:
TEXT: LinuxWorld
environment:
TEXT: World
commands:
- echo $<TEXT>
The output on the different service will be the following:
- on Appveyor:
::
WindowsWorld
- on CircleCI:
::
World
- on TravisCI:
::
LinuxWorld
.. note:: On systems having a POSIX interpreter, the environment variable will
          **NOT** be expanded if it is part of a string starting with a single quote.
.. autoclass:: ci.driver.Driver
:members: expand_command
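Conceptually, expansion replaces each ``$<NAME>`` occurrence with the value of the
corresponding variable. The snippet below is only a rough sketch of that idea; the
actual logic lives in ``ci.driver.Driver.expand_command`` and may differ:
.. code-block:: python
    import os
    import re
    def expand(text, env=None):
        # Replace every $<NAME> occurrence with the variable's value (empty if unset).
        env = os.environ if env is None else env
        return re.sub(r"\$<(\w+)>", lambda m: env.get(m.group(1), ""), text)
    print(expand("echo $<TEXT>", {"TEXT": "World"}))  # prints: echo World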
.. _command_specification:
Command Specification
---------------------
Specifying a command composed of a program name and arguments is supported on all
platforms.
For example:
.. code-block:: yaml
test:
commands:
- echo "Hello"
- python -c "print('world')"
- git clone git://github.com/scikit-build/scikit-ci
On Unix-based platforms (e.g. CircleCI and TravisCI), commands are interpreted
using ``bash``.
On Windows-based platforms (e.g. Appveyor), commands are
interpreted using the Windows command terminal ``cmd.exe``.
Since both interpreters expand quotes differently, we recommend avoiding
single-quoted arguments. The following table lists working recipes:
.. table::
+----------------------------------------+----------------------------+-----------------------------------+
| | CircleCi, TravisCI | Appveyor |
+========================================+============================+===================================+
| **scikit-ci command** | **bash output** | **cmd output** |
+----------------------------------------+----------------------------+-----------------------------------+
| ``echo Hello1`` | Hello1 | Hello1 |
+----------------------------------------+----------------------------+-----------------------------------+
| ``echo "Hello2"`` | Hello2 | "Hello2" |
+----------------------------------------+----------------------------+-----------------------------------+
| ``echo 'Hello3'`` | Hello3 | 'Hello3' |
+----------------------------------------+----------------------------+-----------------------------------+
| ``python -c "print('Hello4')"`` | Hello4 | Hello4 |
+----------------------------------------+----------------------------+-----------------------------------+
| ``python -c 'print("Hello5")'`` | Hello5 | ``no output`` |
+----------------------------------------+----------------------------+-----------------------------------+
| ``python -c "print('Hello6\'World')"`` | Hello6'World | Hello6'World |
+----------------------------------------+----------------------------+-----------------------------------+
And here are the values associated with ``sys.argv`` for different scikit-ci commands:
::
python program.py --things "foo" "bar" --more-things "doo" 'dar'
Output on CircleCI, TravisCI::
arg_1 [--things]
arg_2 [foo]
arg_3 [bar]
arg_4 [--more-things]
arg_5 [doo]
arg_6 [dar]
Output on Appveyor::
arg_1 [--things]
arg_2 [foo]
arg_3 [bar]
arg_4 [--more-things]
arg_5 [doo]
arg_6 ['dar'] # <-- Note the presence of single quotes
::
    python program.py --the-foo="foo" -the-bar='bar'
Output on CircleCI, TravisCI::
arg_1 [--the-foo=foo]
arg_2 [-the-bar=bar]
Output on Appveyor::
arg_1 [--the-foo=foo]
arg_2 [-the-bar='bar'] # <-- Note the presence of single quotes
.. note::
    Here is the source of ``program.py``:
.. code-block:: python
import sys
for index, arg in enumerate(sys.argv):
if index == 0:
continue
print("arg_%s [%s]" % (index, sys.argv[index]))
.. _python_command_specification:
Python Command Specification
----------------------------
.. versionadded:: 0.10.0
The ``python`` commands are supported on all platforms.
For example:
.. code-block:: yaml
test:
commands:
- python: print("single_line")
- python: "for letter in ['a', 'b', 'c']: print(letter)"
- python: |
import os
if 'FOO' in os.environ:
print("FOO is set")
else:
print("FOO is *NOT* set")
.. note::
    By using ``os.environ``, ``python`` commands remove the need to specify
    environment variables using the ``$<NAME_OF_VARIABLE>`` syntax described in
    :ref:`environment_variable_usage`.
| scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/docs/scikit-ci-yml.rst | scikit-ci-yml.rst |
import argparse
import ci
import os
class _OptionalStep(argparse.Action):
"""Custom action making the ``step`` positional argument with choices
optional.
Setting the ``choices`` attribute will fail with an *invalid choice* error.
Adapted from http://stackoverflow.com/questions/8526675/python-argparse-optional-append-argument-with-choices/8527629#8527629
""" # noqa: E501
def __call__(self, parser, namespace, value, option_string=None):
if value:
if value not in ci.STEPS:
message = ("invalid choice: {0!r} (choose from {1})"
.format(value,
', '.join([repr(action)
for action in
ci.STEPS])))
raise argparse.ArgumentError(self, message)
setattr(namespace, self.dest, value)
def main():
"""The main entry point to ``ci.py``.
This is installed as the script entry point.
"""
version_str = ("This is scikit-ci version %s, imported from %s\n" %
(ci.__version__, os.path.abspath(ci.__file__)))
parser = argparse.ArgumentParser(description=ci.__doc__)
parser.add_argument(
"step", type=str, nargs='?', default=ci.STEPS[-1],
action=_OptionalStep, metavar='STEP',
help="name of the step to execute. "
"Choose from: {}. "
"If no step is specified, all are executed.".format(", ".join(
[repr(action) for action in ci.STEPS]))
)
parser.add_argument(
"--force", action="store_true",
help="always execute the steps"
)
parser.add_argument(
"--without-deps", action="store_false",
help="do not execute dependent steps", dest='with_dependencies'
)
parser.add_argument(
"--clear-cached-env", action="store_true",
help="clear cached environment (removes 'env.json' file)"
)
parser.add_argument(
"--version", action="version",
version=version_str,
help="display scikit-ci version and import information.")
args = parser.parse_args()
try:
ci.execute_step(
args.step,
force=args.force,
with_dependencies=args.with_dependencies,
clear_cached_env=args.clear_cached_env
)
except ci.SKCIError as exc:
exit(exc)
if __name__ == '__main__': # pragma: no cover
    main()
| scikit-ci | /scikit-ci-0.21.0.tar.gz/scikit-ci-0.21.0/ci/__main__.py | __main__.py |
Scikit-clean
==================
**scikit-clean** is a Python ML library for classification in the presence of \
label noise. Aimed primarily at researchers, it provides implementations of \
several state-of-the-art algorithms, along with tools to simulate artificial noise, \
create complex pipelines and evaluate them.
This library is fully scikit-learn API compatible, which means \
all of scikit-learn's building blocks can be seamlessly integrated into your workflow. \
Like scikit-learn estimators, most of the methods also support features like \
parallelization, reproducibility etc.
Example Usage
***************
A typical label noise research workflow begins with clean labels, simulates \
label noise in the training set, and then evaluates how a model handles that noise \
using a clean test set. In scikit-clean, this looks like:
.. code-block:: python
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from skclean.simulate_noise import flip_labels_uniform
from skclean.models import RobustLR # Robust Logistic Regression
X, y = make_classification(n_samples=200,n_features=4)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.20)
y_train_noisy = flip_labels_uniform(y_train, .3) # Flip labels of 30% samples
clf = RobustLR().fit(X_train,y_train_noisy)
print(clf.score(X_test, y_test))
scikit-clean provides a customized `Pipeline` for more complex workflows. Many noise-robust \
algorithms can be broken down into two steps: detecting the noise likelihood of each sample
in the dataset, and training robust classifiers using that information. This fits
nicely with the Pipeline API:
.. code-block:: python
    from sklearn.preprocessing import StandardScaler        # scikit-learn imports used below
    from sklearn.feature_selection import VarianceThreshold
    from sklearn.svm import SVC
    from sklearn.model_selection import GridSearchCV, cross_val_score
from skclean.simulate_noise import UniformNoise
from skclean.detectors import KDN
from skclean.handlers import Filter
from skclean.pipeline import Pipeline, make_pipeline # Importing from skclean, not sklearn
clf = Pipeline([
('scale', StandardScaler()), # Scale features
('feat_sel', VarianceThreshold(.2)), # Feature selection
('detector', KDN()), # Detect mislabeled samples
('handler', Filter(SVC())), # Filter out likely mislabeled samples and then train a SVM
])
clf_g = GridSearchCV(clf,{'detector__n_neighbors':[2,5,10]})
n_clf_g = make_pipeline(UniformNoise(.3),clf_g) # Create label noise at the very first step
print(cross_val_score(n_clf_g, X, y, cv=5).mean()) # 5-fold cross validation
Please see this notebook_ before you begin for a more detailed introduction, \
and this_ for the complete API.
.. _notebook: https://scikit-clean.readthedocs.io/en/latest/examples/Introduction%20to%20Scikit-clean.html
.. _this: https://scikit-clean.readthedocs.io/en/latest/api.html
Installation
******************
The simplest option is probably using pip::
pip install scikit-clean
If you intend to modify the code, install in editable mode::
git clone https://github.com/Shihab-Shahriar/scikit-clean.git
cd scikit-clean
pip install -e .
If you're only interested in a small part of this library, say one or two algorithms, feel free to simply \
copy/paste the relevant code into your project.
Alternatives
**************
There are several open source tools for handling label noise; some of them are: \
1. Cleanlab_
2. Snorkel_
3. NoiseFiltersR_
.. _Cleanlab: https://github.com/cgnorthcutt/cleanlab
.. _Snorkel: https://github.com/snorkel-team/snorkel
.. _NoiseFiltersR: https://journal.r-project.org/archive/2017/RJ-2017-027/RJ-2017-027.pdf
`NoiseFiltersR` is closest in objective to ours, though it's implemented in R and doesn't \
appear to be actively maintained.
`Cleanlab` and `Snorkel` are both in Python, though they have somewhat different \
priorities than ours. While our goal is to implement as many algorithms as \
possible, these tools usually focus on one or a few related papers. They have also been \
developed for longer, meaning they are more stable, better optimized and better suited \
for practitioners/engineers than `scikit-clean`.
Credits
**************
We want to thank `scikit-learn`, `imbalanced-learn` and `Cleanlab`; these implementations \
are inspired by, and directly borrow code from, those libraries.
We also want to thank the authors of the original papers. Here is a list of papers partially \
or fully implemented by `scikit-clean`:
* Taghi M Khoshgoftaar and Pierre Rebours. Improving software quality prediction by noise filtering techniques. Journal of Computer Science and Technology, 22(3):387–396, 2007.
* Sunghun Kim, Hongyu Zhang, Rongxin Wu, and Liang Gong. Dealing with noise in defect prediction. In 2011 33rd International Conference on Software Engineering (ICSE), 481–490. IEEE, 2011.
* Alexander Hanbo Li and Andrew Martin. Forest-type regression with general losses and robust forest. In International Conference on Machine Learning, 2091–2100. 2017.
* Aditya Krishna Menon, Brendan Van Rooyen, and Nagarajan Natarajan. Learning from binary labels with instance-dependent noise. Machine Learning, 107(8-10):1561–1595, 2018.
* Nagarajan Natarajan, Inderjit S Dhillon, Pradeep K Ravikumar, and Ambuj Tewari. Learning with noisy labels. In Advances in neural information processing systems, 1196–1204. 2013.
* Maryam Sabzevari, Gonzalo Martínez-Muñoz, and Alberto Suárez. A two-stage ensemble method for the detection of class-label noise. Neurocomputing, 275:2374–2383, 2018.
* Michael R Smith, Tony Martinez, and Christophe Giraud-Carrier. An instance level analysis of data complexity. Machine learning, 95(2):225–256, 2014.
* Felipe N Walmsley, George DC Cavalcanti, Dayvid VR Oliveira, Rafael MO Cruz, and Robert Sabourin. An ensemble generation method based on instance hardness. In 2018 International Joint Conference on Neural Networks (IJCNN), 1–8. IEEE, 2018.
* Bianca Zadrozny, John Langford, and Naoki Abe. Cost-sensitive learning by cost-proportionate example weighting. In Third IEEE international conference on data mining, 435–442. IEEE, 2003.
* Zijin Zhao, Lingyang Chu, Dacheng Tao, and Jian Pei. Classification with label noise: a markov chain sampling framework. Data Mining and Knowledge Discovery, 33(5):1468–1504, 2019.
A note about naming
-----------------------------------------------
"There are 2 hard problems in computer science: cache invalidation, naming things, and \
off-by-1 errors."
The majority of the algorithms in `scikit-clean` are not explicitly named by their authors. \
In some rare cases, similar or very similar ideas appear under different names (e.g. `KDN`). \
We tried to name things as best as we could. However, if you're the author of any of these \
methods and want to rename it, we'll happily oblige.
| scikit-clean | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/README.rst | README.rst |
Contributing
==============
Since researchers are the intended audience of this library, we value correctness \
and readability over complex performance optimizations and broad functionality. \
Note, however, that we reuse scikit-learn's built-in functions whenever we can, \
even though scikit-learn itself has somewhat different priorities.
We welcome all types of contributions: correcting bugs in code, fixing typos in \
documentation, or implementing new algorithms. Our inclusion criterion for new \
algorithms is pretty relaxed: any algorithm that has been published in a peer-reviewed \
journal/conference is eligible. Please read this guideline_ from scikit-learn before \
you open a pull request.
.. _guideline: https://scikit-learn.org/stable/developers/contributing.html
| scikit-clean | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/contributing.rst | contributing.rst |
.. scikit-clean documentation master file, created by
sphinx-quickstart on Thu Jul 23 22:34:03 2020.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to scikit-clean's documentation!
========================================
.. toctree::
:maxdepth: 2
:caption: Getting Started:
intro
.. toctree::
:maxdepth: 2
:hidden:
:caption: Documentation
user_guide
api
.. toctree::
:maxdepth: 2
:hidden:
:caption: Additional Information
contributing
references
-------
.. rubric:: References2
.. bibliography:: zrefs.bib
:cited:
:labelprefix: A
:keyprefix: a-
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| scikit-clean | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/index.rst | index.rst |
API Reference
=============
Detectors (`skclean.detectors`)
--------------------------------
.. automodule:: skclean.detectors
.. py:currentmodule:: skclean
.. autosummary::
:toctree: _autosummary
skclean.detectors.KDN
skclean.detectors.ForestKDN
skclean.detectors.RkDN
skclean.detectors.PartitioningDetector
skclean.detectors.MCS
skclean.detectors.InstanceHardness
skclean.detectors.RandomForestDetector
Handlers (`skclean.handlers`)
-------------------------------
.. automodule:: skclean.handlers
.. py:currentmodule::skclean
.. autosummary::
:toctree: _autosummary
skclean.handlers.Filter
skclean.handlers.FilterCV
skclean.handlers.CLNI
skclean.handlers.IPF
skclean.handlers.SampleWeight
skclean.handlers.WeightedBagging
skclean.handlers.Costing
Models (`skclean.models`)
-----------------------------
.. automodule:: skclean.models
.. py:currentmodule:: skclean
.. autosummary::
:toctree: _autosummary
skclean.models.RobustForest
skclean.models.RobustLR
Pipeline (`skclean.pipeline`)
--------------------------------
.. automodule:: skclean.pipeline
.. py:currentmodule:: skclean
.. autosummary::
:toctree: _autosummary
skclean.pipeline.Pipeline
skclean.pipeline.make_pipeline
Noise Simulation (`skclean.simulate_noise`)
--------------------------------------------
.. automodule:: skclean.simulate_noise
.. py:currentmodule:: skclean
.. autosummary::
:toctree: _autosummary
skclean.simulate_noise.flip_labels_uniform
skclean.simulate_noise.flip_labels_cc
skclean.simulate_noise.UniformNoise
skclean.simulate_noise.CCNoise
skclean.simulate_noise.BCNoise
:ref:`paper-refs`
| scikit-clean | /scikit-clean-0.1.2.tar.gz/scikit-clean-0.1.2/doc/api.rst | api.rst |