Dataset schema (column name, type, observed range):

repository            string class   11 distinct values
repo_id               string         length 1 to 3
target_module_path    string         length 16 to 72
prompt                string         length 298 to 21.7k
relavent_test_path    string         length 50 to 99
full_function         string         length 336 to 33.8k
function_name         string         length 2 to 51
repository: seaborn
repo_id: 0
target_module_path: seaborn/_core/scales.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Continuous.label.txt
full_function:

def label(
    self,
    formatter: Formatter | None = None, *,
    like: str | Callable | None = None,
    base: int | None | Default = default,
    unit: str | None = None,
) -> Continuous:
    """
    Configure the appearance of tick labels for the scale's axis or legend.

    Parameters
    ----------
    formatter : :class:`matplotlib.ticker.Formatter` subclass
        Pre-configured formatter to use; other parameters will be ignored.
    like : str or callable
        Either a format pattern (e.g., `".2f"`), a format string with fields named
        `x` and/or `pos` (e.g., `"${x:.2f}"`), or a callable with a signature like
        `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the
        tick value and `pos` is passed as the tick index.
    base : number
        Use log formatter (with scientific notation) having this value as the base.
        Set to `None` to override the default formatter with a log transform.
    unit : str or (str, str) tuple
        Use SI prefixes with these units (e.g., with `unit="g"`, a tick value of 5000
        will appear as `5 kg`). When a tuple, the first element gives the separator
        between the number and unit.

    Returns
    -------
    scale
        Copy of self with new label configuration.
    """
    # Input checks
    if formatter is not None and not isinstance(formatter, Formatter):
        raise TypeError(
            f"Label formatter must be an instance of {Formatter!r}, "
            f"not {type(formatter)!r}"
        )
    if like is not None and not (isinstance(like, str) or callable(like)):
        msg = f"`like` must be a string or callable, not {type(like).__name__}."
        raise TypeError(msg)

    new = copy(self)
    new._label_params = {
        "formatter": formatter,
        "like": like,
        "base": base,
        "unit": unit,
    }
    return new

function_name: Continuous.label
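Usage sketch for this record's function, assuming the seaborn.objects interface; the sample dataset and columns are illustrative, not part of the record:

import seaborn.objects as so
from seaborn import load_dataset

diamonds = load_dataset("diamonds")  # any data with numeric x/y works
(
    so.Plot(diamonds, x="carat", y="price")
    .add(so.Dots())
    .scale(y=so.Continuous().label(like="${x:.0f}"))  # format string with an `x` field
)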
repository: seaborn
repo_id: 1
target_module_path: seaborn/_core/plot.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Plot.add.txt
full_function:

def add(
    self,
    mark: Mark,
    *transforms: Stat | Move,
    orient: str | None = None,
    legend: bool = True,
    label: str | None = None,
    data: DataSource = None,
    **variables: VariableSpec,
) -> Plot:
    """
    Specify a layer of the visualization in terms of mark and data transform(s).

    This is the main method for specifying how the data should be visualized.
    It can be called multiple times with different arguments to define
    a plot with multiple layers.

    Parameters
    ----------
    mark : :class:`Mark`
        The visual representation of the data to use in this layer.
    transforms : :class:`Stat` or :class:`Move`
        Objects representing transforms to be applied before plotting the data.
        Currently, at most one :class:`Stat` can be used, and it must be passed
        first. This constraint will be relaxed in the future.
    orient : "x", "y", "v", or "h"
        The orientation of the mark, which also affects how transforms are computed.
        Typically corresponds to the axis that defines groups for aggregation.
        The "v" (vertical) and "h" (horizontal) options are synonyms for "x" / "y",
        but may be more intuitive with some marks. When not provided, an
        orientation will be inferred from characteristics of the data and scales.
    legend : bool
        Option to suppress the mark/mappings for this layer from the legend.
    label : str
        A label to use for the layer in the legend, independent of any mappings.
    data : DataFrame or dict
        Data source to override the global source provided in the constructor.
    variables : data vectors or identifiers
        Additional layer-specific variables, including variables that will be
        passed directly to the transforms without scaling.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.add.rst
    """
    if not isinstance(mark, Mark):
        msg = f"mark must be a Mark instance, not {type(mark)!r}."
        raise TypeError(msg)

    # TODO This API for transforms was a late decision, and previously Plot.add
    # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.
    # It will take some work to refactor the internals so that Stat and Move are
    # treated identically, and until then we'll need to "unpack" the transforms
    # here and enforce limitations on the order / types.

    stat: Optional[Stat]
    move: Optional[List[Move]]
    error = False
    if not transforms:
        stat, move = None, None
    elif isinstance(transforms[0], Stat):
        stat = transforms[0]
        move = [m for m in transforms[1:] if isinstance(m, Move)]
        error = len(move) != len(transforms) - 1
    else:
        stat = None
        move = [m for m in transforms if isinstance(m, Move)]
        error = len(move) != len(transforms)

    if error:
        msg = " ".join([
            "Transforms must have at most one Stat type (in the first position),",
            "and all others must be a Move type. Given transform type(s):",
            ", ".join(str(type(t).__name__) for t in transforms) + "."
        ])
        raise TypeError(msg)

    new = self._clone()
    new._layers.append({
        "mark": mark,
        "stat": stat,
        "move": move,
        # TODO it doesn't work to supply scalars to variables, but it should
        "vars": variables,
        "source": data,
        "legend": legend,
        "label": label,
        "orient": {"v": "x", "h": "y"}.get(orient, orient),  # type: ignore
    })

    return new

function_name: Plot.add
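Usage sketch showing the mark-then-transforms ordering described in the docstring; the dataset is illustrative:

import seaborn.objects as so
from seaborn import load_dataset

tips = load_dataset("tips")
(
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())                       # layer 1: raw observations
    .add(so.Line(), so.PolyFit(order=1))  # layer 2: the Stat comes first among transforms
)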
repository: seaborn
repo_id: 2
target_module_path: seaborn/_core/plot.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Plot.facet.txt
full_function:

def facet(
    self,
    col: VariableSpec = None,
    row: VariableSpec = None,
    order: OrderSpec | dict[str, OrderSpec] = None,
    wrap: int | None = None,
) -> Plot:
    """
    Produce subplots with conditional subsets of the data.

    Parameters
    ----------
    col, row : data vectors or identifiers
        Variables used to define subsets along the columns and/or rows of the grid.
        Can be references to the global data source passed in the constructor.
    order : list of strings, or dict with dimensional keys
        Define the order of the faceting variables.
    wrap : int
        When using only `col` or `row`, wrap subplots across a two-dimensional grid
        with this many subplots on the faceting dimension.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.facet.rst
    """
    variables: dict[str, VariableSpec] = {}
    if col is not None:
        variables["col"] = col
    if row is not None:
        variables["row"] = row

    structure = {}
    if isinstance(order, dict):
        for dim in ["col", "row"]:
            dim_order = order.get(dim)
            if dim_order is not None:
                structure[dim] = list(dim_order)
    elif order is not None:
        if col is not None and row is not None:
            err = " ".join([
                "When faceting on both col= and row=, passing `order` as a list",
                "is ambiguous. Use a dict with 'col' and/or 'row' keys instead.",
            ])
            raise RuntimeError(err)
        elif col is not None:
            structure["col"] = list(order)
        elif row is not None:
            structure["row"] = list(order)

    spec: FacetSpec = {
        "variables": variables,
        "structure": structure,
        "wrap": wrap,
    }

    new = self._clone()
    new._facet_spec.update(spec)

    return new

function_name: Plot.facet
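Usage sketch, continuing with the illustrative `tips` data from the previous example; with a single faceting dimension a plain list order is unambiguous:

(
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .facet(col="day", order=["Thur", "Fri", "Sat", "Sun"], wrap=2)
)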
repository: seaborn
repo_id: 3
target_module_path: seaborn/_core/plot.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Plot.on.txt
full_function:

def on(self, target: Axes | SubFigure | Figure) -> Plot:
    """
    Provide existing Matplotlib figure or axes for drawing the plot.

    When using this method, you will also need to explicitly call a method that
    triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you
    want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`
    first to compile the plot without rendering it.

    Parameters
    ----------
    target : Axes, SubFigure, or Figure
        Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add
        artists without otherwise modifying the figure. Otherwise, subplots will be
        created within the space of the given :class:`matplotlib.figure.Figure` or
        :class:`matplotlib.figure.SubFigure`.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.on.rst
    """
    accepted_types: tuple  # Allow tuple of various length
    accepted_types = (
        mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure
    )
    accepted_types_str = (
        f"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}"
    )

    if not isinstance(target, accepted_types):
        err = (
            f"The `Plot.on` target must be an instance of {accepted_types_str}. "
            f"You passed an instance of {target.__class__} instead."
        )
        raise TypeError(err)

    new = self._clone()
    new._target = target

    return new

function_name: Plot.on
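Usage sketch, triggering compilation explicitly as the docstring requires (illustrative data):

import matplotlib.pyplot as plt

f, ax = plt.subplots(figsize=(5, 4))
(
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .on(ax)   # draw into the existing Axes
    .plot()   # compile without rendering, so matplotlib postprocessing can follow
)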
repository: seaborn
repo_id: 4
target_module_path: seaborn/_core/plot.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Plot.pair.txt
full_function:

def pair(
    self,
    x: VariableSpecList = None,
    y: VariableSpecList = None,
    wrap: int | None = None,
    cross: bool = True,
) -> Plot:
    """
    Produce subplots by pairing multiple `x` and/or `y` variables.

    Parameters
    ----------
    x, y : sequence(s) of data vectors or identifiers
        Variables that will define the grid of subplots.
    wrap : int
        When using only `x` or `y`, "wrap" subplots across a two-dimensional grid
        with this many columns (when using `x`) or rows (when using `y`).
    cross : bool
        When False, zip the `x` and `y` lists such that the first subplot gets the
        first pair, the second gets the second pair, etc. Otherwise, create a
        two-dimensional grid from the cartesian product of the lists.

    Examples
    --------
    .. include:: ../docstrings/objects.Plot.pair.rst
    """
    # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows
    # This may also be possible by setting `wrap=1`, but is that too unobvious?
    # TODO PairGrid features not currently implemented: diagonals, corner

    pair_spec: PairSpec = {}

    axes = {"x": [] if x is None else x, "y": [] if y is None else y}
    for axis, arg in axes.items():
        if isinstance(arg, (str, int)):
            err = f"You must pass a sequence of variable keys to `{axis}`"
            raise TypeError(err)

    pair_spec["variables"] = {}
    pair_spec["structure"] = {}

    for axis in "xy":
        keys = []
        for i, col in enumerate(axes[axis]):
            key = f"{axis}{i}"
            keys.append(key)
            pair_spec["variables"][key] = col
        if keys:
            pair_spec["structure"][axis] = keys

    if not cross and len(axes["x"]) != len(axes["y"]):
        err = "Lengths of the `x` and `y` lists must match with cross=False"
        raise ValueError(err)

    pair_spec["cross"] = cross
    pair_spec["wrap"] = wrap

    new = self._clone()
    new._pair_spec.update(pair_spec)

    return new

function_name: Plot.pair
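Usage sketch; the `penguins` sample dataset and its columns are illustrative:

penguins = load_dataset("penguins")
(
    so.Plot(penguins, y="body_mass_g")
    .pair(x=["bill_length_mm", "bill_depth_mm", "flipper_length_mm"], wrap=2)
    .add(so.Dots())
)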
repository: seaborn
repo_id: 5
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__base.VectorPlotter._attach.txt
full_function:

def _attach(
    self,
    obj,
    allowed_types=None,
    log_scale=None,
):
    """Associate the plotter with an Axes manager and initialize its units.

    Parameters
    ----------
    obj : :class:`matplotlib.axes.Axes` or :class:`FacetGrid`
        Structural object that we will eventually plot onto.
    allowed_types : str or list of str
        If provided, raise when either the x or y variable does not have
        one of the declared seaborn types.
    log_scale : bool, number, or pair of bools or numbers
        If not False, set the axes to use log scaling, with the given base
        or defaulting to 10. If a tuple, interpreted as separate arguments
        for the x and y axes.
    """
    from .axisgrid import FacetGrid
    if isinstance(obj, FacetGrid):
        self.ax = None
        self.facets = obj
        ax_list = obj.axes.flatten()
        if obj.col_names is not None:
            self.var_levels["col"] = obj.col_names
        if obj.row_names is not None:
            self.var_levels["row"] = obj.row_names
    else:
        self.ax = obj
        self.facets = None
        ax_list = [obj]

    # Identify which "axis" variables we have defined
    axis_variables = set("xy").intersection(self.variables)

    # -- Verify the types of our x and y variables here.
    # This doesn't really make complete sense being here, but it's a fine
    # place for it, given the current system.
    # (Note that for some plots, there might be more complicated restrictions)
    # e.g. the categorical plots have their own check that is specific to the
    # non-categorical axis.
    if allowed_types is None:
        allowed_types = ["numeric", "datetime", "categorical"]
    elif isinstance(allowed_types, str):
        allowed_types = [allowed_types]

    for var in axis_variables:
        var_type = self.var_types[var]
        if var_type not in allowed_types:
            err = (
                f"The {var} variable is {var_type}, but one of "
                f"{allowed_types} is required"
            )
            raise TypeError(err)

    # -- Get axis objects for each row in plot_data for type conversions and scaling

    facet_dim = {"x": "col", "y": "row"}

    self.converters = {}
    for var in axis_variables:
        other_var = {"x": "y", "y": "x"}[var]

        converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)
        share_state = getattr(self.facets, f"_share{var}", True)

        # Simplest cases are that we have a single axes, all axes are shared,
        # or sharing is only on the orthogonal facet dimension. In these cases,
        # all datapoints get converted the same way, so use the first axis
        if share_state is True or share_state == facet_dim[other_var]:
            converter.loc[:] = getattr(ax_list[0], f"{var}axis")

        else:

            # Next simplest case is when no axes are shared, and we can
            # use the axis objects within each facet
            if share_state is False:
                for axes_vars, axes_data in self.iter_data():
                    ax = self._get_axes(axes_vars)
                    converter.loc[axes_data.index] = getattr(ax, f"{var}axis")

            # In the more complicated case, the axes are shared within each
            # "file" of the facetgrid. In that case, we need to subset the data
            # for that file and assign it the first axis in the slice of the grid
            else:
                names = getattr(self.facets, f"{share_state}_names")
                for i, level in enumerate(names):
                    idx = (i, 0) if share_state == "row" else (0, i)
                    axis = getattr(self.facets.axes[idx], f"{var}axis")
                    converter.loc[self.plot_data[share_state] == level] = axis

        # Store the converter vector, which we use elsewhere (e.g. comp_data)
        self.converters[var] = converter

        # Now actually update the matplotlib objects to do the conversion we want
        grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
        for converter, seed_data in grouped:
            if self.var_types[var] == "categorical":
                if self._var_ordered[var]:
                    order = self.var_levels[var]
                else:
                    order = None
                seed_data = categorical_order(seed_data, order)
            converter.update_units(seed_data)

    # -- Set numerical axis scales

    # First unpack the log_scale argument
    if log_scale is None:
        scalex = scaley = False
    else:
        # Allow single value or x, y tuple
        try:
            scalex, scaley = log_scale
        except TypeError:
            scalex = log_scale if self.var_types.get("x") == "numeric" else False
            scaley = log_scale if self.var_types.get("y") == "numeric" else False

    # Now use it
    for axis, scale in zip("xy", (scalex, scaley)):
        if scale:
            for ax in ax_list:
                set_scale = getattr(ax, f"set_{axis}scale")
                if scale is True:
                    set_scale("log", nonpositive="mask")
                else:
                    set_scale("log", base=scale, nonpositive="mask")

    # For categorical y, we want the "first" level to be at the top of the axis
    if self.var_types.get("y", None) == "categorical":
        for ax in ax_list:
            ax.yaxis.set_inverted(True)

    # TODO -- Add axes labels

function_name: VectorPlotter._attach
repository: seaborn
repo_id: 6
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_VectorPlotter.iter_data.txt
full_function:

def iter_data(
    self, grouping_vars=None, *,
    reverse=False, from_comp_data=False,
    by_facet=True, allow_empty=False, dropna=True,
):
    """Generator for getting subsets of data defined by semantic variables.

    Also injects "col" and "row" into grouping semantics.

    Parameters
    ----------
    grouping_vars : string or list of strings
        Semantic variables that define the subsets of data.
    reverse : bool
        If True, reverse the order of iteration.
    from_comp_data : bool
        If True, use self.comp_data rather than self.plot_data
    by_facet : bool
        If True, add faceting variables to the set of grouping variables.
    allow_empty : bool
        If True, yield an empty dataframe when no observations exist for
        combinations of grouping variables.
    dropna : bool
        If True, remove rows with missing data.

    Yields
    ------
    sub_vars : dict
        Keys are semantic names, values are the level of that semantic.
    sub_data : :class:`pandas.DataFrame`
        Subset of ``plot_data`` for this combination of semantic values.

    """
    # TODO should this default to using all (non x/y?) semantics?
    # or define grouping vars somewhere?
    if grouping_vars is None:
        grouping_vars = []
    elif isinstance(grouping_vars, str):
        grouping_vars = [grouping_vars]
    elif isinstance(grouping_vars, tuple):
        grouping_vars = list(grouping_vars)

    # Always insert faceting variables
    if by_facet:
        facet_vars = {"col", "row"}
        grouping_vars.extend(
            facet_vars & set(self.variables) - set(grouping_vars)
        )

    # Reduce to the semantics used in this plot
    grouping_vars = [var for var in grouping_vars if var in self.variables]

    if from_comp_data:
        data = self.comp_data
    else:
        data = self.plot_data

    if dropna:
        data = data.dropna()

    levels = self.var_levels.copy()
    if from_comp_data:
        for axis in {"x", "y"} & set(grouping_vars):
            converter = self.converters[axis].iloc[0]
            if self.var_types[axis] == "categorical":
                if self._var_ordered[axis]:
                    # If the axis is ordered, then the axes in a possible
                    # facet grid are by definition "shared", or there is a
                    # single axis with a unique cat -> idx mapping.
                    # So we can just take the first converter object.
                    levels[axis] = converter.convert_units(levels[axis])
                else:
                    # Otherwise, the mappings may not be unique, but we can
                    # use the unique set of index values in comp_data.
                    levels[axis] = np.sort(data[axis].unique())
            else:
                transform = converter.get_transform().transform
                levels[axis] = transform(converter.convert_units(levels[axis]))

    if grouping_vars:

        grouped_data = data.groupby(
            grouping_vars, sort=False, as_index=False, observed=False,
        )

        grouping_keys = []
        for var in grouping_vars:
            key = levels.get(var)
            grouping_keys.append([] if key is None else key)

        iter_keys = itertools.product(*grouping_keys)
        if reverse:
            iter_keys = reversed(list(iter_keys))

        for key in iter_keys:

            pd_key = (
                key[0] if len(key) == 1 and _version_predates(pd, "2.2.0")
                else key
            )
            try:
                data_subset = grouped_data.get_group(pd_key)
            except KeyError:
                # XXX we are adding this to allow backwards compatibility
                # with the empty artists that old categorical plots would
                # add (before 0.12), which we may decide to break, in which
                # case this option could be removed
                data_subset = data.loc[[]]

            if data_subset.empty and not allow_empty:
                continue

            sub_vars = dict(zip(grouping_vars, key))

            yield sub_vars, data_subset.copy()

    else:

        yield {}, data.copy()

function_name: VectorPlotter.iter_data
repository: seaborn
repo_id: 7
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__base.VectorPlotter.scale_categorical.txt
full_function:

def scale_categorical(self, axis, order=None, formatter=None):
    """
    Enforce categorical (fixed-scale) rules for the data on given axis.

    Parameters
    ----------
    axis : "x" or "y"
        Axis of the plot to operate on.
    order : list
        Order that unique values should appear in.
    formatter : callable
        Function mapping values to a string representation.

    Returns
    -------
    self

    """
    # This method both modifies the internal representation of the data
    # (converting it to string) and sets some attributes on self. It might be
    # a good idea to have a separate object attached to self that contains the
    # information in those attributes (i.e. whether to enforce variable order
    # across facets, the order to use) similar to the SemanticMapping objects
    # we have for semantic variables. That object could also hold the converter
    # objects that get used, if we can decouple those from an existing axis
    # (cf. https://github.com/matplotlib/matplotlib/issues/19229).
    # There are some interactions with faceting information that would need
    # to be thought through, since the converters to use depend on facets.
    # If we go that route, these methods could become "borrowed" methods similar
    # to what happens with the alternate semantic mapper constructors, although
    # that approach is kind of fussy and confusing.

    # TODO this method could also set the grid state? Since we like to have no
    # grid on the categorical axis by default. Again, a case where we'll need to
    # store information until we use it, so best to have a way to collect the
    # attributes that this method sets.

    # TODO if we are going to set visual properties of the axes with these methods,
    # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis

    # TODO another, and distinct idea, is to expose a cut= param here

    _check_argument("axis", ["x", "y"], axis)

    # Categorical plots can be "univariate" in which case they get an anonymous
    # category label on the opposite axis.
    if axis not in self.variables:
        self.variables[axis] = None
        self.var_types[axis] = "categorical"
        self.plot_data[axis] = ""

    # If the "categorical" variable has a numeric type, sort the rows so that
    # the default result from categorical_order has those values sorted after
    # they have been coerced to strings. The reason for this is so that later
    # we can get facet-wise orders that are correct.
    # XXX Should this also sort datetimes?
    # It feels more consistent, but technically will be a default change
    # If so, should also change categorical_order to behave that way
    if self.var_types[axis] == "numeric":
        self.plot_data = self.plot_data.sort_values(axis, kind="mergesort")

    # Now get a reference to the categorical data vector and remove na values
    cat_data = self.plot_data[axis].dropna()

    # Get the initial categorical order, which we do before string
    # conversion to respect the original types of the order list.
    # Track whether the order is given explicitly so that we can know
    # whether or not to use the order constructed here downstream
    self._var_ordered[axis] = order is not None or cat_data.dtype.name == "category"
    order = pd.Index(categorical_order(cat_data, order), name=axis)

    # Then convert data to strings. This is because in matplotlib,
    # "categorical" data really mean "string" data, so after doing this,
    # artists will be drawn on the categorical axis with a fixed scale.
    # TODO implement formatter here; check that it returns strings?
    if formatter is not None:
        cat_data = cat_data.map(formatter)
        order = order.map(formatter)
    else:
        cat_data = cat_data.astype(str)
        order = order.astype(str)

    # Update the levels list with the type-converted order variable
    self.var_levels[axis] = order

    # Now ensure that seaborn will use categorical rules internally
    self.var_types[axis] = "categorical"

    # Put the string-typed categorical vector back into the plot_data structure
    self.plot_data[axis] = cat_data

    return self

function_name: VectorPlotter.scale_categorical
repository: seaborn
repo_id: 8
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__base.categorical_order.txt
full_function:

def categorical_order(vector, order=None):
    """Return a list of unique data values.

    Determine an ordered list of levels in ``vector``.

    Parameters
    ----------
    vector : list, array, Categorical, or Series
        Vector of "categorical" values
    order : list-like, optional
        Desired order of category levels to override the order determined
        from the ``vector`` object.

    Returns
    -------
    order : list
        Ordered list of category levels not including null values.

    """
    if order is None:
        if hasattr(vector, "categories"):
            order = vector.categories
        else:
            try:
                order = vector.cat.categories
            except (TypeError, AttributeError):
                order = pd.Series(vector).unique()
                if variable_type(vector) == "numeric":
                    order = np.sort(order)

        order = filter(pd.notnull, order)
    return list(order)

function_name: _base.categorical_order
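Usage sketch; note this is a private module, so the import path may change across versions:

from seaborn._base import categorical_order
import pandas as pd

s = pd.Series(["b", "a", "c", "a"])
categorical_order(s)                     # ['b', 'a', 'c']: order of first appearance
categorical_order(s, ["c", "b", "a"])    # an explicit order is passed through unchanged
categorical_order(pd.Series([3, 1, 2]))  # [1, 2, 3]: numeric levels are sorted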
repository: seaborn
repo_id: 9
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_infer_orient.txt
full_function:

def infer_orient(x=None, y=None, orient=None, require_numeric=True):
    """Determine how the plot should be oriented based on the data.

    For historical reasons, the convention is to call a plot "horizontally"
    or "vertically" oriented based on the axis representing its dependent
    variable. Practically, this is used when determining the axis for
    numerical aggregation.

    Parameters
    ----------
    x, y : Vector data or None
        Positional data vectors for the plot.
    orient : string or None
        Specified orientation. If not None, can be "x" or "y", or otherwise
        must start with "v" or "h".
    require_numeric : bool
        If set, raise when the implied dependent variable is not numeric.

    Returns
    -------
    orient : "x" or "y"

    Raises
    ------
    ValueError: When `orient` is an unknown string.
    TypeError: When dependent variable is not numeric, with `require_numeric`

    """
    x_type = None if x is None else variable_type(x)
    y_type = None if y is None else variable_type(y)

    nonnumeric_dv_error = "{} orientation requires numeric `{}` variable."
    single_var_warning = "{} orientation ignored with only `{}` specified."

    if x is None:
        if str(orient).startswith("h"):
            warnings.warn(single_var_warning.format("Horizontal", "y"))
        if require_numeric and y_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
        return "x"

    elif y is None:
        if str(orient).startswith("v"):
            warnings.warn(single_var_warning.format("Vertical", "x"))
        if require_numeric and x_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
        return "y"

    elif str(orient).startswith("v") or orient == "x":
        if require_numeric and y_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
        return "x"

    elif str(orient).startswith("h") or orient == "y":
        if require_numeric and x_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
        return "y"

    elif orient is not None:
        err = (
            "`orient` must start with 'v' or 'h' or be None, "
            f"but `{repr(orient)}` was passed."
        )
        raise ValueError(err)

    elif x_type != "categorical" and y_type == "categorical":
        return "y"

    elif x_type != "numeric" and y_type == "numeric":
        return "x"

    elif x_type == "numeric" and y_type != "numeric":
        return "y"

    elif require_numeric and "numeric" not in (x_type, y_type):
        err = "Neither the `x` nor `y` variable appears to be numeric."
        raise TypeError(err)

    else:
        return "x"

function_name: _base.infer_orient
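Usage sketch (private module; import path may change across versions):

from seaborn._base import infer_orient
import pandas as pd

cats = pd.Series(["a", "b", "a"])
nums = pd.Series([1.0, 2.0, 3.0])
infer_orient(x=cats, y=nums)  # 'x': the numeric (dependent) variable sits on y
infer_orient(x=nums, y=cats)  # 'y': orientation flips with the data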
repository: seaborn
repo_id: 10
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__base.unique_dashes.txt
full_function:

def unique_dashes(n):
    """Build an arbitrarily long list of unique dash styles for lines.

    Parameters
    ----------
    n : int
        Number of unique dash specs to generate.

    Returns
    -------
    dashes : list of strings or tuples
        Valid arguments for the ``dashes`` parameter on
        :class:`matplotlib.lines.Line2D`. The first spec is a solid
        line (``""``), the remainder are sequences of long and short
        dashes.

    """
    # Start with dash specs that are well distinguishable
    dashes = [
        "",
        (4, 1.5),
        (1, 1),
        (3, 1.25, 1.5, 1.25),
        (5, 1, 1, 1),
    ]

    # Now programmatically build as many as we need
    p = 3
    while len(dashes) < n:

        # Take combinations of long and short dashes
        a = itertools.combinations_with_replacement([3, 1.25], p)
        b = itertools.combinations_with_replacement([4, 1], p)

        # Interleave the combinations, reversing one of the streams
        segment_list = itertools.chain(*zip(
            list(a)[1:-1][::-1],
            list(b)[1:-1]
        ))

        # Now insert the gaps
        for segments in segment_list:
            gap = min(segments)
            spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
            dashes.append(spec)

        p += 1

    return dashes[:n]

function_name: _base.unique_dashes
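Usage sketch (private module; import path may change across versions):

from seaborn._base import unique_dashes

unique_dashes(3)  # ['', (4, 1.5), (1, 1)]: solid line first, then on/off segment tuples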
repository: seaborn
repo_id: 11
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_unique_markers.txt
full_function:

def unique_markers(n):
    """Build an arbitrarily long list of unique marker styles for points.

    Parameters
    ----------
    n : int
        Number of unique marker specs to generate.

    Returns
    -------
    markers : list of string or tuples
        Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
        All markers will be filled.

    """
    # Start with marker specs that are well distinguishable
    markers = [
        "o",
        "X",
        (4, 0, 45),
        "P",
        (4, 0, 0),
        (4, 1, 0),
        "^",
        (4, 1, 45),
        "v",
    ]

    # Now generate more from regular polygons of increasing order
    s = 5
    while len(markers) < n:
        a = 360 / (s + 1) / 2
        markers.extend([
            (s + 1, 1, a),
            (s + 1, 0, a),
            (s, 1, 0),
            (s, 0, 0),
        ])
        s += 1

    # Convert to MarkerStyle object, using only exactly what we need
    # markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]

    return markers[:n]

function_name: _base.unique_markers
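Usage sketch (private module; import path may change across versions):

from matplotlib.markers import MarkerStyle
from seaborn._base import unique_markers

specs = unique_markers(12)                # 9 curated specs, then generated polygons
styles = [MarkerStyle(m) for m in specs]  # each spec is a valid MarkerStyle argument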
repository: seaborn
repo_id: 12
target_module_path: seaborn/_base.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_variable_type.txt
full_function:

def variable_type(vector, boolean_type="numeric"):
    """
    Determine whether a vector contains numeric, categorical, or datetime data.

    This function differs from the pandas typing API in two ways:

    - Python sequences or object-typed PyData objects are considered numeric if
      all of their entries are numeric.
    - String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.

    Parameters
    ----------
    vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
        Input data to test.
    boolean_type : 'numeric' or 'categorical'
        Type to use for vectors containing only 0s and 1s (and NAs).

    Returns
    -------
    var_type : 'numeric', 'categorical', or 'datetime'
        Name identifying the type of data in the vector.
    """
    vector = pd.Series(vector)

    # If a categorical dtype is set, infer categorical
    if isinstance(vector.dtype, pd.CategoricalDtype):
        return VariableType("categorical")

    # Special-case all-na data, which is always "numeric"
    if pd.isna(vector).all():
        return VariableType("numeric")

    # At this point, drop nans to simplify further type inference
    vector = vector.dropna()

    # Special-case binary/boolean data, allow caller to determine
    # This triggers a numpy warning when vector has strings/objects
    # https://github.com/numpy/numpy/issues/6784
    # Because we reduce with .all(), we are agnostic about whether the
    # comparison returns a scalar or vector, so we will ignore the warning.
    # It triggers a separate DeprecationWarning when the vector has datetimes:
    # https://github.com/numpy/numpy/issues/13548
    # This is considered a bug by numpy and will likely go away.
    with warnings.catch_warnings():
        warnings.simplefilter(
            action='ignore', category=(FutureWarning, DeprecationWarning)
        )
        try:
            if np.isin(vector, [0, 1]).all():
                return VariableType(boolean_type)
        except TypeError:
            # .isin comparison is not guaranteed to be possible under NumPy
            # casting rules, depending on the (unknown) dtype of 'vector'
            pass

    # Defer to positive pandas tests
    if pd.api.types.is_numeric_dtype(vector):
        return VariableType("numeric")

    if pd.api.types.is_datetime64_dtype(vector):
        return VariableType("datetime")

    # --- If we get to here, we need to check the entries

    # Check for a collection where everything is a number

    def all_numeric(x):
        for x_i in x:
            if not isinstance(x_i, Number):
                return False
        return True

    if all_numeric(vector):
        return VariableType("numeric")

    # Check for a collection where everything is a datetime

    def all_datetime(x):
        for x_i in x:
            if not isinstance(x_i, (datetime, np.datetime64)):
                return False
        return True

    if all_datetime(vector):
        return VariableType("datetime")

    # Otherwise, our final fallback is to consider things categorical
    return VariableType("categorical")

function_name: _base.variable_type
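Usage sketch (private module; import path may change across versions):

from seaborn._base import variable_type
import pandas as pd

variable_type(pd.Series([1.5, 2.0]))  # 'numeric'
variable_type(pd.Series(["a", "b"]))  # 'categorical'
variable_type(pd.Series([0, 1, 1]))   # 'numeric' by default...
variable_type(pd.Series([0, 1, 1]), boolean_type="categorical")  # ...but the caller decides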
repository: seaborn
repo_id: 13
target_module_path: seaborn/_statistics.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__statistics.LetterValues.__init__.txt
full_function:

def __init__(self, k_depth, outlier_prop, trust_alpha):
    """
    Compute percentiles of a distribution using various tail stopping rules.

    Parameters
    ----------
    k_depth: "tukey", "proportion", "trustworthy", or "full"
        Stopping rule for choosing tail percentiles to show:

        - tukey: Show a similar number of outliers as in a conventional boxplot.
        - proportion: Show approximately `outlier_prop` outliers.
        - trustworthy: Use `trust_alpha` level for most extreme tail percentile.

    outlier_prop: float
        Parameter for `k_depth="proportion"` setting the expected outlier rate.
    trust_alpha: float
        Parameter for `k_depth="trustworthy"` setting the confidence threshold.

    Notes
    -----
    Based on the proposal in this paper:
    https://vita.had.co.nz/papers/letter-value-plot.pdf

    """
    k_options = ["tukey", "proportion", "trustworthy", "full"]
    if isinstance(k_depth, str):
        _check_argument("k_depth", k_options, k_depth)
    elif not isinstance(k_depth, int):
        err = (
            "The `k_depth` parameter must be either an integer or string "
            f"(one of {k_options}), not {k_depth!r}."
        )
        raise TypeError(err)

    self.k_depth = k_depth
    self.outlier_prop = outlier_prop
    self.trust_alpha = trust_alpha

function_name: _statistics.LetterValues.__init__
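These stopping rules surface in the public API through boxenplot; a hedged sketch with illustrative data:

import seaborn as sns

tips = sns.load_dataset("tips")
sns.boxenplot(data=tips, x="day", y="total_bill",
              k_depth="trustworthy", trust_alpha=0.05)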
repository: seaborn
repo_id: 14
target_module_path: seaborn/_statistics.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests__statistics.WeightedAggregator.__init__.txt
full_function:

def __init__(self, estimator, errorbar=None, **boot_kws):
    """
    Data aggregator that produces a weighted estimate and error bar interval.

    Parameters
    ----------
    estimator : string
        Function (or method name) that maps a vector to a scalar. Currently
        supports only "mean".
    errorbar : string or (string, number) tuple
        Name of errorbar method or a tuple with a method name and a level
        parameter. Currently the only supported method is "ci".
    boot_kws
        Additional keywords are passed to bootstrap when error_method is "ci".

    """
    if estimator != "mean":
        # Note that, while other weighted estimators may make sense (e.g. median),
        # I'm not aware of an implementation in our dependencies. We can add one
        # in seaborn later, if there is sufficient interest. For now, limit to mean.
        raise ValueError(f"Weighted estimator must be 'mean', not {estimator!r}.")
    self.estimator = estimator

    method, level = _validate_errorbar_arg(errorbar)
    if method is not None and method != "ci":
        # As with the estimator, weighted 'sd' or 'pi' error bars may make sense.
        # But we'll keep things simple for now and limit to (bootstrap) CI.
        raise ValueError(f"Error bar method must be 'ci', not {method!r}.")
    self.error_method = method
    self.error_level = level

    self.boot_kws = boot_kws

function_name: _statistics.WeightedAggregator.__init__
repository: seaborn
repo_id: 15
target_module_path: seaborn/algorithms.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_algorithms.bootstrap.txt
full_function:

def bootstrap(*args, **kwargs):
    """Resample one or more arrays with replacement and store aggregate values.

    Positional arguments are a sequence of arrays to bootstrap along the first
    axis and pass to a summary function.

    Keyword arguments:
        n_boot : int, default=10000
            Number of iterations
        axis : int, default=None
            Will pass axis to ``func`` as a keyword argument.
        units : array, default=None
            Array of sampling unit IDs. When used the bootstrap resamples units
            and then observations within units instead of individual
            datapoints.
        func : string or callable, default="mean"
            Function to call on the args that are passed in. If string, uses as
            name of function in the numpy namespace. If nans are present in the
            data, will try to use nan-aware version of named function.
        seed : Generator | SeedSequence | RandomState | int | None
            Seed for the random number generator; useful if you want
            reproducible resamples.

    Returns
    -------
    boot_dist: array
        array of bootstrapped statistic values

    """
    # Ensure list of arrays are same length
    if len(np.unique(list(map(len, args)))) > 1:
        raise ValueError("All input arrays must have the same length")
    n = len(args[0])

    # Default keyword arguments
    n_boot = kwargs.get("n_boot", 10000)
    func = kwargs.get("func", "mean")
    axis = kwargs.get("axis", None)
    units = kwargs.get("units", None)
    random_seed = kwargs.get("random_seed", None)
    if random_seed is not None:
        msg = "`random_seed` has been renamed to `seed` and will be removed"
        warnings.warn(msg)
    seed = kwargs.get("seed", random_seed)
    if axis is None:
        func_kwargs = dict()
    else:
        func_kwargs = dict(axis=axis)

    # Initialize the resampler
    if isinstance(seed, np.random.RandomState):
        rng = seed
    else:
        rng = np.random.default_rng(seed)

    # Coerce to arrays
    args = list(map(np.asarray, args))
    if units is not None:
        units = np.asarray(units)

    if isinstance(func, str):

        # Allow named numpy functions
        f = getattr(np, func)

        # Try to use nan-aware version of function if necessary
        missing_data = np.isnan(np.sum(np.column_stack(args)))

        if missing_data and not func.startswith("nan"):
            nanf = getattr(np, f"nan{func}", None)
            if nanf is None:
                msg = f"Data contain nans but no nan-aware version of `{func}` found"
                warnings.warn(msg, UserWarning)
            else:
                f = nanf

    else:
        f = func

    # Handle numpy changes
    try:
        integers = rng.integers
    except AttributeError:
        integers = rng.randint

    # Do the bootstrap
    if units is not None:
        return _structured_bootstrap(args, n_boot, units, f, func_kwargs, integers)

    boot_dist = []
    for i in range(int(n_boot)):
        resampler = integers(0, n, n, dtype=np.intp)  # intp is indexing dtype
        sample = [a.take(resampler, axis=0) for a in args]
        boot_dist.append(f(*sample, **func_kwargs))
    return np.array(boot_dist)

function_name: algorithms.bootstrap
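Usage sketch with synthetic data:

import numpy as np
from seaborn.algorithms import bootstrap

x = np.random.default_rng(0).normal(10, 2, size=100)
boot_means = bootstrap(x, func="mean", n_boot=1000, seed=42)  # shape (1000,)
ci = np.percentile(boot_means, [2.5, 97.5])  # 95% bootstrap interval for the mean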
repository: seaborn
repo_id: 16
target_module_path: seaborn/axisgrid.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_axisgrid.FacetGrid.facet_data.txt
full_function:

def facet_data(self):
    """Generator for name indices and data subsets for each facet.

    Yields
    ------
    (i, j, k), data_ijk : tuple of ints, DataFrame
        The ints provide an index into the {row, col, hue}_names attribute,
        and the dataframe contains a subset of the full data corresponding
        to each facet. The generator yields subsets that correspond with
        the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
        is None.

    """
    data = self.data

    # Construct masks for the row variable
    if self.row_names:
        row_masks = [data[self._row_var] == n for n in self.row_names]
    else:
        row_masks = [np.repeat(True, len(self.data))]

    # Construct masks for the column variable
    if self.col_names:
        col_masks = [data[self._col_var] == n for n in self.col_names]
    else:
        col_masks = [np.repeat(True, len(self.data))]

    # Construct masks for the hue variable
    if self.hue_names:
        hue_masks = [data[self._hue_var] == n for n in self.hue_names]
    else:
        hue_masks = [np.repeat(True, len(self.data))]

    # Here is the main generator loop
    for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
                                                enumerate(col_masks),
                                                enumerate(hue_masks)):
        data_ijk = data[row & col & hue & self._not_na]
        yield (i, j, k), data_ijk

function_name: axisgrid.FacetGrid.facet_data
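Usage sketch with illustrative data:

import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, row="sex", col="time")
for (i, j, k), data_ijk in g.facet_data():
    print((i, j, k), len(data_ijk))  # one subset per (row, col, hue) combination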
repository: seaborn
repo_id: 17
target_module_path: seaborn/axisgrid.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_axisgrid.FacetGrid.map.txt
full_function:

def map(self, func, *args, **kwargs):
    """Apply a plotting function to each facet's subset of the data.

    Parameters
    ----------
    func : callable
        A plotting function that takes data and keyword arguments. It
        must plot to the currently active matplotlib Axes and take a
        `color` keyword argument. If faceting on the `hue` dimension,
        it must also take a `label` keyword argument.
    args : strings
        Column names in self.data that identify variables with data to
        plot. The data for each variable is passed to `func` in the
        order the variables are specified in the call.
    kwargs : keyword arguments
        All keyword arguments are passed to the plotting function.

    Returns
    -------
    self : object
        Returns self.

    """
    # If color was a keyword argument, grab it here
    kw_color = kwargs.pop("color", None)

    # How we use the function depends on where it comes from
    func_module = str(getattr(func, "__module__", ""))

    # Check for categorical plots without order information
    if func_module == "seaborn.categorical":
        if "order" not in kwargs:
            warning = ("Using the {} function without specifying "
                       "`order` is likely to produce an incorrect "
                       "plot.".format(func.__name__))
            warnings.warn(warning)
        if len(args) == 3 and "hue_order" not in kwargs:
            warning = ("Using the {} function without specifying "
                       "`hue_order` is likely to produce an incorrect "
                       "plot.".format(func.__name__))
            warnings.warn(warning)

    # Iterate over the data subsets
    for (row_i, col_j, hue_k), data_ijk in self.facet_data():

        # If this subset is null, move on
        if not data_ijk.values.size:
            continue

        # Get the current axis
        modify_state = not func_module.startswith("seaborn")
        ax = self.facet_axis(row_i, col_j, modify_state)

        # Decide what color to plot with
        kwargs["color"] = self._facet_color(hue_k, kw_color)

        # Insert the other hue aesthetics if appropriate
        for kw, val_list in self.hue_kws.items():
            kwargs[kw] = val_list[hue_k]

        # Insert a label in the keyword arguments for the legend
        if self._hue_var is not None:
            kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])

        # Get the actual data we are going to plot with
        plot_data = data_ijk[list(args)]
        if self._dropna:
            plot_data = plot_data.dropna()
        plot_args = [v for k, v in plot_data.items()]

        # Some matplotlib functions don't handle pandas objects correctly
        if func_module.startswith("matplotlib"):
            plot_args = [v.values for v in plot_args]

        # Draw the plot
        self._facet_plot(func, ax, plot_args, kwargs)

    # Finalize the annotations and layout
    self._finalize_grid(args[:2])

    return self

function_name: axisgrid.FacetGrid.map
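Usage sketch with illustrative data:

import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, col="time", hue="sex")
g.map(plt.scatter, "total_bill", "tip")  # column names are positional, per the docstring
g.add_legend()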
repository: seaborn
repo_id: 18
target_module_path: seaborn/axisgrid.py
prompt: signature and docstring (verbatim prefix of full_function below)
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_axisgrid.FacetGrid.map_dataframe.txt
full_function:

def map_dataframe(self, func, *args, **kwargs):
    """Like ``.map`` but passes args as strings and inserts data in kwargs.

    This method is suitable for plotting with functions that accept a
    long-form DataFrame as a `data` keyword argument and access the
    data in that DataFrame using string variable names.

    Parameters
    ----------
    func : callable
        A plotting function that takes data and keyword arguments. Unlike
        the `map` method, a function used here must "understand" Pandas
        objects. It also must plot to the currently active matplotlib Axes
        and take a `color` keyword argument. If faceting on the `hue`
        dimension, it must also take a `label` keyword argument.
    args : strings
        Column names in self.data that identify variables with data to
        plot. The data for each variable is passed to `func` in the
        order the variables are specified in the call.
    kwargs : keyword arguments
        All keyword arguments are passed to the plotting function.

    Returns
    -------
    self : object
        Returns self.

    """
    # If color was a keyword argument, grab it here
    kw_color = kwargs.pop("color", None)

    # Iterate over the data subsets
    for (row_i, col_j, hue_k), data_ijk in self.facet_data():

        # If this subset is null, move on
        if not data_ijk.values.size:
            continue

        # Get the current axis
        modify_state = not str(func.__module__).startswith("seaborn")
        ax = self.facet_axis(row_i, col_j, modify_state)

        # Decide what color to plot with
        kwargs["color"] = self._facet_color(hue_k, kw_color)

        # Insert the other hue aesthetics if appropriate
        for kw, val_list in self.hue_kws.items():
            kwargs[kw] = val_list[hue_k]

        # Insert a label in the keyword arguments for the legend
        if self._hue_var is not None:
            kwargs["label"] = self.hue_names[hue_k]

        # Stick the facet dataframe into the kwargs
        if self._dropna:
            data_ijk = data_ijk.dropna()
        kwargs["data"] = data_ijk

        # Draw the plot
        self._facet_plot(func, ax, args, kwargs)

    # For axis labels, prefer to use positional args for backcompat
    # but also extract the x/y kwargs and use if no corresponding arg
    axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
    for i, val in enumerate(args[:2]):
        axis_labels[i] = val

    self._finalize_grid(axis_labels)

    return self

function_name: axisgrid.FacetGrid.map_dataframe
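Usage sketch, continuing with the illustrative `tips` data:

g = sns.FacetGrid(tips, col="time")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")  # each facet's DataFrame is injected as data=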
seaborn
19
seaborn/axisgrid.py
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws): """Add a reference line(s) to each facet. Parameters ---------- x, y : numeric Value(s) to draw the line(s) at. color : :mod:`matplotlib color <matplotlib.colors>` Specifies the color of the reference line(s). Pass ``color=None`` to use ``hue`` mapping. linestyle : str Specifies the style of the reference line(s). line_kws : key, value mappings Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline` when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y`` is not None. Returns ------- :class:`FacetGrid` instance Returns ``self`` for easy method chaining. """
/usr/src/app/target_test_cases/failed_tests_axisgrid.FacetGrid.refline.txt
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws): """Add a reference line(s) to each facet. Parameters ---------- x, y : numeric Value(s) to draw the line(s) at. color : :mod:`matplotlib color <matplotlib.colors>` Specifies the color of the reference line(s). Pass ``color=None`` to use ``hue`` mapping. linestyle : str Specifies the style of the reference line(s). line_kws : key, value mappings Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline` when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y`` is not None. Returns ------- :class:`FacetGrid` instance Returns ``self`` for easy method chaining. """ line_kws['color'] = color line_kws['linestyle'] = linestyle if x is not None: self.map(plt.axvline, x=x, **line_kws) if y is not None: self.map(plt.axhline, y=y, **line_kws) return self
axisgrid.FacetGrid.refline
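A short sketch of ``refline`` in use (same assumed example dataset as above):

import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, col="sex")
g.map_dataframe(sns.histplot, x="total_bill")
# Draw one vertical reference line per facet at the overall median
g.refline(x=tips["total_bill"].median())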
seaborn
20
seaborn/axisgrid.py
def set_titles(self, template=None, row_template=None, col_template=None, **kwargs):
    """Draw titles either above each facet or on the grid margins.

    Parameters
    ----------
    template : string
        Template for all titles with the formatting keys {col_var} and
        {col_name} (if using a `col` faceting variable) and/or {row_var}
        and {row_name} (if using a `row` faceting variable).
    row_template:
        Template for the row variable when titles are drawn on the grid
        margins. Must have {row_var} and {row_name} formatting keys.
    col_template:
        Template for the column variable when titles are drawn on the grid
        margins. Must have {col_var} and {col_name} formatting keys.

    Returns
    -------
    self: object
        Returns self.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.FacetGrid.set_titles.txt
def set_titles(self, template=None, row_template=None, col_template=None, **kwargs):
    """Draw titles either above each facet or on the grid margins.

    Parameters
    ----------
    template : string
        Template for all titles with the formatting keys {col_var} and
        {col_name} (if using a `col` faceting variable) and/or {row_var}
        and {row_name} (if using a `row` faceting variable).
    row_template:
        Template for the row variable when titles are drawn on the grid
        margins. Must have {row_var} and {row_name} formatting keys.
    col_template:
        Template for the column variable when titles are drawn on the grid
        margins. Must have {col_var} and {col_name} formatting keys.

    Returns
    -------
    self: object
        Returns self.

    """
    args = dict(row_var=self._row_var, col_var=self._col_var)
    kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])

    # Establish default templates
    if row_template is None:
        row_template = "{row_var} = {row_name}"
    if col_template is None:
        col_template = "{col_var} = {col_name}"
    if template is None:
        if self._row_var is None:
            template = col_template
        elif self._col_var is None:
            template = row_template
        else:
            template = " | ".join([row_template, col_template])

    row_template = utils.to_utf8(row_template)
    col_template = utils.to_utf8(col_template)
    template = utils.to_utf8(template)

    if self._margin_titles:

        # Remove any existing title texts
        for text in self._margin_titles_texts:
            text.remove()
        self._margin_titles_texts = []

        if self.row_names is not None:
            # Draw the row titles on the right edge of the grid
            for i, row_name in enumerate(self.row_names):
                ax = self.axes[i, -1]
                args.update(dict(row_name=row_name))
                title = row_template.format(**args)
                text = ax.annotate(
                    title, xy=(1.02, .5), xycoords="axes fraction",
                    rotation=270, ha="left", va="center",
                    **kwargs
                )
                self._margin_titles_texts.append(text)

        if self.col_names is not None:
            # Draw the column titles as normal titles
            for j, col_name in enumerate(self.col_names):
                args.update(dict(col_name=col_name))
                title = col_template.format(**args)
                self.axes[0, j].set_title(title, **kwargs)

        return self

    # Otherwise title each facet with all the necessary information
    if (self._row_var is not None) and (self._col_var is not None):
        for i, row_name in enumerate(self.row_names):
            for j, col_name in enumerate(self.col_names):
                args.update(dict(row_name=row_name, col_name=col_name))
                title = template.format(**args)
                self.axes[i, j].set_title(title, **kwargs)
    elif self.row_names is not None and len(self.row_names):
        for i, row_name in enumerate(self.row_names):
            args.update(dict(row_name=row_name))
            title = template.format(**args)
            self.axes[i, 0].set_title(title, **kwargs)
    elif self.col_names is not None and len(self.col_names):
        for i, col_name in enumerate(self.col_names):
            args.update(dict(col_name=col_name))
            title = template.format(**args)
            # Index the flat array so col_wrap works
            self.axes.flat[i].set_title(title, **kwargs)

    return self
axisgrid.FacetGrid.set_titles
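A sketch of margin titles with custom templates (the template keys shown are the documented ones; the dataset is an assumption):

import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
# Show only the level names, dropping the "var = " prefix
g.set_titles(row_template="{row_name}", col_template="{col_name}")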
seaborn
21
seaborn/axisgrid.py
def add_legend(self, legend_data=None, title=None, label_order=None,
               adjust_subtitles=False, **kwargs):
    """Draw a legend, maybe placing it outside axes and resizing the figure.

    Parameters
    ----------
    legend_data : dict
        Dictionary mapping label names (or two-element tuples where the
        second element is a label name) to matplotlib artist handles. The
        default reads from ``self._legend_data``.
    title : string
        Title for the legend. The default reads from ``self._hue_var``.
    label_order : list of labels
        The order that the legend entries should appear in. The default
        reads from ``self.hue_names``.
    adjust_subtitles : bool
        If True, modify entries with invisible artists to left-align
        the labels and set the font size to that of a title.
    kwargs : key, value pairings
        Other keyword arguments are passed to the underlying legend methods
        on the Figure or Axes object.

    Returns
    -------
    self : Grid instance
        Returns self for easy chaining.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.Grid.add_legend.txt
def add_legend(self, legend_data=None, title=None, label_order=None,
               adjust_subtitles=False, **kwargs):
    """Draw a legend, maybe placing it outside axes and resizing the figure.

    Parameters
    ----------
    legend_data : dict
        Dictionary mapping label names (or two-element tuples where the
        second element is a label name) to matplotlib artist handles. The
        default reads from ``self._legend_data``.
    title : string
        Title for the legend. The default reads from ``self._hue_var``.
    label_order : list of labels
        The order that the legend entries should appear in. The default
        reads from ``self.hue_names``.
    adjust_subtitles : bool
        If True, modify entries with invisible artists to left-align
        the labels and set the font size to that of a title.
    kwargs : key, value pairings
        Other keyword arguments are passed to the underlying legend methods
        on the Figure or Axes object.

    Returns
    -------
    self : Grid instance
        Returns self for easy chaining.

    """
    # Find the data for the legend
    if legend_data is None:
        legend_data = self._legend_data
    if label_order is None:
        if self.hue_names is None:
            label_order = list(legend_data.keys())
        else:
            label_order = list(map(utils.to_utf8, self.hue_names))

    blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
    handles = [legend_data.get(lab, blank_handle) for lab in label_order]
    title = self._hue_var if title is None else title
    title_size = mpl.rcParams["legend.title_fontsize"]

    # Unpack nested labels from a hierarchical legend
    labels = []
    for entry in label_order:
        if isinstance(entry, tuple):
            _, label = entry
        else:
            label = entry
        labels.append(label)

    # Set default legend kwargs
    kwargs.setdefault("scatterpoints", 1)

    if self._legend_out:

        kwargs.setdefault("frameon", False)
        kwargs.setdefault("loc", "center right")

        # Draw a full-figure legend outside the grid
        figlegend = self._figure.legend(handles, labels, **kwargs)

        self._legend = figlegend
        figlegend.set_title(title, prop={"size": title_size})

        if adjust_subtitles:
            adjust_legend_subtitles(figlegend)

        # Draw the plot to set the bounding boxes correctly
        _draw_figure(self._figure)

        # Calculate and set the new width of the figure so the legend fits
        legend_width = figlegend.get_window_extent().width / self._figure.dpi
        fig_width, fig_height = self._figure.get_size_inches()
        self._figure.set_size_inches(fig_width + legend_width, fig_height)

        # Draw the plot again to get the new transformations
        _draw_figure(self._figure)

        # Now calculate how much space we need on the right side
        legend_width = figlegend.get_window_extent().width / self._figure.dpi
        space_needed = legend_width / (fig_width + legend_width)
        margin = .04 if self._margin_titles else .01
        self._space_needed = margin + space_needed
        right = 1 - self._space_needed

        # Place the subplot axes to give space for the legend
        self._figure.subplots_adjust(right=right)
        self._tight_layout_rect[2] = right

    else:
        # Draw a legend in the first axis
        ax = self.axes.flat[0]
        kwargs.setdefault("loc", "best")

        leg = ax.legend(handles, labels, **kwargs)
        leg.set_title(title, prop={"size": title_size})
        self._legend = leg

        if adjust_subtitles:
            adjust_legend_subtitles(leg)

    return self
axisgrid.Grid.add_legend
seaborn
22
seaborn/axisgrid.py
def tick_params(self, axis='both', **kwargs):
    """Modify the ticks, tick labels, and gridlines.

    Parameters
    ----------
    axis : {'x', 'y', 'both'}
        The axis on which to apply the formatting.
    kwargs : keyword arguments
        Additional keyword arguments to pass to
        :meth:`matplotlib.axes.Axes.tick_params`.

    Returns
    -------
    self : Grid instance
        Returns self for easy chaining.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.Grid.tick_params.txt
def tick_params(self, axis='both', **kwargs):
    """Modify the ticks, tick labels, and gridlines.

    Parameters
    ----------
    axis : {'x', 'y', 'both'}
        The axis on which to apply the formatting.
    kwargs : keyword arguments
        Additional keyword arguments to pass to
        :meth:`matplotlib.axes.Axes.tick_params`.

    Returns
    -------
    self : Grid instance
        Returns self for easy chaining.

    """
    for ax in self.figure.axes:
        ax.tick_params(axis=axis, **kwargs)
    return self
axisgrid.Grid.tick_params
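Since ``tick_params`` simply forwards to every Axes in the figure, a usage sketch might look like this (dataset assumed as before):

import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, col="smoker")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
# Rotate and shrink the x tick labels on all facets at once
g.tick_params(axis="x", labelrotation=45, labelsize=8)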
seaborn
23
seaborn/axisgrid.py
def plot(self, joint_func, marginal_func, **kwargs):
    """Draw the plot by passing functions for joint and marginal axes.

    This method passes the ``kwargs`` dictionary to both functions. If you
    need more control, call :meth:`JointGrid.plot_joint` and
    :meth:`JointGrid.plot_marginals` directly with specific parameters.

    Parameters
    ----------
    joint_func, marginal_func : callables
        Functions to draw the bivariate and univariate plots. See methods
        referenced above for information about the required characteristics
        of these functions.
    kwargs
        Additional keyword arguments are passed to both functions.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.JointGrid.plot.txt
def plot(self, joint_func, marginal_func, **kwargs):
    """Draw the plot by passing functions for joint and marginal axes.

    This method passes the ``kwargs`` dictionary to both functions. If you
    need more control, call :meth:`JointGrid.plot_joint` and
    :meth:`JointGrid.plot_marginals` directly with specific parameters.

    Parameters
    ----------
    joint_func, marginal_func : callables
        Functions to draw the bivariate and univariate plots. See methods
        referenced above for information about the required characteristics
        of these functions.
    kwargs
        Additional keyword arguments are passed to both functions.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
    self.plot_marginals(marginal_func, **kwargs)
    self.plot_joint(joint_func, **kwargs)
    return self
axisgrid.JointGrid.plot
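A compact sketch of the convenience method (the "penguins" example dataset is an assumption):

import seaborn as sns

penguins = sns.load_dataset("penguins")
g = sns.JointGrid(data=penguins, x="bill_length_mm", y="bill_depth_mm")
# The same kwargs go to both functions, so keep them broadly applicable
g.plot(sns.scatterplot, sns.histplot)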
seaborn
24
seaborn/axisgrid.py
def plot_joint(self, func, **kwargs):
    """Draw a bivariate plot on the joint axes of the grid.

    Parameters
    ----------
    func : plotting callable
        If a seaborn function, it should accept ``x`` and ``y``. Otherwise,
        it must accept ``x`` and ``y`` vectors of data as the first two
        positional arguments, and it must plot on the "current" axes.
        If ``hue`` was defined in the class constructor, the function must
        accept ``hue`` as a parameter.
    kwargs
        Keyword arguments are passed to the plotting function.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.JointGrid.plot_joint.txt
def plot_joint(self, func, **kwargs):
    """Draw a bivariate plot on the joint axes of the grid.

    Parameters
    ----------
    func : plotting callable
        If a seaborn function, it should accept ``x`` and ``y``. Otherwise,
        it must accept ``x`` and ``y`` vectors of data as the first two
        positional arguments, and it must plot on the "current" axes.
        If ``hue`` was defined in the class constructor, the function must
        accept ``hue`` as a parameter.
    kwargs
        Keyword arguments are passed to the plotting function.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
    kwargs = kwargs.copy()
    if str(func.__module__).startswith("seaborn"):
        kwargs["ax"] = self.ax_joint
    else:
        plt.sca(self.ax_joint)
    if self.hue is not None:
        kwargs["hue"] = self.hue
        self._inject_kwargs(func, kwargs, self._hue_params)

    if str(func.__module__).startswith("seaborn"):
        func(x=self.x, y=self.y, **kwargs)
    else:
        func(self.x, self.y, **kwargs)

    return self
axisgrid.JointGrid.plot_joint
seaborn
25
seaborn/axisgrid.py
def plot_marginals(self, func, **kwargs):
    """Draw univariate plots on each of the marginal axes.

    Parameters
    ----------
    func : plotting callable
        If a seaborn function, it should accept ``x`` and ``y`` and plot
        when only one of them is defined. Otherwise, it must accept a vector
        of data as the first positional argument and determine its
        orientation using the ``vertical`` parameter, and it must plot on
        the "current" axes. If ``hue`` was defined in the class constructor,
        it must accept ``hue`` as a parameter.
    kwargs
        Keyword arguments are passed to the plotting function.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.JointGrid.plot_marginals.txt
def plot_marginals(self, func, **kwargs):
    """Draw univariate plots on each of the marginal axes.

    Parameters
    ----------
    func : plotting callable
        If a seaborn function, it should accept ``x`` and ``y`` and plot
        when only one of them is defined. Otherwise, it must accept a vector
        of data as the first positional argument and determine its
        orientation using the ``vertical`` parameter, and it must plot on
        the "current" axes. If ``hue`` was defined in the class constructor,
        it must accept ``hue`` as a parameter.
    kwargs
        Keyword arguments are passed to the plotting function.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
    seaborn_func = (
        str(func.__module__).startswith("seaborn")
        # deprecated distplot has a legacy API, special case it
        and not func.__name__ == "distplot"
    )
    func_params = signature(func).parameters
    kwargs = kwargs.copy()
    if self.hue is not None:
        kwargs["hue"] = self.hue
        self._inject_kwargs(func, kwargs, self._hue_params)

    if "legend" in func_params:
        kwargs.setdefault("legend", False)

    if "orientation" in func_params:
        # e.g. plt.hist
        orient_kw_x = {"orientation": "vertical"}
        orient_kw_y = {"orientation": "horizontal"}
    elif "vertical" in func_params:
        # e.g. sns.distplot (also how did this get backwards?)
        orient_kw_x = {"vertical": False}
        orient_kw_y = {"vertical": True}

    if seaborn_func:
        func(x=self.x, ax=self.ax_marg_x, **kwargs)
    else:
        plt.sca(self.ax_marg_x)
        func(self.x, **orient_kw_x, **kwargs)

    if seaborn_func:
        func(y=self.y, ax=self.ax_marg_y, **kwargs)
    else:
        plt.sca(self.ax_marg_y)
        func(self.y, **orient_kw_y, **kwargs)

    self.ax_marg_x.yaxis.get_label().set_visible(False)
    self.ax_marg_y.xaxis.get_label().set_visible(False)

    return self
axisgrid.JointGrid.plot_marginals
seaborn
26
seaborn/axisgrid.py
def refline(
    self, *, x=None, y=None, joint=True, marginal=True,
    color='.5', linestyle='--', **line_kws
):
    """Add a reference line(s) to joint and/or marginal axes.

    Parameters
    ----------
    x, y : numeric
        Value(s) to draw the line(s) at.
    joint, marginal : bools
        Whether to add the reference line(s) to the joint/marginal axes.
    color : :mod:`matplotlib color <matplotlib.colors>`
        Specifies the color of the reference line(s).
    linestyle : str
        Specifies the style of the reference line(s).
    line_kws : key, value mappings
        Other keyword arguments are passed to
        :meth:`matplotlib.axes.Axes.axvline` when ``x`` is not None and
        :meth:`matplotlib.axes.Axes.axhline` when ``y`` is not None.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.JointGrid.refline.txt
def refline(
    self, *, x=None, y=None, joint=True, marginal=True,
    color='.5', linestyle='--', **line_kws
):
    """Add a reference line(s) to joint and/or marginal axes.

    Parameters
    ----------
    x, y : numeric
        Value(s) to draw the line(s) at.
    joint, marginal : bools
        Whether to add the reference line(s) to the joint/marginal axes.
    color : :mod:`matplotlib color <matplotlib.colors>`
        Specifies the color of the reference line(s).
    linestyle : str
        Specifies the style of the reference line(s).
    line_kws : key, value mappings
        Other keyword arguments are passed to
        :meth:`matplotlib.axes.Axes.axvline` when ``x`` is not None and
        :meth:`matplotlib.axes.Axes.axhline` when ``y`` is not None.

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
    line_kws['color'] = color
    line_kws['linestyle'] = linestyle

    if x is not None:
        if joint:
            self.ax_joint.axvline(x, **line_kws)
        if marginal:
            self.ax_marg_x.axvline(x, **line_kws)

    if y is not None:
        if joint:
            self.ax_joint.axhline(y, **line_kws)
        if marginal:
            self.ax_marg_y.axhline(y, **line_kws)

    return self
axisgrid.JointGrid.refline
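A sketch that marks the medians on both the joint and marginal axes (dataset assumed as above):

import seaborn as sns

penguins = sns.load_dataset("penguins")
g = sns.JointGrid(data=penguins, x="bill_length_mm", y="bill_depth_mm")
g.plot(sns.scatterplot, sns.histplot)
# Vertical line appears on ax_joint and ax_marg_x; horizontal on ax_joint and ax_marg_y
g.refline(x=penguins["bill_length_mm"].median(),
          y=penguins["bill_depth_mm"].median())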
seaborn
27
seaborn/axisgrid.py
def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
    """Set axis labels on the bivariate axes.

    Parameters
    ----------
    xlabel, ylabel : strings
        Label names for the x and y variables.
    kwargs : key, value mappings
        Other keyword arguments are passed to the following functions:

        - :meth:`matplotlib.axes.Axes.set_xlabel`
        - :meth:`matplotlib.axes.Axes.set_ylabel`

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.JointGrid.set_axis_labels.txt
def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
    """Set axis labels on the bivariate axes.

    Parameters
    ----------
    xlabel, ylabel : strings
        Label names for the x and y variables.
    kwargs : key, value mappings
        Other keyword arguments are passed to the following functions:

        - :meth:`matplotlib.axes.Axes.set_xlabel`
        - :meth:`matplotlib.axes.Axes.set_ylabel`

    Returns
    -------
    :class:`JointGrid` instance
        Returns ``self`` for easy method chaining.

    """
    self.ax_joint.set_xlabel(xlabel, **kwargs)
    self.ax_joint.set_ylabel(ylabel, **kwargs)
    return self
axisgrid.JointGrid.set_axis_labels
seaborn
28
seaborn/axisgrid.py
def __init__(
    self, data, *, hue=None, vars=None, x_vars=None, y_vars=None,
    hue_order=None, palette=None, hue_kws=None, corner=False,
    diag_sharey=True, height=2.5, aspect=1, layout_pad=.5,
    despine=True, dropna=False,
):
    """Initialize the plot figure and PairGrid object.

    Parameters
    ----------
    data : DataFrame
        Tidy (long-form) dataframe where each column is a variable and
        each row is an observation.
    hue : string (variable name)
        Variable in ``data`` to map plot aspects to different colors. This
        variable will be excluded from the default x and y variables.
    vars : list of variable names
        Variables within ``data`` to use, otherwise use every column with
        a numeric datatype.
    {x, y}_vars : lists of variable names
        Variables within ``data`` to use separately for the rows and
        columns of the figure; i.e. to make a non-square plot.
    hue_order : list of strings
        Order for the levels of the hue variable in the palette
    palette : dict or seaborn color palette
        Set of colors for mapping the ``hue`` variable. If a dict, keys
        should be values in the ``hue`` variable.
    hue_kws : dictionary of param -> list of values mapping
        Other keyword arguments to insert into the plotting call to let
        other plot attributes vary across levels of the hue variable (e.g.
        the markers in a scatterplot).
    corner : bool
        If True, don't add axes to the upper (off-diagonal) triangle of the
        grid, making this a "corner" plot.
    height : scalar
        Height (in inches) of each facet.
    aspect : scalar
        Aspect * height gives the width (in inches) of each facet.
    layout_pad : scalar
        Padding between axes; passed to ``fig.tight_layout``.
    despine : boolean
        Remove the top and right spines from the plots.
    dropna : boolean
        Drop missing values from the data before plotting.

    See Also
    --------
    pairplot : Easily drawing common uses of :class:`PairGrid`.
    FacetGrid : Subplot grid for plotting conditional relationships.

    Examples
    --------

    .. include:: ../docstrings/PairGrid.rst

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.PairGrid.__init__.txt
def __init__(
    self, data, *, hue=None, vars=None, x_vars=None, y_vars=None,
    hue_order=None, palette=None, hue_kws=None, corner=False,
    diag_sharey=True, height=2.5, aspect=1, layout_pad=.5,
    despine=True, dropna=False,
):
    """Initialize the plot figure and PairGrid object.

    Parameters
    ----------
    data : DataFrame
        Tidy (long-form) dataframe where each column is a variable and
        each row is an observation.
    hue : string (variable name)
        Variable in ``data`` to map plot aspects to different colors. This
        variable will be excluded from the default x and y variables.
    vars : list of variable names
        Variables within ``data`` to use, otherwise use every column with
        a numeric datatype.
    {x, y}_vars : lists of variable names
        Variables within ``data`` to use separately for the rows and
        columns of the figure; i.e. to make a non-square plot.
    hue_order : list of strings
        Order for the levels of the hue variable in the palette
    palette : dict or seaborn color palette
        Set of colors for mapping the ``hue`` variable. If a dict, keys
        should be values in the ``hue`` variable.
    hue_kws : dictionary of param -> list of values mapping
        Other keyword arguments to insert into the plotting call to let
        other plot attributes vary across levels of the hue variable (e.g.
        the markers in a scatterplot).
    corner : bool
        If True, don't add axes to the upper (off-diagonal) triangle of the
        grid, making this a "corner" plot.
    height : scalar
        Height (in inches) of each facet.
    aspect : scalar
        Aspect * height gives the width (in inches) of each facet.
    layout_pad : scalar
        Padding between axes; passed to ``fig.tight_layout``.
    despine : boolean
        Remove the top and right spines from the plots.
    dropna : boolean
        Drop missing values from the data before plotting.

    See Also
    --------
    pairplot : Easily drawing common uses of :class:`PairGrid`.
    FacetGrid : Subplot grid for plotting conditional relationships.

    Examples
    --------

    .. include:: ../docstrings/PairGrid.rst

    """
    super().__init__()
    data = handle_data_source(data)

    # Sort out the variables that define the grid
    numeric_cols = self._find_numeric_cols(data)
    if hue in numeric_cols:
        numeric_cols.remove(hue)
    if vars is not None:
        x_vars = list(vars)
        y_vars = list(vars)
    if x_vars is None:
        x_vars = numeric_cols
    if y_vars is None:
        y_vars = numeric_cols

    if np.isscalar(x_vars):
        x_vars = [x_vars]
    if np.isscalar(y_vars):
        y_vars = [y_vars]

    self.x_vars = x_vars = list(x_vars)
    self.y_vars = y_vars = list(y_vars)
    self.square_grid = self.x_vars == self.y_vars

    if not x_vars:
        raise ValueError("No variables found for grid columns.")
    if not y_vars:
        raise ValueError("No variables found for grid rows.")

    # Create the figure and the array of subplots
    figsize = len(x_vars) * height * aspect, len(y_vars) * height

    with _disable_autolayout():
        fig = plt.figure(figsize=figsize)

    axes = fig.subplots(len(y_vars), len(x_vars),
                        sharex="col", sharey="row",
                        squeeze=False)

    # Possibly remove upper axes to make a corner grid
    # Note: setting up the axes is usually the most time-intensive part
    # of using the PairGrid. We are foregoing the speed improvement that
    # we would get by just not setting up the hidden axes so that we can
    # avoid implementing fig.subplots ourselves. But worth thinking about.
    self._corner = corner
    if corner:
        hide_indices = np.triu_indices_from(axes, 1)
        for i, j in zip(*hide_indices):
            axes[i, j].remove()
            axes[i, j] = None

    self._figure = fig
    self.axes = axes
    self.data = data

    # Save what we are going to do with the diagonal
    self.diag_sharey = diag_sharey
    self.diag_vars = None
    self.diag_axes = None

    self._dropna = dropna

    # Label the axes
    self._add_axis_labels()

    # Sort out the hue variable
    self._hue_var = hue
    if hue is None:
        self.hue_names = hue_order = ["_nolegend_"]
        self.hue_vals = pd.Series(["_nolegend_"] * len(data),
                                  index=data.index)
    else:
        # We need hue_order and hue_names because the former is used to control
        # the order of drawing and the latter is used to control the order of
        # the legend. hue_names can become string-typed while hue_order must
        # retain the type of the input data. This is messy but results from
        # the fact that PairGrid can implement the hue-mapping logic itself
        # (and was originally written exclusively that way) but now can delegate
        # to the axes-level functions, while always handling legend creation.
        # See GH2307
        hue_names = hue_order = categorical_order(data[hue], hue_order)
        if dropna:
            # Filter NA from the list of unique hue names
            hue_names = list(filter(pd.notnull, hue_names))
        self.hue_names = hue_names
        self.hue_vals = data[hue]

    # Additional dict of kwarg -> list of values for mapping the hue var
    self.hue_kws = hue_kws if hue_kws is not None else {}

    self._orig_palette = palette
    self._hue_order = hue_order
    self.palette = self._get_palette(data, hue, hue_order, palette)
    self._legend_data = {}

    # Make the plot look nice
    for ax in axes[:-1, :].flat:
        if ax is None:
            continue
        for label in ax.get_xticklabels():
            label.set_visible(False)
        ax.xaxis.offsetText.set_visible(False)
        ax.xaxis.label.set_visible(False)

    for ax in axes[:, 1:].flat:
        if ax is None:
            continue
        for label in ax.get_yticklabels():
            label.set_visible(False)
        ax.yaxis.offsetText.set_visible(False)
        ax.yaxis.label.set_visible(False)

    self._tight_layout_rect = [.01, .01, .99, .99]
    self._tight_layout_pad = layout_pad
    self._despine = despine
    if despine:
        utils.despine(fig=fig)
    self.tight_layout(pad=layout_pad)
axisgrid.PairGrid.__init__
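A typical construction, sketched under the assumption that the "penguins" example dataset is available:

import seaborn as sns

penguins = sns.load_dataset("penguins")
g = sns.PairGrid(penguins, hue="species", corner=True)
g.map_diag(sns.histplot)        # univariate plots on the diagonal
g.map_offdiag(sns.scatterplot)  # bivariate plots elsewhere
g.add_legend()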
seaborn
29
seaborn/axisgrid.py
def pairplot(
    data, *,
    hue=None, hue_order=None, palette=None,
    vars=None, x_vars=None, y_vars=None,
    kind="scatter", diag_kind="auto", markers=None,
    height=2.5, aspect=1, corner=False, dropna=False,
    plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
    """Plot pairwise relationships in a dataset.

    By default, this function will create a grid of Axes such that each
    numeric variable in ``data`` will be shared across the y-axes across a
    single row and the x-axes across a single column. The diagonal plots
    are treated differently: a univariate distribution plot is drawn to
    show the marginal distribution of the data in each column.

    It is also possible to show a subset of variables or plot different
    variables on the rows and columns.

    This is a high-level interface for :class:`PairGrid` that is intended
    to make it easy to draw a few common styles. You should use
    :class:`PairGrid` directly if you need more flexibility.

    Parameters
    ----------
    data : `pandas.DataFrame`
        Tidy (long-form) dataframe where each column is a variable and
        each row is an observation.
    hue : name of variable in ``data``
        Variable in ``data`` to map plot aspects to different colors.
    hue_order : list of strings
        Order for the levels of the hue variable in the palette
    palette : dict or seaborn color palette
        Set of colors for mapping the ``hue`` variable. If a dict, keys
        should be values in the ``hue`` variable.
    vars : list of variable names
        Variables within ``data`` to use, otherwise use every column with
        a numeric datatype.
    {x, y}_vars : lists of variable names
        Variables within ``data`` to use separately for the rows and
        columns of the figure; i.e. to make a non-square plot.
    kind : {'scatter', 'kde', 'hist', 'reg'}
        Kind of plot to make.
    diag_kind : {'auto', 'hist', 'kde', None}
        Kind of plot for the diagonal subplots. If 'auto', choose based on
        whether or not ``hue`` is used.
    markers : single matplotlib marker code or list
        Either the marker to use for all scatterplot points or a list of
        markers with a length the same as the number of levels in the hue
        variable so that differently colored points will also have
        different scatterplot markers.
    height : scalar
        Height (in inches) of each facet.
    aspect : scalar
        Aspect * height gives the width (in inches) of each facet.
    corner : bool
        If True, don't add axes to the upper (off-diagonal) triangle of the
        grid, making this a "corner" plot.
    dropna : boolean
        Drop missing values from the data before plotting.
    {plot, diag, grid}_kws : dicts
        Dictionaries of keyword arguments. ``plot_kws`` are passed to the
        bivariate plotting function, ``diag_kws`` are passed to the
        univariate plotting function, and ``grid_kws`` are passed to the
        :class:`PairGrid` constructor.

    Returns
    -------
    grid : :class:`PairGrid`
        Returns the underlying :class:`PairGrid` instance for further
        tweaking.

    See Also
    --------
    PairGrid : Subplot grid for more flexible plotting of pairwise
               relationships.
    JointGrid : Grid for plotting joint and marginal distributions of two
                variables.

    Examples
    --------

    .. include:: ../docstrings/pairplot.rst

    """
/usr/src/app/target_test_cases/failed_tests_axisgrid.pairplot.txt
def pairplot(
    data, *,
    hue=None, hue_order=None, palette=None,
    vars=None, x_vars=None, y_vars=None,
    kind="scatter", diag_kind="auto", markers=None,
    height=2.5, aspect=1, corner=False, dropna=False,
    plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
    """Plot pairwise relationships in a dataset.

    By default, this function will create a grid of Axes such that each
    numeric variable in ``data`` will be shared across the y-axes across a
    single row and the x-axes across a single column. The diagonal plots
    are treated differently: a univariate distribution plot is drawn to
    show the marginal distribution of the data in each column.

    It is also possible to show a subset of variables or plot different
    variables on the rows and columns.

    This is a high-level interface for :class:`PairGrid` that is intended
    to make it easy to draw a few common styles. You should use
    :class:`PairGrid` directly if you need more flexibility.

    Parameters
    ----------
    data : `pandas.DataFrame`
        Tidy (long-form) dataframe where each column is a variable and
        each row is an observation.
    hue : name of variable in ``data``
        Variable in ``data`` to map plot aspects to different colors.
    hue_order : list of strings
        Order for the levels of the hue variable in the palette
    palette : dict or seaborn color palette
        Set of colors for mapping the ``hue`` variable. If a dict, keys
        should be values in the ``hue`` variable.
    vars : list of variable names
        Variables within ``data`` to use, otherwise use every column with
        a numeric datatype.
    {x, y}_vars : lists of variable names
        Variables within ``data`` to use separately for the rows and
        columns of the figure; i.e. to make a non-square plot.
    kind : {'scatter', 'kde', 'hist', 'reg'}
        Kind of plot to make.
    diag_kind : {'auto', 'hist', 'kde', None}
        Kind of plot for the diagonal subplots. If 'auto', choose based on
        whether or not ``hue`` is used.
    markers : single matplotlib marker code or list
        Either the marker to use for all scatterplot points or a list of
        markers with a length the same as the number of levels in the hue
        variable so that differently colored points will also have
        different scatterplot markers.
    height : scalar
        Height (in inches) of each facet.
    aspect : scalar
        Aspect * height gives the width (in inches) of each facet.
    corner : bool
        If True, don't add axes to the upper (off-diagonal) triangle of the
        grid, making this a "corner" plot.
    dropna : boolean
        Drop missing values from the data before plotting.
    {plot, diag, grid}_kws : dicts
        Dictionaries of keyword arguments. ``plot_kws`` are passed to the
        bivariate plotting function, ``diag_kws`` are passed to the
        univariate plotting function, and ``grid_kws`` are passed to the
        :class:`PairGrid` constructor.

    Returns
    -------
    grid : :class:`PairGrid`
        Returns the underlying :class:`PairGrid` instance for further
        tweaking.

    See Also
    --------
    PairGrid : Subplot grid for more flexible plotting of pairwise
               relationships.
    JointGrid : Grid for plotting joint and marginal distributions of two
                variables.

    Examples
    --------

    .. include:: ../docstrings/pairplot.rst

    """
    # Avoid circular import
    from .distributions import histplot, kdeplot

    # Handle deprecations
    if size is not None:
        height = size
        msg = ("The `size` parameter has been renamed to `height`; "
               "please update your code.")
        warnings.warn(msg, UserWarning)

    if not isinstance(data, pd.DataFrame):
        raise TypeError(
            f"'data' must be pandas DataFrame object, not: {type(data)}")

    plot_kws = {} if plot_kws is None else plot_kws.copy()
    diag_kws = {} if diag_kws is None else diag_kws.copy()
    grid_kws = {} if grid_kws is None else grid_kws.copy()

    # Resolve "auto" diag kind
    if diag_kind == "auto":
        if hue is None:
            diag_kind = "kde" if kind == "kde" else "hist"
        else:
            diag_kind = "hist" if kind == "hist" else "kde"

    # Set up the PairGrid
    grid_kws.setdefault("diag_sharey", diag_kind == "hist")
    grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
                    hue_order=hue_order, palette=palette, corner=corner,
                    height=height, aspect=aspect, dropna=dropna, **grid_kws)

    # Add the markers here as PairGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if markers is not None:
        if kind == "reg":
            # Needed until regplot supports style
            if grid.hue_names is None:
                n_markers = 1
            else:
                n_markers = len(grid.hue_names)
            if not isinstance(markers, list):
                markers = [markers] * n_markers
            if len(markers) != n_markers:
                raise ValueError("markers must be a singleton or a list of "
                                 "markers for each level of the hue variable")
            grid.hue_kws = {"marker": markers}
        elif kind == "scatter":
            if isinstance(markers, str):
                plot_kws["marker"] = markers
            elif hue is not None:
                plot_kws["style"] = data[hue]
                plot_kws["markers"] = markers

    # Draw the marginal plots on the diagonal
    diag_kws = diag_kws.copy()
    diag_kws.setdefault("legend", False)
    if diag_kind == "hist":
        grid.map_diag(histplot, **diag_kws)
    elif diag_kind == "kde":
        diag_kws.setdefault("fill", True)
        diag_kws.setdefault("warn_singular", False)
        grid.map_diag(kdeplot, **diag_kws)

    # Maybe plot on the off-diagonals
    if diag_kind is not None:
        plotter = grid.map_offdiag
    else:
        plotter = grid.map

    if kind == "scatter":
        from .relational import scatterplot  # Avoid circular import
        plotter(scatterplot, **plot_kws)
    elif kind == "reg":
        from .regression import regplot  # Avoid circular import
        plotter(regplot, **plot_kws)
    elif kind == "kde":
        from .distributions import kdeplot  # Avoid circular import
        plot_kws.setdefault("warn_singular", False)
        plotter(kdeplot, **plot_kws)
    elif kind == "hist":
        from .distributions import histplot  # Avoid circular import
        plotter(histplot, **plot_kws)

    # Add a legend
    if hue is not None:
        grid.add_legend()

    grid.tight_layout()

    return grid
axisgrid.pairplot
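The one-line equivalent of the grid built by hand above; this is the documented high-level entry point (same assumed dataset):

import seaborn as sns

penguins = sns.load_dataset("penguins")
# diag_kind="kde" draws density curves on the diagonal; corner=True drops the upper triangle
sns.pairplot(penguins, hue="species", diag_kind="kde", corner=True)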
seaborn
30
seaborn/_marks/base.py
def _resolve(
    self,
    data: DataFrame | dict[str, Any],
    name: str,
    scales: dict[str, Scale] | None = None,
) -> Any:
    """Obtain default, specified, or mapped value for a named feature.

    Parameters
    ----------
    data : DataFrame or dict with scalar values
        Container with data values for features that will be semantically
        mapped.
    name : string
        Identity of the feature / semantic.
    scales: dict
        Mapping from variable to corresponding scale object.

    Returns
    -------
    value or array of values
        Outer return type depends on whether `data` is a dict (implying
        that we want a single value) or DataFrame (implying that we want
        an array of values with matching length).

    """
/usr/src/app/target_test_cases/failed_tests_base.Mark._resolve.txt
def _resolve(
    self,
    data: DataFrame | dict[str, Any],
    name: str,
    scales: dict[str, Scale] | None = None,
) -> Any:
    """Obtain default, specified, or mapped value for a named feature.

    Parameters
    ----------
    data : DataFrame or dict with scalar values
        Container with data values for features that will be semantically
        mapped.
    name : string
        Identity of the feature / semantic.
    scales: dict
        Mapping from variable to corresponding scale object.

    Returns
    -------
    value or array of values
        Outer return type depends on whether `data` is a dict (implying
        that we want a single value) or DataFrame (implying that we want
        an array of values with matching length).

    """
    feature = self._mappable_props[name]
    prop = PROPERTIES.get(name, Property(name))
    directly_specified = not isinstance(feature, Mappable)
    return_multiple = isinstance(data, pd.DataFrame)
    return_array = return_multiple and not name.endswith("style")

    # Special case width because it needs to be resolved and added to the dataframe
    # during layer prep (so the Move operations use it properly).
    # TODO how does width *scaling* work, e.g. for violin width by count?
    if name == "width":
        directly_specified = directly_specified and name not in data

    if directly_specified:
        feature = prop.standardize(feature)
        if return_multiple:
            feature = [feature] * len(data)
        if return_array:
            feature = np.array(feature)
        return feature

    if name in data:
        if scales is None or name not in scales:
            # TODO Might this obviate the identity scale? Just don't add a scale?
            feature = data[name]
        else:
            scale = scales[name]
            value = data[name]
            try:
                feature = scale(value)
            except Exception as err:
                raise PlotSpecError._during("Scaling operation", name) from err

        if return_array:
            feature = np.asarray(feature)
        return feature

    if feature.depend is not None:
        # TODO add source_func or similar to transform the source value?
        # e.g. set linewidth as a proportion of pointsize?
        return self._resolve(data, feature.depend, scales)

    default = prop.standardize(feature.default)
    if return_multiple:
        default = [default] * len(data)
    if return_array:
        default = np.array(default)
    return default
base.Mark._resolve
seaborn
31
seaborn/_marks/base.py
def resolve_color(
    mark: Mark,
    data: DataFrame | dict,
    prefix: str = "",
    scales: dict[str, Scale] | None = None,
) -> RGBATuple | ndarray:
    """
    Obtain a default, specified, or mapped value for a color feature.

    This method exists separately to support the relationship between a
    color and its corresponding alpha. We want to respect alpha values that
    are passed in specified (or mapped) color values but also make use of a
    separate `alpha` variable, which can be mapped. This approach may also
    be extended to support mapping of specific color channels (i.e.
    luminance, chroma) in the future.

    Parameters
    ----------
    mark :
        Mark with the color property.
    data :
        Container with data values for features that will be semantically
        mapped.
    prefix :
        Support "color", "fillcolor", etc.

    """
/usr/src/app/target_test_cases/failed_tests_resolve_color.txt
def resolve_color(
    mark: Mark,
    data: DataFrame | dict,
    prefix: str = "",
    scales: dict[str, Scale] | None = None,
) -> RGBATuple | ndarray:
    """
    Obtain a default, specified, or mapped value for a color feature.

    This method exists separately to support the relationship between a
    color and its corresponding alpha. We want to respect alpha values that
    are passed in specified (or mapped) color values but also make use of a
    separate `alpha` variable, which can be mapped. This approach may also
    be extended to support mapping of specific color channels (i.e.
    luminance, chroma) in the future.

    Parameters
    ----------
    mark :
        Mark with the color property.
    data :
        Container with data values for features that will be semantically
        mapped.
    prefix :
        Support "color", "fillcolor", etc.

    """
    color = mark._resolve(data, f"{prefix}color", scales)

    if f"{prefix}alpha" in mark._mappable_props:
        alpha = mark._resolve(data, f"{prefix}alpha", scales)
    else:
        alpha = mark._resolve(data, "alpha", scales)

    def visible(x, axis=None):
        """Detect "invisible" colors to set alpha appropriately."""
        # TODO First clause only needed to handle non-rgba arrays,
        # which we are trying to handle upstream
        return np.array(x).dtype.kind != "f" or np.isfinite(x).all(axis)

    # Second check here catches vectors of strings with identity scale
    # It could probably be handled better upstream. This is a tricky problem
    if np.ndim(color) < 2 and all(isinstance(x, float) for x in color):
        if len(color) == 4:
            return mpl.colors.to_rgba(color)
        alpha = alpha if visible(color) else np.nan
        return mpl.colors.to_rgba(color, alpha)
    else:
        if np.ndim(color) == 2 and color.shape[1] == 4:
            return mpl.colors.to_rgba_array(color)
        alpha = np.where(visible(color, axis=1), alpha, np.nan)
        return mpl.colors.to_rgba_array(color, alpha)

    # TODO should we be implementing fill here too?
    # (i.e. set fillalpha to 0 when fill=False)
base.resolve_color
seaborn
32
seaborn/_core/rules.py
def categorical_order(vector: Series, order: list | None = None) -> list:
    """
    Return a list of unique data values using seaborn's ordering rules.

    Parameters
    ----------
    vector : Series
        Vector of "categorical" values
    order : list
        Desired order of category levels to override the order determined
        from the `data` object.

    Returns
    -------
    order : list
        Ordered list of category levels not including null values.

    """
/usr/src/app/target_test_cases/failed_tests_rules.categorical_order.txt
def categorical_order(vector: Series, order: list | None = None) -> list:
    """
    Return a list of unique data values using seaborn's ordering rules.

    Parameters
    ----------
    vector : Series
        Vector of "categorical" values
    order : list
        Desired order of category levels to override the order determined
        from the `data` object.

    Returns
    -------
    order : list
        Ordered list of category levels not including null values.

    """
    if order is not None:
        return order

    if vector.dtype.name == "category":
        order = list(vector.cat.categories)
    else:
        order = list(filter(pd.notnull, vector.unique()))
        if variable_type(pd.Series(order)) == "numeric":
            order.sort()

    return order
categorical_order
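A sketch of the ordering rules in action (note the import is from a private module, so the path may change between seaborn versions):

import pandas as pd
from seaborn._core.rules import categorical_order

s = pd.Series(["b", "a", "c", "a", None])
categorical_order(s)                        # ['b', 'a', 'c']: appearance order, nulls dropped
categorical_order(pd.Series([3, 1, 2]))     # [1, 2, 3]: numeric values are sorted
categorical_order(s, order=["c", "b", "a"])  # an explicit order is returned unchanged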
seaborn
33
seaborn/palettes.py
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
    """Return a list of colors or continuous colormap defining a palette.

    Possible ``palette`` values include:
        - Name of a seaborn palette (deep, muted, bright, pastel, dark,
          colorblind)
        - Name of matplotlib colormap
        - 'husl' or 'hls'
        - 'ch:<cubehelix arguments>'
        - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
        - A sequence of colors in any format matplotlib accepts

    Calling this function with ``palette=None`` will return the current
    matplotlib color cycle.

    This function can also be used in a ``with`` statement to temporarily
    set the color cycle for a plot or set of plots.

    See the :ref:`tutorial <palette_tutorial>` for more information.

    Parameters
    ----------
    palette : None, string, or sequence, optional
        Name of palette or None to return current palette. If a sequence,
        input colors are used but possibly cycled and desaturated.
    n_colors : int, optional
        Number of colors in the palette. If ``None``, the default will
        depend on how ``palette`` is specified. Named palettes default to
        6 colors, but grabbing the current palette or passing in a list of
        colors will not change the number of colors unless this is
        specified. Asking for more colors than exist in the palette will
        cause it to cycle. Ignored when ``as_cmap`` is True.
    desat : float, optional
        Proportion to desaturate each color by.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    set_palette : Set the default color cycle for all plots.
    set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
                      colors from one of the seaborn palettes.

    Examples
    --------

    .. include:: ../docstrings/color_palette.rst

    """
/usr/src/app/target_test_cases/failed_tests_color_palette.txt
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
    """Return a list of colors or continuous colormap defining a palette.

    Possible ``palette`` values include:
        - Name of a seaborn palette (deep, muted, bright, pastel, dark,
          colorblind)
        - Name of matplotlib colormap
        - 'husl' or 'hls'
        - 'ch:<cubehelix arguments>'
        - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
        - A sequence of colors in any format matplotlib accepts

    Calling this function with ``palette=None`` will return the current
    matplotlib color cycle.

    This function can also be used in a ``with`` statement to temporarily
    set the color cycle for a plot or set of plots.

    See the :ref:`tutorial <palette_tutorial>` for more information.

    Parameters
    ----------
    palette : None, string, or sequence, optional
        Name of palette or None to return current palette. If a sequence,
        input colors are used but possibly cycled and desaturated.
    n_colors : int, optional
        Number of colors in the palette. If ``None``, the default will
        depend on how ``palette`` is specified. Named palettes default to
        6 colors, but grabbing the current palette or passing in a list of
        colors will not change the number of colors unless this is
        specified. Asking for more colors than exist in the palette will
        cause it to cycle. Ignored when ``as_cmap`` is True.
    desat : float, optional
        Proportion to desaturate each color by.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    set_palette : Set the default color cycle for all plots.
    set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
                      colors from one of the seaborn palettes.

    Examples
    --------

    .. include:: ../docstrings/color_palette.rst

    """
    if palette is None:
        palette = get_color_cycle()
        if n_colors is None:
            n_colors = len(palette)

    elif not isinstance(palette, str):
        palette = palette
        if n_colors is None:
            n_colors = len(palette)
    else:
        if n_colors is None:
            # Use all colors in a qualitative palette or 6 of another kind
            n_colors = QUAL_PALETTE_SIZES.get(palette, 6)

        if palette in SEABORN_PALETTES:
            # Named "seaborn variant" of matplotlib default color cycle
            palette = SEABORN_PALETTES[palette]

        elif palette == "hls":
            # Evenly spaced colors in cylindrical RGB space
            palette = hls_palette(n_colors, as_cmap=as_cmap)

        elif palette == "husl":
            # Evenly spaced colors in cylindrical Lab space
            palette = husl_palette(n_colors, as_cmap=as_cmap)

        elif palette.lower() == "jet":
            # Paternalism
            raise ValueError("No.")

        elif palette.startswith("ch:"):
            # Cubehelix palette with params specified in string
            args, kwargs = _parse_cubehelix_args(palette)
            palette = cubehelix_palette(n_colors, *args, **kwargs,
                                        as_cmap=as_cmap)

        elif palette.startswith("light:"):
            # light palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = light_palette(color, n_colors, reverse=reverse,
                                    as_cmap=as_cmap)

        elif palette.startswith("dark:"):
            # dark palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = dark_palette(color, n_colors, reverse=reverse,
                                   as_cmap=as_cmap)

        elif palette.startswith("blend:"):
            # blend palette between colors specified in string
            _, colors = palette.split(":")
            colors = colors.split(",")
            palette = blend_palette(colors, n_colors, as_cmap=as_cmap)

        else:
            try:
                # Perhaps a named matplotlib colormap?
                palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
            except (ValueError, KeyError):  # Error class changed in mpl36
                raise ValueError(f"{palette!r} is not a valid palette name")

    if desat is not None:
        palette = [desaturate(c, desat) for c in palette]

    if not as_cmap:

        # Always return as many colors as we asked for
        pal_cycle = cycle(palette)
        palette = [next(pal_cycle) for _ in range(n_colors)]

        # Always return in r, g, b tuple format
        try:
            palette = map(mpl.colors.colorConverter.to_rgb, palette)
            palette = _ColorPalette(palette)
        except ValueError:
            raise ValueError(f"Could not generate a palette for {palette}")

    return palette
color_palette
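A few representative calls covering the string formats listed above (the color and parameter choices are illustrative):

import seaborn as sns

sns.color_palette()                      # current color cycle as RGB tuples
sns.color_palette("husl", 8)             # 8 evenly spaced hues
sns.color_palette("light:seagreen", 5)   # light sequential ramp toward a color
sns.color_palette("blend:#7AB,#EDA", 6)  # interpolate between two colors
cmap = sns.color_palette("ch:start=.2,rot=-.3", as_cmap=True)  # cubehelix colormap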
seaborn
34
seaborn/utils.py
def desaturate(color, prop):
    """Decrease the saturation channel of a color by some percent.

    Parameters
    ----------
    color : matplotlib color
        hex, rgb-tuple, or html color name
    prop : float
        saturation channel of color will be multiplied by this value

    Returns
    -------
    new_color : rgb tuple
        desaturated color code in RGB tuple representation

    """
/usr/src/app/target_test_cases/failed_tests_desaturate.txt
def desaturate(color, prop):
    """Decrease the saturation channel of a color by some percent.

    Parameters
    ----------
    color : matplotlib color
        hex, rgb-tuple, or html color name
    prop : float
        saturation channel of color will be multiplied by this value

    Returns
    -------
    new_color : rgb tuple
        desaturated color code in RGB tuple representation

    """
    # Check inputs
    if not 0 <= prop <= 1:
        raise ValueError("prop must be between 0 and 1")

    # Get rgb tuple rep
    rgb = to_rgb(color)

    # Short circuit to avoid floating point issues
    if prop == 1:
        return rgb

    # Convert to hls
    h, l, s = colorsys.rgb_to_hls(*rgb)

    # Desaturate the saturation channel
    s *= prop

    # Convert back to rgb
    new_color = colorsys.hls_to_rgb(h, l, s)

    return new_color
desaturate
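For instance (pure color math, no figure needed):

import seaborn as sns

sns.desaturate("red", 0.5)           # -> (0.75, 0.25, 0.25), halfway toward gray
sns.desaturate((0.0, 0.0, 1.0), 1)   # prop=1 returns the RGB tuple unchanged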
seaborn
35
seaborn/_core/groupby.py
def __init__(self, order: list[str] | dict[str, list | None]):
    """
    Initialize the GroupBy from grouping variables and optional level
    orders.

    Parameters
    ----------
    order
        List of variable names or dict mapping names to desired level
        orders. Level order values can be None to use default ordering
        rules. The variables can include names that are not expected to
        appear in the data; these will be dropped before the groups are
        defined.

    """
/usr/src/app/target_test_cases/failed_tests_groupby.GroupBy.__init__.txt
def __init__(self, order: list[str] | dict[str, list | None]):
    """
    Initialize the GroupBy from grouping variables and optional level
    orders.

    Parameters
    ----------
    order
        List of variable names or dict mapping names to desired level
        orders. Level order values can be None to use default ordering
        rules. The variables can include names that are not expected to
        appear in the data; these will be dropped before the groups are
        defined.

    """
    if not order:
        raise ValueError("GroupBy requires at least one grouping variable")

    if isinstance(order, list):
        order = {k: None for k in order}
    self.order = order
groupby.GroupBy.__init__
seaborn
36
seaborn/external/kde.py
def evaluate(self, points):
    """Evaluate the estimated pdf on a set of points.

    Parameters
    ----------
    points : (# of dimensions, # of points)-array
        Alternatively, a (# of dimensions,) vector can be passed in and
        treated as a single point.

    Returns
    -------
    values : (# of points,)-array
        The values at each point.

    Raises
    ------
    ValueError : if the dimensionality of the input points is different
                 than the dimensionality of the KDE.

    """
/usr/src/app/target_test_cases/failed_tests_kde.gaussian_kde.evaluate.txt
def evaluate(self, points):
    """Evaluate the estimated pdf on a set of points.

    Parameters
    ----------
    points : (# of dimensions, # of points)-array
        Alternatively, a (# of dimensions,) vector can be passed in and
        treated as a single point.

    Returns
    -------
    values : (# of points,)-array
        The values at each point.

    Raises
    ------
    ValueError : if the dimensionality of the input points is different
                 than the dimensionality of the KDE.

    """
    points = atleast_2d(asarray(points))

    d, m = points.shape
    if d != self.d:
        if d == 1 and m == self.d:
            # points was passed in as a row vector
            points = reshape(points, (self.d, 1))
            m = 1
        else:
            msg = f"points have dimension {d}, dataset has dimension {self.d}"
            raise ValueError(msg)

    output_dtype = np.common_type(self.covariance, points)
    result = zeros((m,), dtype=output_dtype)

    whitening = linalg.cholesky(self.inv_cov)
    scaled_dataset = dot(whitening, self.dataset)
    scaled_points = dot(whitening, points)

    if m >= self.n:
        # there are more points than data, so loop over data
        for i in range(self.n):
            diff = scaled_dataset[:, i, newaxis] - scaled_points
            energy = sum(diff * diff, axis=0) / 2.0
            result += self.weights[i]*exp(-energy)
    else:
        # loop over points
        for i in range(m):
            diff = scaled_dataset - scaled_points[:, i, newaxis]
            energy = sum(diff * diff, axis=0) / 2.0
            result[i] = sum(exp(-energy)*self.weights, axis=0)

    result = result / self._norm_factor

    return result
kde.gaussian_kde.evaluate
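A sketch against the vendored module (this code is copied into seaborn from scipy, so importing it via seaborn.external is an implementation detail that may change):

import numpy as np
from seaborn.external.kde import gaussian_kde

rng = np.random.default_rng(0)
kde = gaussian_kde(rng.normal(size=200))
grid = np.linspace(-3, 3, 7)
density = kde.evaluate(grid)  # one density estimate per grid point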
seaborn
37
seaborn/external/kde.py
def set_bandwidth(self, bw_method=None):
    """Compute the estimator bandwidth with given method.

    The new bandwidth calculated after a call to `set_bandwidth` is used
    for subsequent evaluations of the estimated density.

    Parameters
    ----------
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', a scalar constant or a callable. If a
        scalar, this will be used directly as `kde.factor`. If a callable,
        it should take a `gaussian_kde` instance as only parameter and
        return a scalar. If None (default), nothing happens; the current
        `kde.covariance_factor` method is kept.

    Notes
    -----
    .. versionadded:: 0.11

    """
/usr/src/app/target_test_cases/failed_tests_kde.gaussian_kde.set_bandwidth.txt
def set_bandwidth(self, bw_method=None):
    """Compute the estimator bandwidth with given method.

    The new bandwidth calculated after a call to `set_bandwidth` is used
    for subsequent evaluations of the estimated density.

    Parameters
    ----------
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', a scalar constant or a callable. If a
        scalar, this will be used directly as `kde.factor`. If a callable,
        it should take a `gaussian_kde` instance as only parameter and
        return a scalar. If None (default), nothing happens; the current
        `kde.covariance_factor` method is kept.

    Notes
    -----
    .. versionadded:: 0.11

    """
    if bw_method is None:
        pass
    elif bw_method == 'scott':
        self.covariance_factor = self.scotts_factor
    elif bw_method == 'silverman':
        self.covariance_factor = self.silverman_factor
    elif np.isscalar(bw_method) and not isinstance(bw_method, str):
        self._bw_method = 'use constant'
        self.covariance_factor = lambda: bw_method
    elif callable(bw_method):
        self._bw_method = bw_method
        self.covariance_factor = lambda: self._bw_method(self)
    else:
        msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
              "or a callable."
        raise ValueError(msg)

    self._compute_covariance()
kde.gaussian_kde.set_bandwidth
seaborn
38
seaborn/utils.py
def load_dataset(name, cache=True, data_home=None, **kws):
    """Load an example dataset from the online repository (requires internet).

    This function provides quick access to a small number of example
    datasets that are useful for documenting seaborn or generating
    reproducible examples for bug reports. It is not necessary for normal
    usage.

    Note that some of the datasets have a small amount of preprocessing
    applied to define a proper ordering for categorical variables.

    Use :func:`get_dataset_names` to see a list of available datasets.

    Parameters
    ----------
    name : str
        Name of the dataset (``{name}.csv`` on
        https://github.com/mwaskom/seaborn-data).
    cache : boolean, optional
        If True, try to load from the local cache first, and save to the
        cache if a download is required.
    data_home : string, optional
        The directory in which to cache data; see :func:`get_data_home`.
    kws : keys and values, optional
        Additional keyword arguments are passed through to
        :func:`pandas.read_csv`.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        Tabular data, possibly with some preprocessing applied.

    """
/usr/src/app/target_test_cases/failed_tests_load_dataset.txt
def load_dataset(name, cache=True, data_home=None, **kws):
    """Load an example dataset from the online repository (requires internet).

    This function provides quick access to a small number of example
    datasets that are useful for documenting seaborn or generating
    reproducible examples for bug reports. It is not necessary for normal
    usage.

    Note that some of the datasets have a small amount of preprocessing
    applied to define a proper ordering for categorical variables.

    Use :func:`get_dataset_names` to see a list of available datasets.

    Parameters
    ----------
    name : str
        Name of the dataset (``{name}.csv`` on
        https://github.com/mwaskom/seaborn-data).
    cache : boolean, optional
        If True, try to load from the local cache first, and save to the
        cache if a download is required.
    data_home : string, optional
        The directory in which to cache data; see :func:`get_data_home`.
    kws : keys and values, optional
        Additional keyword arguments are passed through to
        :func:`pandas.read_csv`.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        Tabular data, possibly with some preprocessing applied.

    """
    # A common beginner mistake is to assume that one's personal data needs
    # to be passed through this function to be usable with seaborn.
    # Let's provide a more helpful error than you would otherwise get.
    if isinstance(name, pd.DataFrame):
        err = (
            "This function accepts only strings (the name of an example dataset). "
            "You passed a pandas DataFrame. If you have your own dataset, "
            "it is not necessary to use this function before plotting."
        )
        raise TypeError(err)

    url = f"{DATASET_SOURCE}/{name}.csv"

    if cache:
        cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))
        if not os.path.exists(cache_path):
            if name not in get_dataset_names():
                raise ValueError(f"'{name}' is not one of the example datasets.")
            urlretrieve(url, cache_path)
        full_path = cache_path
    else:
        full_path = url

    df = pd.read_csv(full_path, **kws)

    if df.iloc[-1].isnull().all():
        df = df.iloc[:-1]

    # Set some columns as a categorical type with ordered levels

    if name == "tips":
        df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
        df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
        df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
        df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])

    elif name == "flights":
        months = df["month"].str[:3]
        df["month"] = pd.Categorical(months, months.unique())

    elif name == "exercise":
        df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
        df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
        df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])

    elif name == "titanic":
        df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
        df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))

    elif name == "penguins":
        df["sex"] = df["sex"].str.title()

    elif name == "diamonds":
        df["color"] = pd.Categorical(
            df["color"], ["D", "E", "F", "G", "H", "I", "J"],
        )
        df["clarity"] = pd.Categorical(
            df["clarity"], ["IF", "VVS1", "VVS2", "VS1", "VS2", "SI1", "SI2", "I1"],
        )
        df["cut"] = pd.Categorical(
            df["cut"], ["Ideal", "Premium", "Very Good", "Good", "Fair"],
        )

    elif name == "taxis":
        df["pickup"] = pd.to_datetime(df["pickup"])
        df["dropoff"] = pd.to_datetime(df["dropoff"])

    elif name == "seaice":
        df["Date"] = pd.to_datetime(df["Date"])

    elif name == "dowjones":
        df["Date"] = pd.to_datetime(df["Date"])

    return df
load_dataset
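A minimal usage sketch for the record above (the first call needs internet; later calls read the local cache):

import seaborn as sns

tips = sns.load_dataset("tips")
print(tips.head())
print(tips["day"].cat.categories)  # ordered categorical: Thur, Fri, Sat, Sun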
seaborn
39
seaborn/matrix.py
def clustermap( data, *, pivot_kws=None, method='average', metric='euclidean', z_score=None, standard_scale=None, figsize=(10, 10), cbar_kws=None, row_cluster=True, col_cluster=True, row_linkage=None, col_linkage=None, row_colors=None, col_colors=None, mask=None, dendrogram_ratio=.2, colors_ratio=0.03, cbar_pos=(.02, .8, .05, .18), tree_kws=None, **kwargs ): """ Plot a matrix dataset as a hierarchically-clustered heatmap. This function requires scipy to be available. Parameters ---------- data : 2D array-like Rectangular data for clustering. Cannot contain NAs. pivot_kws : dict, optional If `data` is a tidy dataframe, can provide keyword arguments for pivot to create a rectangular dataframe. method : str, optional Linkage method to use for calculating clusters. See :func:`scipy.cluster.hierarchy.linkage` documentation for more information. metric : str, optional Distance metric to use for the data. See :func:`scipy.spatial.distance.pdist` documentation for more options. To use different metrics (or methods) for rows and columns, you may construct each linkage matrix yourself and provide them as `{row,col}_linkage`. z_score : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores for the rows or the columns. Z scores are: z = (x - mean)/std, so values in each row (column) will get the mean of the row (column) subtracted, then divided by the standard deviation of the row (column). This ensures that each row (column) has mean of 0 and variance of 1. standard_scale : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to standardize that dimension, meaning for each row or column, subtract the minimum and divide each by its maximum. figsize : tuple of (width, height), optional Overall size of the figure. cbar_kws : dict, optional Keyword arguments to pass to `cbar_kws` in :func:`heatmap`, e.g. to add a label to the colorbar. {row,col}_cluster : bool, optional If ``True``, cluster the {rows, columns}. {row,col}_linkage : :class:`numpy.ndarray`, optional Precomputed linkage matrix for the rows or columns. See :func:`scipy.cluster.hierarchy.linkage` for specific formats. {row,col}_colors : list-like or pandas DataFrame/Series, optional List of colors to label for either the rows or columns. Useful to evaluate whether samples within a group are clustered together. Can use nested lists or DataFrame for multiple color levels of labeling. If given as a :class:`pandas.DataFrame` or :class:`pandas.Series`, labels for the colors are extracted from the DataFrames column names or from the name of the Series. DataFrame/Series colors are also matched to the data by their index, ensuring colors are drawn in the correct order. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where `mask` is True. Cells with missing values are automatically masked. Only used for visualizing, not for calculating. {dendrogram,colors}_ratio : float, or pair of floats, optional Proportion of the figure size devoted to the two marginal elements. If a pair is given, they correspond to (row, col) ratios. cbar_pos : tuple of (left, bottom, width, height), optional Position of the colorbar axes in the figure. Setting to ``None`` will disable the colorbar. tree_kws : dict, optional Parameters for the :class:`matplotlib.collections.LineCollection` that is used to plot the lines of the dendrogram tree. kwargs : other keyword arguments All other keyword arguments are passed to :func:`heatmap`. 
Returns ------- :class:`ClusterGrid` A :class:`ClusterGrid` instance. See Also -------- heatmap : Plot rectangular data as a color-encoded matrix. Notes ----- The returned object has a ``savefig`` method that should be used if you want to save the figure object without clipping the dendrograms. To access the reordered row indices, use: ``clustergrid.dendrogram_row.reordered_ind`` Column indices, use: ``clustergrid.dendrogram_col.reordered_ind`` Examples -------- .. include:: ../docstrings/clustermap.rst """
/usr/src/app/target_test_cases/failed_tests_matrix.clustermap.txt
def clustermap( data, *, pivot_kws=None, method='average', metric='euclidean', z_score=None, standard_scale=None, figsize=(10, 10), cbar_kws=None, row_cluster=True, col_cluster=True, row_linkage=None, col_linkage=None, row_colors=None, col_colors=None, mask=None, dendrogram_ratio=.2, colors_ratio=0.03, cbar_pos=(.02, .8, .05, .18), tree_kws=None, **kwargs ): """ Plot a matrix dataset as a hierarchically-clustered heatmap. This function requires scipy to be available. Parameters ---------- data : 2D array-like Rectangular data for clustering. Cannot contain NAs. pivot_kws : dict, optional If `data` is a tidy dataframe, can provide keyword arguments for pivot to create a rectangular dataframe. method : str, optional Linkage method to use for calculating clusters. See :func:`scipy.cluster.hierarchy.linkage` documentation for more information. metric : str, optional Distance metric to use for the data. See :func:`scipy.spatial.distance.pdist` documentation for more options. To use different metrics (or methods) for rows and columns, you may construct each linkage matrix yourself and provide them as `{row,col}_linkage`. z_score : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores for the rows or the columns. Z scores are: z = (x - mean)/std, so values in each row (column) will get the mean of the row (column) subtracted, then divided by the standard deviation of the row (column). This ensures that each row (column) has mean of 0 and variance of 1. standard_scale : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to standardize that dimension, meaning for each row or column, subtract the minimum and divide each by its maximum. figsize : tuple of (width, height), optional Overall size of the figure. cbar_kws : dict, optional Keyword arguments to pass to `cbar_kws` in :func:`heatmap`, e.g. to add a label to the colorbar. {row,col}_cluster : bool, optional If ``True``, cluster the {rows, columns}. {row,col}_linkage : :class:`numpy.ndarray`, optional Precomputed linkage matrix for the rows or columns. See :func:`scipy.cluster.hierarchy.linkage` for specific formats. {row,col}_colors : list-like or pandas DataFrame/Series, optional List of colors to label for either the rows or columns. Useful to evaluate whether samples within a group are clustered together. Can use nested lists or DataFrame for multiple color levels of labeling. If given as a :class:`pandas.DataFrame` or :class:`pandas.Series`, labels for the colors are extracted from the DataFrames column names or from the name of the Series. DataFrame/Series colors are also matched to the data by their index, ensuring colors are drawn in the correct order. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where `mask` is True. Cells with missing values are automatically masked. Only used for visualizing, not for calculating. {dendrogram,colors}_ratio : float, or pair of floats, optional Proportion of the figure size devoted to the two marginal elements. If a pair is given, they correspond to (row, col) ratios. cbar_pos : tuple of (left, bottom, width, height), optional Position of the colorbar axes in the figure. Setting to ``None`` will disable the colorbar. tree_kws : dict, optional Parameters for the :class:`matplotlib.collections.LineCollection` that is used to plot the lines of the dendrogram tree. kwargs : other keyword arguments All other keyword arguments are passed to :func:`heatmap`. 
Returns ------- :class:`ClusterGrid` A :class:`ClusterGrid` instance. See Also -------- heatmap : Plot rectangular data as a color-encoded matrix. Notes ----- The returned object has a ``savefig`` method that should be used if you want to save the figure object without clipping the dendrograms. To access the reordered row indices, use: ``clustergrid.dendrogram_row.reordered_ind`` Column indices, use: ``clustergrid.dendrogram_col.reordered_ind`` Examples -------- .. include:: ../docstrings/clustermap.rst """ if _no_scipy: raise RuntimeError("clustermap requires scipy to be available") plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize, row_colors=row_colors, col_colors=col_colors, z_score=z_score, standard_scale=standard_scale, mask=mask, dendrogram_ratio=dendrogram_ratio, colors_ratio=colors_ratio, cbar_pos=cbar_pos) return plotter.plot(metric=metric, method=method, colorbar_kws=cbar_kws, row_cluster=row_cluster, col_cluster=col_cluster, row_linkage=row_linkage, col_linkage=col_linkage, tree_kws=tree_kws, **kwargs)
matrix.clustermap
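A hedged usage sketch (the `iris` dataset and styling choices are illustrative):

import seaborn as sns

iris = sns.load_dataset("iris")
species = iris.pop("species")     # clustermap needs an all-numeric matrix with no NAs
g = sns.clustermap(iris, z_score=1, cmap="vlag")
g.savefig("clustermap.png")       # use the grid's savefig so the dendrograms are not clipped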
seaborn
40
seaborn/matrix.py
def dendrogram( data, *, linkage=None, axis=1, label=True, metric='euclidean', method='average', rotate=False, tree_kws=None, ax=None ): """Draw a tree diagram of relationships within a matrix Parameters ---------- data : pandas.DataFrame Rectangular data linkage : numpy.array, optional Linkage matrix axis : int, optional Which axis to use to calculate linkage. 0 is rows, 1 is columns. label : bool, optional If True, label the dendrogram at leaves with column or row names metric : str, optional Distance metric. Anything valid for scipy.spatial.distance.pdist method : str, optional Linkage method to use. Anything valid for scipy.cluster.hierarchy.linkage rotate : bool, optional When plotting the matrix, whether to rotate it 90 degrees counter-clockwise, so the leaves face right tree_kws : dict, optional Keyword arguments for the ``matplotlib.collections.LineCollection`` that is used for plotting the lines of the dendrogram tree. ax : matplotlib axis, optional Axis to plot on, otherwise uses current axis Returns ------- dendrogramplotter : _DendrogramPlotter A Dendrogram plotter object. Notes ----- Access the reordered dendrogram indices with dendrogramplotter.reordered_ind """
/usr/src/app/target_test_cases/failed_tests_matrix.dendrogram.txt
def dendrogram( data, *, linkage=None, axis=1, label=True, metric='euclidean', method='average', rotate=False, tree_kws=None, ax=None ): """Draw a tree diagram of relationships within a matrix Parameters ---------- data : pandas.DataFrame Rectangular data linkage : numpy.array, optional Linkage matrix axis : int, optional Which axis to use to calculate linkage. 0 is rows, 1 is columns. label : bool, optional If True, label the dendrogram at leaves with column or row names metric : str, optional Distance metric. Anything valid for scipy.spatial.distance.pdist method : str, optional Linkage method to use. Anything valid for scipy.cluster.hierarchy.linkage rotate : bool, optional When plotting the matrix, whether to rotate it 90 degrees counter-clockwise, so the leaves face right tree_kws : dict, optional Keyword arguments for the ``matplotlib.collections.LineCollection`` that is used for plotting the lines of the dendrogram tree. ax : matplotlib axis, optional Axis to plot on, otherwise uses current axis Returns ------- dendrogramplotter : _DendrogramPlotter A Dendrogram plotter object. Notes ----- Access the reordered dendrogram indices with dendrogramplotter.reordered_ind """ if _no_scipy: raise RuntimeError("dendrogram requires scipy to be installed") plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis, metric=metric, method=method, label=label, rotate=rotate) if ax is None: ax = plt.gca() return plotter.plot(ax=ax, tree_kws=tree_kws)
matrix.dendrogram
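A sketch of direct use; note that this helper lives in `seaborn.matrix` rather than the public top-level namespace, so treat the import path as an implementation detail:

import matplotlib.pyplot as plt
import seaborn as sns
from seaborn.matrix import dendrogram

iris = sns.load_dataset("iris").drop(columns="species")
plotter = dendrogram(iris, axis=1)   # cluster the columns
print(plotter.reordered_ind)         # leaf order after clustering
plt.show()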
seaborn
41
seaborn/matrix.py
def heatmap( data, *, vmin=None, vmax=None, cmap=None, center=None, robust=False, annot=None, fmt=".2g", annot_kws=None, linewidths=0, linecolor="white", cbar=True, cbar_kws=None, cbar_ax=None, square=False, xticklabels="auto", yticklabels="auto", mask=None, ax=None, **kwargs ): """Plot rectangular data as a color-encoded matrix. This is an Axes-level function and will draw the heatmap into the currently-active Axes if none is provided to the ``ax`` argument. Part of this Axes space will be taken and used to plot a colormap, unless ``cbar`` is False or a separate Axes is provided to ``cbar_ax``. Parameters ---------- data : rectangular dataset 2D dataset that can be coerced into an ndarray. If a Pandas DataFrame is provided, the index/column information will be used to label the columns and rows. vmin, vmax : floats, optional Values to anchor the colormap, otherwise they are inferred from the data and other keyword arguments. cmap : matplotlib colormap name or object, or list of colors, optional The mapping from data values to color space. If not provided, the default will depend on whether ``center`` is set. center : float, optional The value at which to center the colormap when plotting divergent data. Using this parameter will change the default ``cmap`` if none is specified. robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with robust quantiles instead of the extreme values. annot : bool or rectangular dataset, optional If True, write the data value in each cell. If an array-like with the same shape as ``data``, then use this to annotate the heatmap instead of the data. Note that DataFrames will match on position, not index. fmt : str, optional String formatting code to use when adding annotations. annot_kws : dict of key, value mappings, optional Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot`` is True. linewidths : float, optional Width of the lines that will divide each cell. linecolor : color, optional Color of the lines that will divide each cell. cbar : bool, optional Whether to draw a colorbar. cbar_kws : dict of key, value mappings, optional Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`. cbar_ax : matplotlib Axes, optional Axes in which to draw the colorbar, otherwise take space from the main Axes. square : bool, optional If True, set the Axes aspect to "equal" so each cell will be square-shaped. xticklabels, yticklabels : "auto", bool, list-like, or int, optional If True, plot the column names of the dataframe. If False, don't plot the column names. If list-like, plot these alternate labels as the xticklabels. If an integer, use the column names but plot only every n label. If "auto", try to densely plot non-overlapping labels. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where ``mask`` is True. Cells with missing values are automatically masked. ax : matplotlib Axes, optional Axes in which to draw the plot, otherwise use the currently-active Axes. kwargs : other keyword arguments All other keyword arguments are passed to :meth:`matplotlib.axes.Axes.pcolormesh`. Returns ------- ax : matplotlib Axes Axes object with the heatmap. See Also -------- clustermap : Plot a matrix using hierarchical clustering to arrange the rows and columns. Examples -------- .. include:: ../docstrings/heatmap.rst """
/usr/src/app/target_test_cases/failed_tests_matrix.heatmap.txt
def heatmap( data, *, vmin=None, vmax=None, cmap=None, center=None, robust=False, annot=None, fmt=".2g", annot_kws=None, linewidths=0, linecolor="white", cbar=True, cbar_kws=None, cbar_ax=None, square=False, xticklabels="auto", yticklabels="auto", mask=None, ax=None, **kwargs ): """Plot rectangular data as a color-encoded matrix. This is an Axes-level function and will draw the heatmap into the currently-active Axes if none is provided to the ``ax`` argument. Part of this Axes space will be taken and used to plot a colormap, unless ``cbar`` is False or a separate Axes is provided to ``cbar_ax``. Parameters ---------- data : rectangular dataset 2D dataset that can be coerced into an ndarray. If a Pandas DataFrame is provided, the index/column information will be used to label the columns and rows. vmin, vmax : floats, optional Values to anchor the colormap, otherwise they are inferred from the data and other keyword arguments. cmap : matplotlib colormap name or object, or list of colors, optional The mapping from data values to color space. If not provided, the default will depend on whether ``center`` is set. center : float, optional The value at which to center the colormap when plotting divergent data. Using this parameter will change the default ``cmap`` if none is specified. robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with robust quantiles instead of the extreme values. annot : bool or rectangular dataset, optional If True, write the data value in each cell. If an array-like with the same shape as ``data``, then use this to annotate the heatmap instead of the data. Note that DataFrames will match on position, not index. fmt : str, optional String formatting code to use when adding annotations. annot_kws : dict of key, value mappings, optional Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot`` is True. linewidths : float, optional Width of the lines that will divide each cell. linecolor : color, optional Color of the lines that will divide each cell. cbar : bool, optional Whether to draw a colorbar. cbar_kws : dict of key, value mappings, optional Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`. cbar_ax : matplotlib Axes, optional Axes in which to draw the colorbar, otherwise take space from the main Axes. square : bool, optional If True, set the Axes aspect to "equal" so each cell will be square-shaped. xticklabels, yticklabels : "auto", bool, list-like, or int, optional If True, plot the column names of the dataframe. If False, don't plot the column names. If list-like, plot these alternate labels as the xticklabels. If an integer, use the column names but plot only every n label. If "auto", try to densely plot non-overlapping labels. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where ``mask`` is True. Cells with missing values are automatically masked. ax : matplotlib Axes, optional Axes in which to draw the plot, otherwise use the currently-active Axes. kwargs : other keyword arguments All other keyword arguments are passed to :meth:`matplotlib.axes.Axes.pcolormesh`. Returns ------- ax : matplotlib Axes Axes object with the heatmap. See Also -------- clustermap : Plot a matrix using hierarchical clustering to arrange the rows and columns. Examples -------- .. include:: ../docstrings/heatmap.rst """ # Initialize the plotter object plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt, annot_kws, cbar, cbar_kws, xticklabels, yticklabels, mask) # Add the pcolormesh kwargs here kwargs["linewidths"] = linewidths kwargs["edgecolor"] = linecolor # Draw the plot and return the Axes if ax is None: ax = plt.gca() if square: ax.set_aspect("equal") plotter.plot(ax, cbar_ax, kwargs) return ax
matrix.heatmap
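A minimal sketch (random data for illustration):

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

data = np.random.default_rng(0).random((8, 10))
ax = sns.heatmap(data, vmin=0, vmax=1, annot=True, fmt=".1f", linewidths=.5)
plt.show()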
seaborn
42
seaborn/palettes.py
def blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"): """Make a palette that blends between a list of colors. Parameters ---------- colors : sequence of colors in various formats interpreted by `input` hex code, html color name, or tuple in `input` space. n_colors : int, optional Number of colors in the palette. as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` Examples -------- .. include:: ../docstrings/blend_palette.rst """
/usr/src/app/target_test_cases/failed_tests_palettes.blend_palette.txt
def blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"): """Make a palette that blends between a list of colors. Parameters ---------- colors : sequence of colors in various formats interpreted by `input` hex code, html color name, or tuple in `input` space. n_colors : int, optional Number of colors in the palette. as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` Examples -------- .. include:: ../docstrings/blend_palette.rst """ colors = [_color_to_rgb(color, input) for color in colors] name = "blend" pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors) if not as_cmap: rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha pal = _ColorPalette(map(tuple, rgb_array)) return pal
palettes.blend_palette
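A usage sketch (anchor colors are illustrative):

import seaborn as sns

pal = sns.blend_palette(["navy", "white", "firebrick"], n_colors=9)
cmap = sns.blend_palette(["navy", "white", "firebrick"], as_cmap=True)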
seaborn
43
seaborn/palettes.py
def crayon_palette(colors): """Make a palette with color names from Crayola crayons. Colors are taken from here: https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors This is just a simple wrapper around the `seaborn.crayons` dictionary. Parameters ---------- colors : list of strings List of keys in the `seaborn.crayons` dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- xkcd_palette : Make a palette with named colors from the XKCD color survey. """
/usr/src/app/target_test_cases/failed_tests_palettes.crayon_palette.txt
def crayon_palette(colors): """Make a palette with color names from Crayola crayons. Colors are taken from here: https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors This is just a simple wrapper around the `seaborn.crayons` dictionary. Parameters ---------- colors : list of strings List of keys in the `seaborn.crayons` dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- xkcd_palette : Make a palette with named colors from the XKCD color survey. """ palette = [crayons[name] for name in colors] return color_palette(palette, len(palette))
palettes.crayon_palette
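A usage sketch; the crayon names below are assumed to be valid keys of `seaborn.crayons`:

import seaborn as sns

pal = sns.crayon_palette(["Denim", "Razzmatazz", "Granny Smith Apple"])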
seaborn
44
seaborn/palettes.py
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8, light=.85, dark=.15, reverse=False, as_cmap=False): """Make a sequential palette from the cubehelix system. This produces a colormap with linearly-decreasing (or increasing) brightness. That means that information will be preserved if printed to black and white or viewed by someone who is colorblind. "cubehelix" is also available as a matplotlib-based palette, but this function gives the user more control over the look of the palette and has a different set of defaults. In addition to using this function, it is also possible to generate a cubehelix palette generally in seaborn using a string starting with `ch:` and containing other parameters (e.g. `"ch:s=.25,r=-.5"`). Parameters ---------- n_colors : int Number of colors in the palette. start : float, 0 <= start <= 3 The hue value at the start of the helix. rot : float Rotations around the hue wheel over the range of the palette. gamma : float 0 <= gamma Nonlinearity to emphasize dark (gamma < 1) or light (gamma > 1) colors. hue : float, 0 <= hue <= 1 Saturation of the colors. dark : float 0 <= dark <= 1 Intensity of the darkest color in the palette. light : float 0 <= light <= 1 Intensity of the lightest color in the palette. reverse : bool If True, the palette will go from dark to light. as_cmap : bool If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- choose_cubehelix_palette : Launch an interactive widget to select cubehelix palette parameters. dark_palette : Create a sequential palette with dark low values. light_palette : Create a sequential palette with bright low values. References ---------- Green, D. A. (2011). "A colour scheme for the display of astronomical intensity images". Bulletin of the Astronomical Society of India, Vol. 39, p. 289-295. Examples -------- .. include:: ../docstrings/cubehelix_palette.rst """
/usr/src/app/target_test_cases/failed_tests_palettes.cubehelix_palette.txt
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8, light=.85, dark=.15, reverse=False, as_cmap=False): """Make a sequential palette from the cubehelix system. This produces a colormap with linearly-decreasing (or increasing) brightness. That means that information will be preserved if printed to black and white or viewed by someone who is colorblind. "cubehelix" is also available as a matplotlib-based palette, but this function gives the user more control over the look of the palette and has a different set of defaults. In addition to using this function, it is also possible to generate a cubehelix palette generally in seaborn using a string starting with `ch:` and containing other parameters (e.g. `"ch:s=.25,r=-.5"`). Parameters ---------- n_colors : int Number of colors in the palette. start : float, 0 <= start <= 3 The hue value at the start of the helix. rot : float Rotations around the hue wheel over the range of the palette. gamma : float 0 <= gamma Nonlinearity to emphasize dark (gamma < 1) or light (gamma > 1) colors. hue : float, 0 <= hue <= 1 Saturation of the colors. dark : float 0 <= dark <= 1 Intensity of the darkest color in the palette. light : float 0 <= light <= 1 Intensity of the lightest color in the palette. reverse : bool If True, the palette will go from dark to light. as_cmap : bool If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- choose_cubehelix_palette : Launch an interactive widget to select cubehelix palette parameters. dark_palette : Create a sequential palette with dark low values. light_palette : Create a sequential palette with bright low values. References ---------- Green, D. A. (2011). "A colour scheme for the display of astronomical intensity images". Bulletin of the Astronomical Society of India, Vol. 39, p. 289-295. Examples -------- .. include:: ../docstrings/cubehelix_palette.rst """ def get_color_function(p0, p1): # Copied from matplotlib because it lives in private module def color(x): # Apply gamma factor to emphasise low or high intensity values xg = x ** gamma # Calculate amplitude and angle of deviation from the black # to white diagonal in the plane of constant # perceived intensity. a = hue * xg * (1 - xg) / 2 phi = 2 * np.pi * (start / 3 + rot * x) return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi)) return color cdict = { "red": get_color_function(-0.14861, 1.78277), "green": get_color_function(-0.29227, -0.90649), "blue": get_color_function(1.97294, 0.0), } cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict) x = np.linspace(light, dark, int(n_colors)) pal = cmap(x)[:, :3].tolist() if reverse: pal = pal[::-1] if as_cmap: x_256 = np.linspace(light, dark, 256) if reverse: x_256 = x_256[::-1] pal_256 = cmap(x_256) cmap = mpl.colors.ListedColormap(pal_256, "seaborn_cubehelix") return cmap else: return _ColorPalette(pal)
palettes.cubehelix_palette
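A usage sketch, including the equivalent `ch:` string shorthand mentioned in the docstring:

import seaborn as sns

pal = sns.cubehelix_palette(n_colors=8, start=.5, rot=-.75)
cmap = sns.cubehelix_palette(start=.5, rot=-.75, as_cmap=True)
pal2 = sns.color_palette("ch:s=.5,r=-.75", 8)   # same helix via the string interface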
seaborn
45
seaborn/palettes.py
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): """Make a sequential palette that blends from dark to ``color``. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_dark_palette` function. Parameters ---------- color : base color for high values hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', 'xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/dark_palette.rst """
/usr/src/app/target_test_cases/failed_tests_palettes.dark_palette.txt
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): """Make a sequential palette that blends from dark to ``color``. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_dark_palette` function. Parameters ---------- color : base color for high values hex, rgb-tuple, or html color name n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', 'xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/dark_palette.rst """ rgb = _color_to_rgb(color, input) hue, sat, _ = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * sat, 15 gray = _color_to_rgb((hue, gray_s, gray_l), input="husl") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap)
palettes.dark_palette
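A usage sketch:

import seaborn as sns

pal = sns.dark_palette("seagreen", n_colors=6)
cmap = sns.dark_palette("#69d", reverse=True, as_cmap=True)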
seaborn
46
seaborn/palettes.py
def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, # noqa center="light", as_cmap=False): """Make a diverging palette between two HUSL colors. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_diverging_palette` function. Parameters ---------- h_neg, h_pos : float in [0, 359] Anchor hues for negative and positive extents of the map. s : float in [0, 100], optional Anchor saturation for both extents of the map. l : float in [0, 100], optional Anchor lightness for both extents of the map. sep : int, optional Size of the intermediate region. n : int, optional Number of colors in the palette (if not returning a cmap) center : {"light", "dark"}, optional Whether the center of the palette is light or dark as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- dark_palette : Create a sequential palette with dark values. light_palette : Create a sequential palette with light values. Examples -------- .. include:: ../docstrings/diverging_palette.rst """
/usr/src/app/target_test_cases/failed_tests_palettes.diverging_palette.txt
def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, # noqa center="light", as_cmap=False): """Make a diverging palette between two HUSL colors. If you are using the IPython notebook, you can also choose this palette interactively with the :func:`choose_diverging_palette` function. Parameters ---------- h_neg, h_pos : float in [0, 359] Anchor hues for negative and positive extents of the map. s : float in [0, 100], optional Anchor saturation for both extents of the map. l : float in [0, 100], optional Anchor lightness for both extents of the map. sep : int, optional Size of the intermediate region. n : int, optional Number of colors in the palette (if not returning a cmap) center : {"light", "dark"}, optional Whether the center of the palette is light or dark as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- dark_palette : Create a sequential palette with dark values. light_palette : Create a sequential palette with light values. Examples -------- .. include:: ../docstrings/diverging_palette.rst """ palfunc = dict(dark=dark_palette, light=light_palette)[center] n_half = int(128 - (sep // 2)) neg = palfunc((h_neg, s, l), n_half, reverse=True, input="husl") pos = palfunc((h_pos, s, l), n_half, input="husl") midpoint = dict(light=[(.95, .95, .95)], dark=[(.133, .133, .133)])[center] mid = midpoint * sep pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap) return pal
palettes.diverging_palette
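A usage sketch (the anchor hues are illustrative):

import seaborn as sns

pal = sns.diverging_palette(240, 10, n=9)                  # blue through red
cmap = sns.diverging_palette(145, 300, s=60, as_cmap=True)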
seaborn
47
seaborn/palettes.py
def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): # noqa """ Return hues with constant lightness and saturation in the HLS system. The hues are evenly sampled along a circular path. The resulting palette will be appropriate for categorical or cyclical data. The `h`, `l`, and `s` values should be between 0 and 1. .. note:: While the separation of the resulting colors will be mathematically constant, the HLS system does not construct a perceptually-uniform space, so their apparent intensity will vary. Parameters ---------- n_colors : int Number of colors in the palette. h : float The value of the first hue. l : float The lightness value. s : float The saturation intensity. as_cmap : bool If True, return a matplotlib colormap object. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- husl_palette : Make a palette using evenly spaced hues in the HUSL system. Examples -------- .. include:: ../docstrings/hls_palette.rst """
/usr/src/app/target_test_cases/failed_tests_palettes.hls_palette.txt
def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): # noqa """ Return hues with constant lightness and saturation in the HLS system. The hues are evenly sampled along a circular path. The resulting palette will be appropriate for categorical or cyclical data. The `h`, `l`, and `s` values should be between 0 and 1. .. note:: While the separation of the resulting colors will be mathematically constant, the HLS system does not construct a perceptually-uniform space, so their apparent intensity will vary. Parameters ---------- n_colors : int Number of colors in the palette. h : float The value of the first hue. l : float The lightness value. s : float The saturation intensity. as_cmap : bool If True, return a matplotlib colormap object. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- husl_palette : Make a palette using evenly spaced hues in the HUSL system. Examples -------- .. include:: ../docstrings/hls_palette.rst """ if as_cmap: n_colors = 256 hues = np.linspace(0, 1, int(n_colors) + 1)[:-1] hues += h hues %= 1 hues -= hues.astype(int) palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues] if as_cmap: return mpl.colors.ListedColormap(palette, "hls") else: return _ColorPalette(palette)
palettes.hls_palette
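A usage sketch:

import seaborn as sns

pal = sns.hls_palette(8, l=.5, s=.8)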
seaborn
48
seaborn/palettes.py
def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa """ Return hues with constant lightness and saturation in the HUSL system. The hues are evenly sampled along a circular path. The resulting palette will be appropriate for categorical or cyclical data. The `h`, `l`, and `s` values should be between 0 and 1. This function is similar to :func:`hls_palette`, but it uses a nonlinear color space that is more perceptually uniform. Parameters ---------- n_colors : int Number of colors in the palette. h : float The value of the first hue. l : float The lightness value. s : float The saturation intensity. as_cmap : bool If True, return a matplotlib colormap object. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- hls_palette : Make a palette using evenly spaced hues in the HSL system. Examples -------- .. include:: ../docstrings/husl_palette.rst """
/usr/src/app/target_test_cases/failed_tests_palettes.husl_palette.txt
def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa """ Return hues with constant lightness and saturation in the HUSL system. The hues are evenly sampled along a circular path. The resulting palette will be appropriate for categorical or cyclical data. The `h`, `l`, and `s` values should be between 0 and 1. This function is similar to :func:`hls_palette`, but it uses a nonlinear color space that is more perceptually uniform. Parameters ---------- n_colors : int Number of colors in the palette. h : float The value of the first hue. l : float The lightness value. s : float The saturation intensity. as_cmap : bool If True, return a matplotlib colormap object. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- hls_palette : Make a palette using evenly spaced hues in the HSL system. Examples -------- .. include:: ../docstrings/husl_palette.rst """ if as_cmap: n_colors = 256 hues = np.linspace(0, 1, int(n_colors) + 1)[:-1] hues += h hues %= 1 hues *= 359 s *= 99 l *= 99 # noqa palette = [_color_to_rgb((h_i, s, l), input="husl") for h_i in hues] if as_cmap: return mpl.colors.ListedColormap(palette, "hsl") else: return _ColorPalette(palette)
palettes.husl_palette
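A usage sketch:

import seaborn as sns

pal = sns.husl_palette(8, h=.5)
cmap = sns.husl_palette(as_cmap=True)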
seaborn
49
seaborn/palettes.py
def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): """Make a sequential palette that blends from light to ``color``. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using a Jupyter notebook, you can also choose this palette interactively with the :func:`choose_light_palette` function. Parameters ---------- color : base color for high values hex code, html color name, or tuple in `input` space. n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', 'xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- dark_palette : Create a sequential palette with dark low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/light_palette.rst """
/usr/src/app/target_test_cases/failed_tests_palettes.light_palette.txt
def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"): """Make a sequential palette that blends from light to ``color``. The ``color`` parameter can be specified in a number of ways, including all options for defining a color in matplotlib and several additional color spaces that are handled by seaborn. You can also use the database of named colors from the XKCD color survey. If you are using a Jupyter notebook, you can also choose this palette interactively with the :func:`choose_light_palette` function. Parameters ---------- color : base color for high values hex code, html color name, or tuple in `input` space. n_colors : int, optional number of colors in the palette reverse : bool, optional if True, reverse the direction of the blend as_cmap : bool, optional If True, return a :class:`matplotlib.colors.ListedColormap`. input : {'rgb', 'hls', 'husl', 'xkcd'} Color space to interpret the input color. The first three options apply to tuple inputs and the latter applies to string inputs. Returns ------- palette list of RGB tuples or :class:`matplotlib.colors.ListedColormap` See Also -------- dark_palette : Create a sequential palette with dark low values. diverging_palette : Create a diverging palette with two colors. Examples -------- .. include:: ../docstrings/light_palette.rst """ rgb = _color_to_rgb(color, input) hue, sat, _ = husl.rgb_to_husl(*rgb) gray_s, gray_l = .15 * sat, 95 gray = _color_to_rgb((hue, gray_s, gray_l), input="husl") colors = [rgb, gray] if reverse else [gray, rgb] return blend_palette(colors, n_colors, as_cmap)
palettes.light_palette
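A usage sketch, including a husl-tuple input as described in the docstring:

import seaborn as sns

pal = sns.light_palette("seagreen", n_colors=6)
cmap = sns.light_palette((260, 75, 60), input="husl", as_cmap=True)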
seaborn
50
seaborn/palettes.py
def set_color_codes(palette="deep"): """Change how matplotlib color shorthands are interpreted. Calling this will change how shorthand codes like "b" or "g" are interpreted by matplotlib in subsequent plots. Parameters ---------- palette : {deep, muted, pastel, dark, bright, colorblind} Named seaborn palette to use as the source of colors. See Also -------- set : Color codes can be set through the high-level seaborn style manager. set_palette : Color codes can also be set through the function that sets the matplotlib color cycle. """
/usr/src/app/target_test_cases/failed_tests_palettes.set_color_codes.txt
def set_color_codes(palette="deep"): """Change how matplotlib color shorthands are interpreted. Calling this will change how shorthand codes like "b" or "g" are interpreted by matplotlib in subsequent plots. Parameters ---------- palette : {deep, muted, pastel, dark, bright, colorblind} Named seaborn palette to use as the source of colors. See Also -------- set : Color codes can be set through the high-level seaborn style manager. set_palette : Color codes can also be set through the function that sets the matplotlib color cycle. """ if palette == "reset": colors = [ (0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75), (.75, .75, 0.), (0., .75, .75), (0., 0., 0.) ] elif not isinstance(palette, str): err = "set_color_codes requires a named seaborn palette" raise TypeError(err) elif palette in SEABORN_PALETTES: if not palette.endswith("6"): palette = palette + "6" colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)] else: err = f"Cannot set colors with palette '{palette}'" raise ValueError(err) for code, color in zip("bgrmyck", colors): rgb = mpl.colors.colorConverter.to_rgb(color) mpl.colors.colorConverter.colors[code] = rgb
palettes.set_color_codes
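A usage sketch; note the `"reset"` branch in the code above, which restores the classic matplotlib colors:

import matplotlib.pyplot as plt
import seaborn as sns

sns.set_color_codes("muted")
plt.plot([1, 2, 3], color="b")   # "b" now maps to the muted blue
sns.set_color_codes("reset")     # back to the original shorthand colors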
seaborn
51
seaborn/palettes.py
def xkcd_palette(colors): """Make a palette with color names from the xkcd color survey. See xkcd for the full list of colors: https://xkcd.com/color/rgb/ This is just a simple wrapper around the `seaborn.xkcd_rgb` dictionary. Parameters ---------- colors : list of strings List of keys in the `seaborn.xkcd_rgb` dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- crayon_palette : Make a palette with Crayola crayon colors. """
/usr/src/app/target_test_cases/failed_tests_palettes.xkcd_palette.txt
def xkcd_palette(colors): """Make a palette with color names from the xkcd color survey. See xkcd for the full list of colors: https://xkcd.com/color/rgb/ This is just a simple wrapper around the `seaborn.xkcd_rgb` dictionary. Parameters ---------- colors : list of strings List of keys in the `seaborn.xkcd_rgb` dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- crayon_palette : Make a palette with Crayola crayon colors. """ palette = [xkcd_rgb[name] for name in colors] return color_palette(palette, len(palette))
palettes.xkcd_palette
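A usage sketch; the names below are assumed to be keys of `seaborn.xkcd_rgb`:

import seaborn as sns

pal = sns.xkcd_palette(["windows blue", "amber", "dusty purple"])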
seaborn
52
seaborn/_core/plot.py
def label( self, *, title: str | None = None, legend: str | None = None, **variables: str | Callable[[str], str] ) -> Plot: """ Control the labels and titles for axes, legends, and subplots. Additional keywords correspond to variables defined in the plot. Values can be one of the following types: - string (used literally; pass "" to clear the default label) - function (called on the default label) For coordinate variables, the value sets the axis label. For semantic variables, the value sets the legend title. For faceting variables, `title=` modifies the subplot-specific label, while `col=` and/or `row=` add a label for the faceting variable. When using a single subplot, `title=` sets its title. The `legend=` parameter sets the title for the "layer" legend (i.e., when using `label` in :meth:`Plot.add`). Examples -------- .. include:: ../docstrings/objects.Plot.label.rst """
/usr/src/app/target_test_cases/failed_tests_plot.Plot.label.txt
def label( self, *, title: str | None = None, legend: str | None = None, **variables: str | Callable[[str], str] ) -> Plot: """ Control the labels and titles for axes, legends, and subplots. Additional keywords correspond to variables defined in the plot. Values can be one of the following types: - string (used literally; pass "" to clear the default label) - function (called on the default label) For coordinate variables, the value sets the axis label. For semantic variables, the value sets the legend title. For faceting variables, `title=` modifies the subplot-specific label, while `col=` and/or `row=` add a label for the faceting variable. When using a single subplot, `title=` sets its title. The `legend=` parameter sets the title for the "layer" legend (i.e., when using `label` in :meth:`Plot.add`). Examples -------- .. include:: ../docstrings/objects.Plot.label.rst """ new = self._clone() if title is not None: new._labels["title"] = title if legend is not None: new._labels["legend"] = legend new._labels.update(variables) return new
plot.Plot.label
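A usage sketch showing both literal strings and a callable applied to the default label:

import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
(
    so.Plot(tips, x="total_bill", y="tip", color="time")
    .add(so.Dots())
    .label(x="Total bill ($)", y="Tip ($)", color=str.capitalize)
)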
seaborn
53
seaborn/_core/plot.py
def layout( self, *, size: tuple[float, float] | Default = default, engine: str | None | Default = default, extent: tuple[float, float, float, float] | Default = default, ) -> Plot: """ Control the figure size and layout. .. note:: Default figure sizes and the API for specifying the figure size are subject to change in future "experimental" releases of the objects API. The default layout engine may also change. Parameters ---------- size : (width, height) Size of the resulting figure, in inches. Size is inclusive of legend when using pyplot, but not otherwise. engine : {{"tight", "constrained", "none"}} Name of method for automatically adjusting the layout to remove overlap. The default depends on whether :meth:`Plot.on` is used. extent : (left, bottom, right, top) Boundaries of the plot layout, in fractions of the figure size. Takes effect through the layout engine; exact results will vary across engines. Note: the extent includes axis decorations when using a layout engine, but it is exclusive of them when `engine="none"`. Examples -------- .. include:: ../docstrings/objects.Plot.layout.rst """
/usr/src/app/target_test_cases/failed_tests_plot.Plot.layout.txt
def layout( self, *, size: tuple[float, float] | Default = default, engine: str | None | Default = default, extent: tuple[float, float, float, float] | Default = default, ) -> Plot: """ Control the figure size and layout. .. note:: Default figure sizes and the API for specifying the figure size are subject to change in future "experimental" releases of the objects API. The default layout engine may also change. Parameters ---------- size : (width, height) Size of the resulting figure, in inches. Size is inclusive of legend when using pyplot, but not otherwise. engine : {{"tight", "constrained", "none"}} Name of method for automatically adjusting the layout to remove overlap. The default depends on whether :meth:`Plot.on` is used. extent : (left, bottom, right, top) Boundaries of the plot layout, in fractions of the figure size. Takes effect through the layout engine; exact results will vary across engines. Note: the extent includes axis decorations when using a layout engine, but it is exclusive of them when `engine="none"`. Examples -------- .. include:: ../docstrings/objects.Plot.layout.rst """ # TODO add an "auto" mode for figsize that roughly scales with the rcParams # figsize (so that works), but expands to prevent subplots from being squished # Also should we have height=, aspect=, exclusive with figsize? Or working # with figsize when only one is defined? new = self._clone() if size is not default: new._figure_spec["figsize"] = size if engine is not default: new._layout_spec["engine"] = engine if extent is not default: new._layout_spec["extent"] = extent return new
plot.Plot.layout
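A usage sketch (size and engine choices are illustrative):

import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
(
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .layout(size=(6, 4), engine="constrained")
)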
seaborn
54
seaborn/_core/plot.py
def limit(self, **limits: tuple[Any, Any]) -> Plot: """ Control the range of visible data. Keywords correspond to variables defined in the plot, and values are a `(min, max)` tuple (where either can be `None` to leave unset). Limits apply only to the axis; data outside the visible range are still used for any stat transforms and added to the plot. Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.limit.rst """
/usr/src/app/target_test_cases/failed_tests_plot.Plot.limit.txt
def limit(self, **limits: tuple[Any, Any]) -> Plot: """ Control the range of visible data. Keywords correspond to variables defined in the plot, and values are a `(min, max)` tuple (where either can be `None` to leave unset). Limits apply only to the axis; data outside the visible range are still used for any stat transforms and added to the plot. Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.limit.rst """ new = self._clone() new._limits.update(limits) return new
plot.Plot.limit
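A usage sketch; `None` leaves one side of a limit unset, per the docstring:

import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
(
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .limit(x=(0, 60), y=(0, None))
)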
seaborn
55
seaborn/_core/plot.py
def save(self, loc, **kwargs) -> Plot: """ Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:`matplotlib.figure.Figure.savefig`. """
/usr/src/app/target_test_cases/failed_tests_plot.Plot.save.txt
def save(self, loc, **kwargs) -> Plot: """ Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:`matplotlib.figure.Figure.savefig`. """ # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self
plot.Plot.save
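A usage sketch; extra keyword arguments go to `Figure.savefig`:

import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
p = so.Plot(tips, x="total_bill", y="tip").add(so.Dots())
p.save("tips.png", dpi=300)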
seaborn
56
seaborn/_core/plot.py
def scale(self, **scales: Scale) -> Plot: """ Specify mappings from data units to visual properties. Keywords correspond to variables defined in the plot, including coordinate variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.). A number of "magic" arguments are accepted, including: - The name of a transform (e.g., `"log"`, `"sqrt"`) - The name of a palette (e.g., `"viridis"`, `"muted"`) - A tuple of values, defining the output range (e.g. `(1, 5)`) - A dict, implying a :class:`Nominal` scale (e.g. `{"a": .2, "b": .5}`) - A list of values, implying a :class:`Nominal` scale (e.g. `["b", "r"]`) For more explicit control, pass a scale spec object such as :class:`Continuous` or :class:`Nominal`. Or pass `None` to use an "identity" scale, which treats data values as literally encoding visual properties. Examples -------- .. include:: ../docstrings/objects.Plot.scale.rst """
/usr/src/app/target_test_cases/failed_tests_plot.Plot.scale.txt
def scale(self, **scales: Scale) -> Plot: """ Specify mappings from data units to visual properties. Keywords correspond to variables defined in the plot, including coordinate variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.). A number of "magic" arguments are accepted, including: - The name of a transform (e.g., `"log"`, `"sqrt"`) - The name of a palette (e.g., `"viridis"`, `"muted"`) - A tuple of values, defining the output range (e.g. `(1, 5)`) - A dict, implying a :class:`Nominal` scale (e.g. `{"a": .2, "b": .5}`) - A list of values, implying a :class:`Nominal` scale (e.g. `["b", "r"]`) For more explicit control, pass a scale spec object such as :class:`Continuous` or :class:`Nominal`. Or pass `None` to use an "identity" scale, which treats data values as literally encoding visual properties. Examples -------- .. include:: ../docstrings/objects.Plot.scale.rst """ new = self._clone() new._scales.update(scales) return new
plot.Plot.scale
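A usage sketch exercising two of the "magic" arguments (a transform name and a palette name):

import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
(
    so.Plot(tips, x="total_bill", y="tip", color="size")
    .add(so.Dots())
    .scale(x="log", color="viridis")
)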
seaborn
57
seaborn/_core/plot.py
def share(self, **shares: bool | str) -> Plot: """ Control sharing of axis limits and ticks across subplots. Keywords correspond to variables defined in the plot, and values can be boolean (to share across all subplots), or one of "row" or "col" (to share more selectively across one dimension of a grid). Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.share.rst """
/usr/src/app/target_test_cases/failed_tests_plot.Plot.share.txt
def share(self, **shares: bool | str) -> Plot: """ Control sharing of axis limits and ticks across subplots. Keywords correspond to variables defined in the plot, and values can be boolean (to share across all subplots), or one of "row" or "col" (to share more selectively across one dimension of a grid). Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.share.rst """ new = self._clone() new._shares.update(shares) return new
plot.Plot.share
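A usage sketch with a faceted plot:

import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
(
    so.Plot(tips, x="total_bill", y="tip")
    .facet(col="day")
    .add(so.Dots())
    .share(x=True, y=False)
)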
seaborn
58
seaborn/_core/plot.py
def theme(self, config: Mapping[str, Any], /) -> Plot: """ Control the appearance of elements in the plot. .. note:: The API for customizing plot appearance is not yet finalized. Currently, the only valid argument is a dict of matplotlib rc parameters. (This dict must be passed as a positional argument.) It is likely that this method will be enhanced in future releases. Matplotlib rc parameters are documented on the following page: https://matplotlib.org/stable/tutorials/introductory/customizing.html Examples -------- .. include:: ../docstrings/objects.Plot.theme.rst """
/usr/src/app/target_test_cases/failed_tests_plot.Plot.theme.txt
def theme(self, config: Mapping[str, Any], /) -> Plot: """ Control the appearance of elements in the plot. .. note:: The API for customizing plot appearance is not yet finalized. Currently, the only valid argument is a dict of matplotlib rc parameters. (This dict must be passed as a positional argument.) It is likely that this method will be enhanced in future releases. Matplotlib rc parameters are documented on the following page: https://matplotlib.org/stable/tutorials/introductory/customizing.html Examples -------- .. include:: ../docstrings/objects.Plot.theme.rst """ new = self._clone() rc = mpl.RcParams(config) new._theme.update(rc) return new
plot.Plot.theme
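A usage sketch that seeds the rc dict from a named seaborn style:

import seaborn as sns
import seaborn.objects as so
from seaborn import axes_style

tips = sns.load_dataset("tips")
(
    so.Plot(tips, x="total_bill", y="tip")
    .add(so.Dots())
    .theme({**axes_style("whitegrid"), "grid.linestyle": ":"})
)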
seaborn
59
seaborn/_core/properties.py
def _default_values(self, n: int) -> list[DashPatternWithOffset]: """Build an arbitrarily long list of unique dash styles for lines. Parameters ---------- n : int Number of unique dash specs to generate. Returns ------- dashes : list of strings or tuples Valid arguments for the ``dashes`` parameter on :class:`matplotlib.lines.Line2D`. The first spec is a solid line (``""``), the remainder are sequences of long and short dashes. """
/usr/src/app/target_test_cases/failed_tests_properties.LineStyle._default_values.txt
def _default_values(self, n: int) -> list[DashPatternWithOffset]: """Build an arbitrarily long list of unique dash styles for lines. Parameters ---------- n : int Number of unique dash specs to generate. Returns ------- dashes : list of strings or tuples Valid arguments for the ``dashes`` parameter on :class:`matplotlib.lines.Line2D`. The first spec is a solid line (``""``), the remainder are sequences of long and short dashes. """ # Start with dash specs that are well distinguishable dashes: list[str | DashPattern] = [ "-", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1), ] # Now programmatically build as many as we need p = 3 while len(dashes) < n: # Take combinations of long and short dashes a = itertools.combinations_with_replacement([3, 1.25], p) b = itertools.combinations_with_replacement([4, 1], p) # Interleave the combinations, reversing one of the streams segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1])) # Now insert the gaps for segments in segment_list: gap = min(segments) spec = tuple(itertools.chain(*((seg, gap) for seg in segments))) dashes.append(spec) p += 1 return [self._get_dash_pattern(x) for x in dashes]
properties.LineStyle._default_values
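A quick look at the generated specs; this is an internal helper (note the leading underscore), so the import path and interface may change:

from seaborn._core.properties import LineStyle

specs = LineStyle()._default_values(8)
print(specs[0])   # the solid line
print(specs[1])   # an (offset, (on, off, ...)) dash pattern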
seaborn
60
seaborn/_core/properties.py
def _default_values(self, n: int) -> list[MarkerStyle]: """Build an arbitrarily long list of unique marker styles. Parameters ---------- n : int Number of unique marker specs to generate. Returns ------- markers : list of string or tuples Values for defining :class:`matplotlib.markers.MarkerStyle` objects. All markers will be filled. """
/usr/src/app/target_test_cases/failed_tests_properties.Marker._default_values.txt
def _default_values(self, n: int) -> list[MarkerStyle]: """Build an arbitrarily long list of unique marker styles. Parameters ---------- n : int Number of unique marker specs to generate. Returns ------- markers : list of string or tuples Values for defining :class:`matplotlib.markers.MarkerStyle` objects. All markers will be filled. """ # Start with marker specs that are well distinguishable markers = [ "o", "X", (4, 0, 45), "P", (4, 0, 0), (4, 1, 0), "^", (4, 1, 45), "v", ] # Now generate more from regular polygons of increasing order s = 5 while len(markers) < n: a = 360 / (s + 1) / 2 markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)]) s += 1 markers = [MarkerStyle(m) for m in markers[:n]] return markers
properties.Marker._default_values
seaborn
61
seaborn/rcmod.py
def set_context(context=None, font_scale=1, rc=None): """ Set the parameters that control the scaling of plot elements. These parameters correspond to label size, line thickness, etc. Calling this function modifies the global matplotlib `rcParams`. For more information, see the :doc:`aesthetics tutorial <../tutorial/aesthetics>`. The base context is "notebook", and the other contexts are "paper", "talk", and "poster", which are versions of the notebook parameters scaled by different values. Font elements can also be scaled independently of (but relative to) the other values. See :func:`plotting_context` to get the parameter values. Parameters ---------- context : dict, or one of {paper, notebook, talk, poster} A dictionary of parameters or the name of a preconfigured set. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. rc : dict, optional Parameter mappings to override the values in the preset seaborn context dictionaries. This only updates parameters that are considered part of the context definition. Examples -------- .. include:: ../docstrings/set_context.rst """
/usr/src/app/target_test_cases/failed_tests_rcmod.set_context.txt
def set_context(context=None, font_scale=1, rc=None): """ Set the parameters that control the scaling of plot elements. These parameters correspond to label size, line thickness, etc. Calling this function modifies the global matplotlib `rcParams`. For more information, see the :doc:`aesthetics tutorial <../tutorial/aesthetics>`. The base context is "notebook", and the other contexts are "paper", "talk", and "poster", which are versions of the notebook parameters scaled by different values. Font elements can also be scaled independently of (but relative to) the other values. See :func:`plotting_context` to get the parameter values. Parameters ---------- context : dict, or one of {paper, notebook, talk, poster} A dictionary of parameters or the name of a preconfigured set. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. rc : dict, optional Parameter mappings to override the values in the preset seaborn context dictionaries. This only updates parameters that are considered part of the context definition. Examples -------- .. include:: ../docstrings/set_context.rst """ context_object = plotting_context(context, font_scale, rc) mpl.rcParams.update(context_object)
rcmod.set_context
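Illustrative usage (not part of the record above): scaling elements for a presentation; per the docstring, the rc override only affects parameters that belong to the context definition.

import seaborn as sns

# Enlarge elements for a talk, with fonts scaled a bit further
sns.set_context("talk", font_scale=1.2, rc={"lines.linewidth": 2.5})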
seaborn
62
seaborn/rcmod.py
def set_palette(palette, n_colors=None, desat=None, color_codes=False): """Set the matplotlib color cycle using a seaborn palette. Parameters ---------- palette : seaborn color palette | matplotlib colormap | hls | husl Palette definition. Should be something :func:`color_palette` can process. n_colors : int Number of colors in the cycle. The default number of colors will depend on the format of ``palette``, see the :func:`color_palette` documentation for more information. desat : float Proportion to desaturate each color by. color_codes : bool If ``True`` and ``palette`` is a seaborn palette, remap the shorthand color codes (e.g. "b", "g", "r", etc.) to the colors from this palette. See Also -------- color_palette : build a color palette or set the color cycle temporarily in a ``with`` statement. set_context : set parameters to scale plot elements set_style : set the default parameters for figure style """
/usr/src/app/target_test_cases/failed_tests_rcmod.set_palette.txt
def set_palette(palette, n_colors=None, desat=None, color_codes=False): """Set the matplotlib color cycle using a seaborn palette. Parameters ---------- palette : seaborn color palette | matplotlib colormap | hls | husl Palette definition. Should be something :func:`color_palette` can process. n_colors : int Number of colors in the cycle. The default number of colors will depend on the format of ``palette``, see the :func:`color_palette` documentation for more information. desat : float Proportion to desaturate each color by. color_codes : bool If ``True`` and ``palette`` is a seaborn palette, remap the shorthand color codes (e.g. "b", "g", "r", etc.) to the colors from this palette. See Also -------- color_palette : build a color palette or set the color cycle temporarily in a ``with`` statement. set_context : set parameters to scale plot elements set_style : set the default parameters for figure style """ colors = palettes.color_palette(palette, n_colors, desat) cyl = cycler('color', colors) mpl.rcParams['axes.prop_cycle'] = cyl if color_codes: try: palettes.set_color_codes(palette) except (ValueError, TypeError): pass
rcmod.set_palette
seaborn
63
seaborn/rcmod.py
def set_style(style=None, rc=None): """ Set the parameters that control the general style of the plots. The style parameters control properties like the color of the background and whether a grid is enabled by default. This is accomplished using the matplotlib rcParams system. The options are illustrated in the :doc:`aesthetics tutorial <../tutorial/aesthetics>`. See :func:`axes_style` to get the parameter values. Parameters ---------- style : dict, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured style. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. Examples -------- .. include:: ../docstrings/set_style.rst """
/usr/src/app/target_test_cases/failed_tests_rcmod.set_style.txt
def set_style(style=None, rc=None): """ Set the parameters that control the general style of the plots. The style parameters control properties like the color of the background and whether a grid is enabled by default. This is accomplished using the matplotlib rcParams system. The options are illustrated in the :doc:`aesthetics tutorial <../tutorial/aesthetics>`. See :func:`axes_style` to get the parameter values. Parameters ---------- style : dict, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured style. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. Examples -------- .. include:: ../docstrings/set_style.rst """ style_object = axes_style(style, rc) mpl.rcParams.update(style_object)
rcmod.set_style
seaborn
64
seaborn/rcmod.py
def set_theme(context="notebook", style="darkgrid", palette="deep", font="sans-serif", font_scale=1, color_codes=True, rc=None): """ Set aspects of the visual theme for all matplotlib and seaborn plots. This function changes the global defaults for all plots using the matplotlib rcParams system. The themeing is decomposed into several distinct sets of parameter values. The options are illustrated in the :doc:`aesthetics <../tutorial/aesthetics>` and :doc:`color palette <../tutorial/color_palettes>` tutorials. Parameters ---------- context : string or dict Scaling parameters, see :func:`plotting_context`. style : string or dict Axes style parameters, see :func:`axes_style`. palette : string or sequence Color palette, see :func:`color_palette`. font : string Font family, see matplotlib font manager. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. color_codes : bool If ``True`` and ``palette`` is a seaborn palette, remap the shorthand color codes (e.g. "b", "g", "r", etc.) to the colors from this palette. rc : dict or None Dictionary of rc parameter mappings to override the above. Examples -------- .. include:: ../docstrings/set_theme.rst """
/usr/src/app/target_test_cases/failed_tests_rcmod.set_theme.txt
def set_theme(context="notebook", style="darkgrid", palette="deep", font="sans-serif", font_scale=1, color_codes=True, rc=None): """ Set aspects of the visual theme for all matplotlib and seaborn plots. This function changes the global defaults for all plots using the matplotlib rcParams system. The themeing is decomposed into several distinct sets of parameter values. The options are illustrated in the :doc:`aesthetics <../tutorial/aesthetics>` and :doc:`color palette <../tutorial/color_palettes>` tutorials. Parameters ---------- context : string or dict Scaling parameters, see :func:`plotting_context`. style : string or dict Axes style parameters, see :func:`axes_style`. palette : string or sequence Color palette, see :func:`color_palette`. font : string Font family, see matplotlib font manager. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. color_codes : bool If ``True`` and ``palette`` is a seaborn palette, remap the shorthand color codes (e.g. "b", "g", "r", etc.) to the colors from this palette. rc : dict or None Dictionary of rc parameter mappings to override the above. Examples -------- .. include:: ../docstrings/set_theme.rst """ set_context(context, font_scale) set_style(style, rc={"font.family": font}) set_palette(palette, color_codes=color_codes) if rc is not None: mpl.rcParams.update(rc)
rcmod.set_theme
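Illustrative usage (not part of the record above): set_theme composes set_context, set_style, and set_palette in one call, as the function body shows.

import seaborn as sns

sns.set_theme(context="notebook", style="whitegrid", palette="muted", font_scale=1.1)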
seaborn
65
seaborn/regression.py
def residplot( data=None, *, x=None, y=None, x_partial=None, y_partial=None, lowess=False, order=1, robust=False, dropna=True, label=None, color=None, scatter_kws=None, line_kws=None, ax=None ): """Plot the residuals of a linear regression. This function will regress y on x (possibly as a robust or polynomial regression) and then draw a scatterplot of the residuals. You can optionally fit a lowess smoother to the residual plot, which can help in determining if there is structure to the residuals. Parameters ---------- data : DataFrame, optional DataFrame to use if `x` and `y` are column names. x : vector or string Data or column name in `data` for the predictor variable. y : vector or string Data or column name in `data` for the response variable. {x, y}_partial : vectors or string(s), optional These variables are treated as confounding and are removed from the `x` or `y` variables before plotting. lowess : boolean, optional Fit a lowess smoother to the residual scatterplot. order : int, optional Order of the polynomial to fit when calculating the residuals. robust : boolean, optional Fit a robust linear regression when calculating the residuals. dropna : boolean, optional If True, ignore observations with missing data when fitting and plotting. label : string, optional Label that will be used in any plot legends. color : matplotlib color, optional Color to use for all elements of the plot. {scatter, line}_kws : dictionaries, optional Additional keyword arguments passed to scatter() and plot() for drawing the components of the plot. ax : matplotlib axis, optional Plot into this axis, otherwise grab the current axis, creating one if it does not exist. Returns ------- ax : matplotlib axes Axes with the regression plot. See Also -------- regplot : Plot a simple linear regression model. jointplot : Draw a :func:`residplot` with univariate marginal distributions (when used with ``kind="resid"``). Examples -------- .. include:: ../docstrings/residplot.rst """
/usr/src/app/target_test_cases/failed_tests_regression.residplot.txt
def residplot( data=None, *, x=None, y=None, x_partial=None, y_partial=None, lowess=False, order=1, robust=False, dropna=True, label=None, color=None, scatter_kws=None, line_kws=None, ax=None ): """Plot the residuals of a linear regression. This function will regress y on x (possibly as a robust or polynomial regression) and then draw a scatterplot of the residuals. You can optionally fit a lowess smoother to the residual plot, which can help in determining if there is structure to the residuals. Parameters ---------- data : DataFrame, optional DataFrame to use if `x` and `y` are column names. x : vector or string Data or column name in `data` for the predictor variable. y : vector or string Data or column name in `data` for the response variable. {x, y}_partial : vectors or string(s), optional These variables are treated as confounding and are removed from the `x` or `y` variables before plotting. lowess : boolean, optional Fit a lowess smoother to the residual scatterplot. order : int, optional Order of the polynomial to fit when calculating the residuals. robust : boolean, optional Fit a robust linear regression when calculating the residuals. dropna : boolean, optional If True, ignore observations with missing data when fitting and plotting. label : string, optional Label that will be used in any plot legends. color : matplotlib color, optional Color to use for all elements of the plot. {scatter, line}_kws : dictionaries, optional Additional keyword arguments passed to scatter() and plot() for drawing the components of the plot. ax : matplotlib axis, optional Plot into this axis, otherwise grab the current axis, creating one if it does not exist. Returns ------- ax : matplotlib axes Axes with the regression plot. See Also -------- regplot : Plot a simple linear regression model. jointplot : Draw a :func:`residplot` with univariate marginal distributions (when used with ``kind="resid"``). Examples -------- .. include:: ../docstrings/residplot.rst """ plotter = _RegressionPlotter(x, y, data, ci=None, order=order, robust=robust, x_partial=x_partial, y_partial=y_partial, dropna=dropna, color=color, label=label) if ax is None: ax = plt.gca() # Calculate the residual from a linear regression _, yhat, _ = plotter.fit_regression(grid=plotter.x) plotter.y = plotter.y - yhat # Set the regression option on the plotter if lowess: plotter.lowess = True else: plotter.fit_reg = False # Plot a horizontal line at 0 ax.axhline(0, ls=":", c=".2") # Draw the scatterplot scatter_kws = {} if scatter_kws is None else scatter_kws.copy() line_kws = {} if line_kws is None else line_kws.copy() plotter.plot(ax, scatter_kws, line_kws) return ax
regression.residplot
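Illustrative usage (not part of the record above): a minimal sketch on synthetic data, since the function only needs x and y vectors; note that lowess smoothing requires statsmodels to be installed.

import numpy as np
import seaborn as sns

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 2 * x + rng.normal(scale=0.5, size=100)

# Residuals of a first-order fit; lowess=True adds a smoother to reveal structure
ax = sns.residplot(x=x, y=y, lowess=True)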
seaborn
66
seaborn/_core/rules.py
def variable_type( vector: Series, boolean_type: Literal["numeric", "categorical", "boolean"] = "numeric", strict_boolean: bool = False, ) -> VarType: """ Determine whether a vector contains numeric, categorical, or datetime data. This function differs from the pandas typing API in a few ways: - Python sequences or object-typed PyData objects are considered numeric if all of their entries are numeric. - String or mixed-type data are considered categorical even if not explicitly represented as a :class:`pandas.api.types.CategoricalDtype`. - There is some flexibility about how to treat binary / boolean data. Parameters ---------- vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence Input data to test. boolean_type : 'numeric', 'categorical', or 'boolean' Type to use for vectors containing only 0s and 1s (and NAs). strict_boolean : bool If True, only consider data to be boolean when the dtype is bool or Boolean. Returns ------- var_type : 'numeric', 'categorical', or 'datetime' Name identifying the type of data in the vector. """
/usr/src/app/target_test_cases/failed_tests_rules.variable_type.txt
def variable_type( vector: Series, boolean_type: Literal["numeric", "categorical", "boolean"] = "numeric", strict_boolean: bool = False, ) -> VarType: """ Determine whether a vector contains numeric, categorical, or datetime data. This function differs from the pandas typing API in a few ways: - Python sequences or object-typed PyData objects are considered numeric if all of their entries are numeric. - String or mixed-type data are considered categorical even if not explicitly represented as a :class:`pandas.api.types.CategoricalDtype`. - There is some flexibility about how to treat binary / boolean data. Parameters ---------- vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence Input data to test. boolean_type : 'numeric', 'categorical', or 'boolean' Type to use for vectors containing only 0s and 1s (and NAs). strict_boolean : bool If True, only consider data to be boolean when the dtype is bool or Boolean. Returns ------- var_type : 'numeric', 'categorical', or 'datetime' Name identifying the type of data in the vector. """ # If a categorical dtype is set, infer categorical if isinstance(getattr(vector, 'dtype', None), pd.CategoricalDtype): return VarType("categorical") # Special-case all-na data, which is always "numeric" if pd.isna(vector).all(): return VarType("numeric") # Now drop nulls to simplify further type inference vector = vector.dropna() # Special-case binary/boolean data, allow caller to determine # This triggers a numpy warning when vector has strings/objects # https://github.com/numpy/numpy/issues/6784 # Because we reduce with .all(), we are agnostic about whether the # comparison returns a scalar or vector, so we will ignore the warning. # It triggers a separate DeprecationWarning when the vector has datetimes: # https://github.com/numpy/numpy/issues/13548 # This is considered a bug by numpy and will likely go away. with warnings.catch_warnings(): warnings.simplefilter( action='ignore', category=(FutureWarning, DeprecationWarning) # type: ignore # mypy bug? ) if strict_boolean: if isinstance(vector.dtype, pd.core.dtypes.base.ExtensionDtype): boolean_dtypes = ["bool", "boolean"] else: boolean_dtypes = ["bool"] boolean_vector = vector.dtype in boolean_dtypes else: try: boolean_vector = bool(np.isin(vector, [0, 1]).all()) except TypeError: # .isin comparison is not guaranteed to be possible under NumPy # casting rules, depending on the (unknown) dtype of 'vector' boolean_vector = False if boolean_vector: return VarType(boolean_type) # Defer to positive pandas tests if pd.api.types.is_numeric_dtype(vector): return VarType("numeric") if pd.api.types.is_datetime64_dtype(vector): return VarType("datetime") # --- If we get to here, we need to check the entries # Check for a collection where everything is a number def all_numeric(x): for x_i in x: if not isinstance(x_i, Number): return False return True if all_numeric(vector): return VarType("numeric") # Check for a collection where everything is a datetime def all_datetime(x): for x_i in x: if not isinstance(x_i, (datetime, np.datetime64)): return False return True if all_datetime(vector): return VarType("datetime") # Otherwise, our final fallback is to consider things categorical return VarType("categorical")
rules.variable_type
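Illustrative usage (not part of the record above): variable_type is an internal helper, imported here from the module path given in the record.

import pandas as pd
from seaborn._core.rules import variable_type

variable_type(pd.Series([1.0, 2.0, 3.0]))                    # VarType('numeric')
variable_type(pd.Series(["a", "b"]))                         # VarType('categorical')
variable_type(pd.Series([0, 1, 1]), boolean_type="boolean")  # VarType('boolean')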
seaborn
67
seaborn/_core/scales.py
def tick( self, locator: Locator | None = None, *, at: Sequence[float] | None = None, upto: int | None = None, count: int | None = None, every: float | None = None, between: tuple[float, float] | None = None, minor: int | None = None, ) -> Continuous: """ Configure the selection of ticks for the scale's axis or legend. Parameters ---------- locator : :class:`matplotlib.ticker.Locator` subclass Pre-configured matplotlib locator; other parameters will not be used. at : sequence of floats Place ticks at these specific locations (in data units). upto : int Choose "nice" locations for ticks, but do not exceed this number. count : int Choose exactly this number of ticks, bounded by `between` or axis limits. every : float Choose locations at this interval of separation (in data units). between : pair of floats Bound upper / lower ticks when using `every` or `count`. minor : int Number of unlabeled ticks to draw between labeled "major" ticks. Returns ------- scale Copy of self with new tick configuration. """
/usr/src/app/target_test_cases/failed_tests_scales.Continuous.tick.txt
def tick( self, locator: Locator | None = None, *, at: Sequence[float] | None = None, upto: int | None = None, count: int | None = None, every: float | None = None, between: tuple[float, float] | None = None, minor: int | None = None, ) -> Continuous: """ Configure the selection of ticks for the scale's axis or legend. Parameters ---------- locator : :class:`matplotlib.ticker.Locator` subclass Pre-configured matplotlib locator; other parameters will not be used. at : sequence of floats Place ticks at these specific locations (in data units). upto : int Choose "nice" locations for ticks, but do not exceed this number. count : int Choose exactly this number of ticks, bounded by `between` or axis limits. every : float Choose locations at this interval of separation (in data units). between : pair of floats Bound upper / lower ticks when using `every` or `count`. minor : int Number of unlabeled ticks to draw between labeled "major" ticks. Returns ------- scale Copy of self with new tick configuration. """ # Input checks if locator is not None and not isinstance(locator, Locator): raise TypeError( f"Tick locator must be an instance of {Locator!r}, " f"not {type(locator)!r}." ) log_base, symlog_thresh = self._parse_for_log_params(self.trans) if log_base or symlog_thresh: if count is not None and between is None: raise RuntimeError("`count` requires `between` with log transform.") if every is not None: raise RuntimeError("`every` not supported with log transform.") new = copy(self) new._tick_params = { "locator": locator, "at": at, "upto": upto, "count": count, "every": every, "between": between, "minor": minor, } return new
scales.Continuous.tick
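Illustrative usage (not part of the record above): a sketch with the objects interface (assumes seaborn >= 0.12; data values are made up).

import seaborn.objects as so

# Major ticks every 10 data units, with one unlabeled minor tick between them
p = (
    so.Plot(x=[0, 25, 50], y=[1, 2, 3])
    .add(so.Dot())
    .scale(x=so.Continuous().tick(every=10, minor=1))
)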
seaborn
68
seaborn/_core/scales.py
def label( self, formatter: Formatter | None = None, *, concise: bool = False, ) -> Temporal: """ Configure the appearance of tick labels for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. Parameters ---------- formatter : :class:`matplotlib.ticker.Formatter` subclass Pre-configured formatter to use; other parameters will be ignored. concise : bool If True, use :class:`matplotlib.dates.ConciseDateFormatter` to make the tick labels as compact as possible. Returns ------- scale Copy of self with new label configuration. """
/usr/src/app/target_test_cases/failed_tests_scales.Temporal.label.txt
def label( self, formatter: Formatter | None = None, *, concise: bool = False, ) -> Temporal: """ Configure the appearance of tick labels for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. Parameters ---------- formatter : :class:`matplotlib.ticker.Formatter` subclass Pre-configured formatter to use; other parameters will be ignored. concise : bool If True, use :class:`matplotlib.dates.ConciseDateFormatter` to make the tick labels as compact as possible. Returns ------- scale Copy of self with new label configuration. """ new = copy(self) new._label_params = {"formatter": formatter, "concise": concise} return new
scales.Temporal.label
seaborn
69
seaborn/_core/scales.py
def tick( self, locator: Locator | None = None, *, upto: int | None = None, ) -> Temporal: """ Configure the selection of ticks for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. Parameters ---------- locator : :class:`matplotlib.ticker.Locator` subclass Pre-configured matplotlib locator; other parameters will not be used. upto : int Choose "nice" locations for ticks, but do not exceed this number. Returns ------- scale Copy of self with new tick configuration. """
/usr/src/app/target_test_cases/failed_tests_scales.Temporal.tick.txt
def tick( self, locator: Locator | None = None, *, upto: int | None = None, ) -> Temporal: """ Configure the selection of ticks for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. Parameters ---------- locator : :class:`matplotlib.ticker.Locator` subclass Pre-configured matplotlib locator; other parameters will not be used. upto : int Choose "nice" locations for ticks, but do not exceed this number. Returns ------- scale Copy of self with new tick configuration. """ if locator is not None and not isinstance(locator, Locator): err = ( f"Tick locator must be an instance of {Locator!r}, " f"not {type(locator)!r}." ) raise TypeError(err) new = copy(self) new._tick_params = {"locator": locator, "upto": upto} return new
scales.Temporal.tick
seaborn
70
seaborn/utils.py
def ci_to_errsize(cis, heights): """Convert intervals to error arguments relative to plot heights. Parameters ---------- cis : 2 x n sequence sequence of confidence interval limits heights : n sequence sequence of plot heights Returns ------- errsize : 2 x n array error sizes relative to the heights, in the format expected by the ``yerr`` argument of ``plt.bar`` """
/usr/src/app/target_test_cases/failed_tests_utils.ci_to_errsize.txt
def ci_to_errsize(cis, heights): """Convert intervals to error arguments relative to plot heights. Parameters ---------- cis : 2 x n sequence sequence of confidence interval limits heights : n sequence sequence of plot heights Returns ------- errsize : 2 x n array error sizes relative to the heights, in the format expected by the ``yerr`` argument of ``plt.bar`` """ cis = np.atleast_2d(cis).reshape(2, -1) heights = np.atleast_1d(heights) errsize = [] for i, (low, high) in enumerate(np.transpose(cis)): h = heights[i] elow = h - low ehigh = high - h errsize.append([elow, ehigh]) errsize = np.asarray(errsize).T return errsize
utils.ci_to_errsize
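Illustrative usage (not part of the record above): ci_to_errsize is an internal helper; the numbers below are made up.

import numpy as np
from seaborn.utils import ci_to_errsize

heights = np.array([2.0, 3.0])
cis = np.array([[1.5, 2.0],   # lower limits
                [2.5, 4.0]])  # upper limits
ci_to_errsize(cis, heights)   # array([[0.5, 1. ], [0.5, 1. ]])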
seaborn
71
seaborn/utils.py
def despine(fig=None, ax=None, top=True, right=True, left=False, bottom=False, offset=None, trim=False): """Remove the top and right spines from plot(s). Parameters ---------- fig : matplotlib figure, optional Figure to despine all axes of, defaults to the current figure. ax : matplotlib axes, optional Specific axes object to despine. Ignored if fig is provided. top, right, left, bottom : boolean, optional If True, remove that spine. offset : int or dict, optional Absolute distance, in points, spines should be moved away from the axes (negative values move spines inward). A single value applies to all spines; a dict can be used to set offset values per side. trim : bool, optional If True, limit spines to the smallest and largest major tick on each non-despined axis. Returns ------- None """
/usr/src/app/target_test_cases/failed_tests_utils.despine.txt
def despine(fig=None, ax=None, top=True, right=True, left=False, bottom=False, offset=None, trim=False): """Remove the top and right spines from plot(s). Parameters ---------- fig : matplotlib figure, optional Figure to despine all axes of, defaults to the current figure. ax : matplotlib axes, optional Specific axes object to despine. Ignored if fig is provided. top, right, left, bottom : boolean, optional If True, remove that spine. offset : int or dict, optional Absolute distance, in points, spines should be moved away from the axes (negative values move spines inward). A single value applies to all spines; a dict can be used to set offset values per side. trim : bool, optional If True, limit spines to the smallest and largest major tick on each non-despined axis. Returns ------- None """ # Get references to the axes we want if fig is None and ax is None: axes = plt.gcf().axes elif fig is not None: axes = fig.axes elif ax is not None: axes = [ax] for ax_i in axes: for side in ["top", "right", "left", "bottom"]: # Toggle the spine objects is_visible = not locals()[side] ax_i.spines[side].set_visible(is_visible) if offset is not None and is_visible: try: val = offset.get(side, 0) except AttributeError: val = offset ax_i.spines[side].set_position(('outward', val)) # Potentially move the ticks if left and not right: maj_on = any( t.tick1line.get_visible() for t in ax_i.yaxis.majorTicks ) min_on = any( t.tick1line.get_visible() for t in ax_i.yaxis.minorTicks ) ax_i.yaxis.set_ticks_position("right") for t in ax_i.yaxis.majorTicks: t.tick2line.set_visible(maj_on) for t in ax_i.yaxis.minorTicks: t.tick2line.set_visible(min_on) if bottom and not top: maj_on = any( t.tick1line.get_visible() for t in ax_i.xaxis.majorTicks ) min_on = any( t.tick1line.get_visible() for t in ax_i.xaxis.minorTicks ) ax_i.xaxis.set_ticks_position("top") for t in ax_i.xaxis.majorTicks: t.tick2line.set_visible(maj_on) for t in ax_i.xaxis.minorTicks: t.tick2line.set_visible(min_on) if trim: # clip off the parts of the spines that extend past major ticks xticks = np.asarray(ax_i.get_xticks()) if xticks.size: firsttick = np.compress(xticks >= min(ax_i.get_xlim()), xticks)[0] lasttick = np.compress(xticks <= max(ax_i.get_xlim()), xticks)[-1] ax_i.spines['bottom'].set_bounds(firsttick, lasttick) ax_i.spines['top'].set_bounds(firsttick, lasttick) newticks = xticks.compress(xticks <= lasttick) newticks = newticks.compress(newticks >= firsttick) ax_i.set_xticks(newticks) yticks = np.asarray(ax_i.get_yticks()) if yticks.size: firsttick = np.compress(yticks >= min(ax_i.get_ylim()), yticks)[0] lasttick = np.compress(yticks <= max(ax_i.get_ylim()), yticks)[-1] ax_i.spines['left'].set_bounds(firsttick, lasttick) ax_i.spines['right'].set_bounds(firsttick, lasttick) newticks = yticks.compress(yticks <= lasttick) newticks = newticks.compress(newticks >= firsttick) ax_i.set_yticks(newticks)
utils.despine
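Illustrative usage (not part of the record above): a minimal sketch on a bare matplotlib axes.

import matplotlib.pyplot as plt
import seaborn as sns

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])

# Remove top/right spines, move the rest outward 10 points, trim to the tick range
sns.despine(ax=ax, offset=10, trim=True)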
seaborn
72
seaborn/utils.py
def get_color_cycle(): """Return the list of colors in the current matplotlib color cycle. Parameters ---------- None Returns ------- colors : list List of matplotlib colors in the current cycle, or dark gray if the current color cycle is empty. """
/usr/src/app/target_test_cases/failed_tests_utils.get_color_cycle.txt
def get_color_cycle(): """Return the list of colors in the current matplotlib color cycle. Parameters ---------- None Returns ------- colors : list List of matplotlib colors in the current cycle, or dark gray if the current color cycle is empty. """ cycler = mpl.rcParams['axes.prop_cycle'] return cycler.by_key()['color'] if 'color' in cycler.keys else [".15"]
utils.get_color_cycle
seaborn
73
seaborn/utils.py
def move_legend(obj, loc, **kwargs): """ Recreate a plot's legend at a new location. The name is a slight misnomer. Matplotlib legends do not expose public control over their position parameters. So this function creates a new legend, copying over the data from the original object, which is then removed. Parameters ---------- obj : the object with the plot This argument can be either a seaborn or matplotlib object: - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid` - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure` loc : str or int Location argument, as in :meth:`matplotlib.axes.Axes.legend`. kwargs Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`. Examples -------- .. include:: ../docstrings/move_legend.rst """
/usr/src/app/target_test_cases/failed_tests_utils.move_legend.txt
def move_legend(obj, loc, **kwargs): """ Recreate a plot's legend at a new location. The name is a slight misnomer. Matplotlib legends do not expose public control over their position parameters. So this function creates a new legend, copying over the data from the original object, which is then removed. Parameters ---------- obj : the object with the plot This argument can be either a seaborn or matplotlib object: - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid` - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure` loc : str or int Location argument, as in :meth:`matplotlib.axes.Axes.legend`. kwargs Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`. Examples -------- .. include:: ../docstrings/move_legend.rst """ # This is a somewhat hackish solution that will hopefully be obviated by # upstream improvements to matplotlib legends that make them easier to # modify after creation. from seaborn.axisgrid import Grid # Avoid circular import # Locate the legend object and a method to recreate the legend if isinstance(obj, Grid): old_legend = obj.legend legend_func = obj.figure.legend elif isinstance(obj, mpl.axes.Axes): old_legend = obj.legend_ legend_func = obj.legend elif isinstance(obj, mpl.figure.Figure): if obj.legends: old_legend = obj.legends[-1] else: old_legend = None legend_func = obj.legend else: err = "`obj` must be a seaborn Grid or matplotlib Axes or Figure instance." raise TypeError(err) if old_legend is None: err = f"{obj} has no legend attached." raise ValueError(err) # Extract the components of the legend we need to reuse # Import here to avoid a circular import from seaborn._compat import get_legend_handles handles = get_legend_handles(old_legend) labels = [t.get_text() for t in old_legend.get_texts()] # Handle the case where the user is trying to override the labels if (new_labels := kwargs.pop("labels", None)) is not None: if len(new_labels) != len(labels): err = "Length of new labels does not match existing legend." raise ValueError(err) labels = new_labels # Extract legend properties that can be passed to the recreation method # (Vexingly, these don't all round-trip) legend_kws = inspect.signature(mpl.legend.Legend).parameters props = {k: v for k, v in old_legend.properties().items() if k in legend_kws} # Delegate default bbox_to_anchor rules to matplotlib props.pop("bbox_to_anchor") # Try to propagate the existing title and font properties; respect new ones too title = props.pop("title") if "title" in kwargs: title.set_text(kwargs.pop("title")) title_kwargs = {k: v for k, v in kwargs.items() if k.startswith("title_")} for key, val in title_kwargs.items(): title.set(**{key[6:]: val}) kwargs.pop(key) # Try to respect the frame visibility kwargs.setdefault("frameon", old_legend.legendPatch.get_visible()) # Remove the old legend and create the new one props.update(kwargs) old_legend.remove() new_legend = legend_func(handles, labels, loc=loc, **props) new_legend.set_title(title.get_text(), title.get_fontproperties()) # Let the Grid object continue to track the correct legend object if isinstance(obj, Grid): obj._legend = new_legend
utils.move_legend
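Illustrative usage (not part of the record above): recreating an axes legend outside the plot area; the "title" keyword is handled by the function body shown in the record.

import matplotlib.pyplot as plt
import seaborn as sns

fig, ax = plt.subplots()
for label in ["a", "b"]:
    ax.plot([0, 1], [0, 1 if label == "a" else 2], label=label)
ax.legend()

# Rebuild the legend in a new location, with a new title
sns.move_legend(ax, "upper left", bbox_to_anchor=(1, 1), title="group")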
seaborn
74
seaborn/utils.py
def remove_na(vector): """Helper method for removing null values from data vectors. Parameters ---------- vector : vector object Must implement boolean masking with [] subscript syntax. Returns ------- clean : same type as ``vector`` Vector of data with null values removed. May be a copy or a view. """
/usr/src/app/target_test_cases/failed_tests_utils.remove_na.txt
def remove_na(vector): """Helper method for removing null values from data vectors. Parameters ---------- vector : vector object Must implement boolean masking with [] subscript syntax. Returns ------- clean : same type as ``vector`` Vector of data with null values removed. May be a copy or a view. """ return vector[pd.notnull(vector)]
utils.remove_na
seaborn
75
seaborn/utils.py
def saturate(color): """Return a fully saturated color with the same hue. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name Returns ------- new_color : rgb tuple saturated color code in RGB tuple representation """
/usr/src/app/target_test_cases/failed_tests_utils.saturate.txt
def saturate(color): """Return a fully saturated color with the same hue. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name Returns ------- new_color : rgb tuple saturated color code in RGB tuple representation """ return set_hls_values(color, s=1)
utils.saturate
seaborn
76
seaborn/utils.py
def set_hls_values(color, h=None, l=None, s=None): # noqa """Independently manipulate the h, l, or s channels of a color. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name h, l, s : floats between 0 and 1, or None new values for each channel in hls space Returns ------- new_color : rgb tuple new color code in RGB tuple representation """
/usr/src/app/target_test_cases/failed_tests_utils.set_hls_values.txt
def set_hls_values(color, h=None, l=None, s=None): # noqa """Independently manipulate the h, l, or s channels of a color. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name h, l, s : floats between 0 and 1, or None new values for each channel in hls space Returns ------- new_color : rgb tuple new color code in RGB tuple representation """ # Get an RGB tuple representation rgb = to_rgb(color) vals = list(colorsys.rgb_to_hls(*rgb)) for i, val in enumerate([h, l, s]): if val is not None: vals[i] = val rgb = colorsys.hls_to_rgb(*vals) return rgb
utils.set_hls_values
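Illustrative usage (not part of the record above): raising only the lightness channel; hue and saturation are untouched (assumes set_hls_values is exported at the top level, as in recent seaborn).

import seaborn as sns

sns.set_hls_values("steelblue", l=0.7)  # returns an RGB tuple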
seaborn
77
seaborn/utils.py
def to_utf8(obj): """Return a string representing a Python object. Strings (i.e. type ``str``) are returned unchanged. Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings. For other objects, the method ``__str__()`` is called, and the result is returned as a string. Parameters ---------- obj : object Any Python object Returns ------- s : str UTF-8-decoded string representation of ``obj`` """
/usr/src/app/target_test_cases/failed_tests_utils.to_utf8.txt
def to_utf8(obj): """Return a string representing a Python object. Strings (i.e. type ``str``) are returned unchanged. Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings. For other objects, the method ``__str__()`` is called, and the result is returned as a string. Parameters ---------- obj : object Any Python object Returns ------- s : str UTF-8-decoded string representation of ``obj`` """ if isinstance(obj, str): return obj try: return obj.decode(encoding="utf-8") except AttributeError: # obj is not bytes-like return str(obj)
utils.to_utf8
scikit-learn
0
sklearn/linear_model/_bayes.py
def fit(self, X, y): """Fit the model according to the given training data and parameters. Iterative procedure to maximize the evidence. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_ARDRegression.fit.txt
def fit(self, X, y): """Fit the model according to the given training data and parameters. Iterative procedure to maximize the evidence. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. Returns ------- self : object Fitted estimator. """ X, y = validate_data( self, X, y, dtype=[np.float64, np.float32], force_writeable=True, y_numeric=True, ensure_min_samples=2, ) dtype = X.dtype n_samples, n_features = X.shape coef_ = np.zeros(n_features, dtype=dtype) X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data( X, y, fit_intercept=self.fit_intercept, copy=self.copy_X ) self.X_offset_ = X_offset_ self.X_scale_ = X_scale_ # Launch the convergence loop keep_lambda = np.ones(n_features, dtype=bool) lambda_1 = self.lambda_1 lambda_2 = self.lambda_2 alpha_1 = self.alpha_1 alpha_2 = self.alpha_2 verbose = self.verbose # Initialization of the values of the parameters eps = np.finfo(np.float64).eps # Add `eps` in the denominator to omit division by zero if `np.var(y)` # is zero. # Explicitly set dtype to avoid unintended type promotion with numpy 2. alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype) lambda_ = np.ones(n_features, dtype=dtype) self.scores_ = list() coef_old_ = None def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_): coef_[keep_lambda] = alpha_ * np.linalg.multi_dot( [sigma_, X[:, keep_lambda].T, y] ) return coef_ update_sigma = ( self._update_sigma if n_samples >= n_features else self._update_sigma_woodbury ) # Iterative procedure of ARDRegression for iter_ in range(self.max_iter): sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) # Update alpha and lambda rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_) lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / ( (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2 ) alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / ( rmse_ + 2.0 * alpha_2 ) # Prune the weights with a precision over a threshold keep_lambda = lambda_ < self.threshold_lambda coef_[~keep_lambda] = 0 # Compute the objective function if self.compute_score: s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum() s += alpha_1 * log(alpha_) - alpha_2 * alpha_ s += 0.5 * ( fast_logdet(sigma_) + n_samples * log(alpha_) + np.sum(np.log(lambda_)) ) s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum()) self.scores_.append(s) # Check for convergence if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: if verbose: print("Converged after %s iterations" % iter_) break coef_old_ = np.copy(coef_) if not keep_lambda.any(): break self.n_iter_ = iter_ + 1 if keep_lambda.any(): # update sigma and mu using updated params from the last iteration sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) else: sigma_ = np.array([]).reshape(0, 0) self.coef_ = coef_ self.alpha_ = alpha_ self.sigma_ = sigma_ self.lambda_ = lambda_ self._set_intercept(X_offset_, y_offset_, X_scale_) return self
ARDRegression.fit
scikit-learn
1
sklearn/linear_model/_bayes.py
def predict(self, X, return_std=False): """Predict using the linear model. In addition to the mean of the predictive distribution, its standard deviation can also be returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. Returns ------- y_mean : array-like of shape (n_samples,) Mean of predictive distribution of query points. y_std : array-like of shape (n_samples,) Standard deviation of predictive distribution of query points. """
/usr/src/app/target_test_cases/failed_tests_ARDRegression.predict.txt
def predict(self, X, return_std=False): """Predict using the linear model. In addition to the mean of the predictive distribution, its standard deviation can also be returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. Returns ------- y_mean : array-like of shape (n_samples,) Mean of predictive distribution of query points. y_std : array-like of shape (n_samples,) Standard deviation of predictive distribution of query points. """ y_mean = self._decision_function(X) if return_std is False: return y_mean else: col_index = self.lambda_ < self.threshold_lambda X = _safe_indexing(X, indices=col_index, axis=1) sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1) y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_)) return y_mean, y_std
ARDRegression.predict
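Illustrative usage (not part of the records above): fitting ARDRegression on synthetic data where only one feature matters, then predicting with uncertainties.

import numpy as np
from sklearn.linear_model import ARDRegression

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
y = 3.0 * X[:, 0] + 0.1 * rng.randn(50)  # only the first feature is relevant

reg = ARDRegression().fit(X, y)
y_mean, y_std = reg.predict(X[:3], return_std=True)
# ARD prunes irrelevant weights, so reg.coef_[1:] should be near zero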
scikit-learn
2
sklearn/ensemble/_weight_boosting.py
def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- score : ndarray of shape (n_samples, k) The decision function of the input samples. The order of outputs is the same as that of the :term:`classes_` attribute. Binary classification is a special case with ``k == 1``; otherwise ``k == n_classes``. For binary classification, values closer to -1 or 1 mean more like the first or second class in ``classes_``, respectively. """
/usr/src/app/target_test_cases/failed_tests_AdaBoostClassifier.decision_function.txt
def decision_function(self, X): """Compute the decision function of ``X``. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- score : ndarray of shape (n_samples, k) The decision function of the input samples. The order of outputs is the same as that of the :term:`classes_` attribute. Binary classification is a special case with ``k == 1``; otherwise ``k == n_classes``. For binary classification, values closer to -1 or 1 mean more like the first or second class in ``classes_``, respectively. """ check_is_fitted(self) X = self._check_X(X) n_classes = self.n_classes_ classes = self.classes_[:, np.newaxis] # TODO(1.6): Remove, because "algorithm" param will be deprecated in 1.6 if self.algorithm == "SAMME.R": # The weights are all 1. for SAMME.R pred = sum( _samme_proba(estimator, n_classes, X) for estimator in self.estimators_ ) else: # self.algorithm == "SAMME" pred = sum( np.where( (estimator.predict(X) == classes).T, w, -1 / (n_classes - 1) * w, ) for estimator, w in zip(self.estimators_, self.estimator_weights_) ) pred /= self.estimator_weights_.sum() if n_classes == 2: pred[:, 0] *= -1 return pred.sum(axis=1) return pred
AdaBoostClassifier.decision_function
scikit-learn
3
sklearn/kernel_approximation.py
def fit(self, X, y=None): """Only validates estimator's parameters. This method allows one to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the transformer. """
/usr/src/app/target_test_cases/failed_tests_AdditiveChi2Sampler.fit.txt
def fit(self, X, y=None): """Only validates estimator's parameters. This method allows one to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the transformer. """ X = validate_data(self, X, accept_sparse="csr", ensure_non_negative=True) if self.sample_interval is None and self.sample_steps not in (1, 2, 3): raise ValueError( "If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval" ) return self
AdditiveChi2Sampler.fit
scikit-learn
4
sklearn/kernel_approximation.py
def transform(self, X): """Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : {ndarray, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps - 1)) Whether the return value is an array or sparse matrix depends on the type of the input X. """
/usr/src/app/target_test_cases/failed_tests_AdditiveChi2Sampler.transform.txt
def transform(self, X): """Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : {ndarray, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps - 1)) Whether the return value is an array or sparse matrix depends on the type of the input X. """ X = validate_data( self, X, accept_sparse="csr", reset=False, ensure_non_negative=True ) sparse = sp.issparse(X) if self.sample_interval is None: # See figure 2 c) of "Efficient additive kernels via explicit feature maps" # noqa # <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf> # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, # noqa # 2011 if self.sample_steps == 1: sample_interval = 0.8 elif self.sample_steps == 2: sample_interval = 0.5 elif self.sample_steps == 3: sample_interval = 0.4 else: raise ValueError( "If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval" ) else: sample_interval = self.sample_interval # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X, self.sample_steps, sample_interval)
AdditiveChi2Sampler.transform
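Illustrative usage (not part of the records above): the additive chi-squared map needs non-negative input and widens the feature space as described in the docstring.

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

X = np.abs(np.random.RandomState(0).randn(10, 4))  # must be non-negative
sampler = AdditiveChi2Sampler(sample_steps=2)
X_new = sampler.fit_transform(X)
X_new.shape  # (10, 12): n_features * (2 * sample_steps - 1)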
scikit-learn
5
sklearn/cluster/_affinity_propagation.py
def fit(self, X, y=None): """Fit the clustering from features, or affinity matrix. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ array-like of shape (n_samples, n_samples) Training instances to cluster, or similarities / affinities between instances if ``affinity='precomputed'``. If a sparse feature matrix is provided, it will be converted into a sparse ``csr_matrix``. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_AffinityPropagation.fit.txt
def fit(self, X, y=None): """Fit the clustering from features, or affinity matrix. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or \ array-like of shape (n_samples, n_samples) Training instances to cluster, or similarities / affinities between instances if ``affinity='precomputed'``. If a sparse feature matrix is provided, it will be converted into a sparse ``csr_matrix``. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Returns the instance itself. """ if self.affinity == "precomputed": X = validate_data(self, X, copy=self.copy, force_writeable=True) self.affinity_matrix_ = X else: # self.affinity == "euclidean" X = validate_data(self, X, accept_sparse="csr") self.affinity_matrix_ = -euclidean_distances(X, squared=True) if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]: raise ValueError( "The matrix of similarities must be a square array. " f"Got {self.affinity_matrix_.shape} instead." ) if self.preference is None: preference = np.median(self.affinity_matrix_) else: preference = self.preference preference = np.asarray(preference) random_state = check_random_state(self.random_state) ( self.cluster_centers_indices_, self.labels_, self.n_iter_, ) = _affinity_propagation( self.affinity_matrix_, max_iter=self.max_iter, convergence_iter=self.convergence_iter, preference=preference, damping=self.damping, verbose=self.verbose, return_n_iter=True, random_state=random_state, ) if self.affinity != "precomputed": self.cluster_centers_ = X[self.cluster_centers_indices_].copy() return self
AffinityPropagation.fit
scikit-learn
6
sklearn/cluster/_affinity_propagation.py
def predict(self, X): """Predict the closest cluster each sample in X belongs to. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to predict. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels. """
/usr/src/app/target_test_cases/failed_tests_AffinityPropagation.predict.txt
def predict(self, X): """Predict the closest cluster each sample in X belongs to. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to predict. If a sparse matrix is provided, it will be converted into a sparse ``csr_matrix``. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels. """ check_is_fitted(self) X = validate_data(self, X, reset=False, accept_sparse="csr") if not hasattr(self, "cluster_centers_"): raise ValueError( "Predict method is not supported when affinity='precomputed'." ) if self.cluster_centers_.shape[0] > 0: with config_context(assume_finite=True): return pairwise_distances_argmin(X, self.cluster_centers_) else: warnings.warn( ( "This model does not have any cluster centers " "because affinity propagation did not converge. " "Labeling every sample as '-1'." ), ConvergenceWarning, ) return np.array([-1] * X.shape[0])
AffinityPropagation.predict
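Illustrative usage (not part of the records above): two obvious clusters; predict assigns new points to the nearest exemplar.

import numpy as np
from sklearn.cluster import AffinityPropagation

X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])
clustering = AffinityPropagation(random_state=5).fit(X)
clustering.labels_                    # e.g. array([0, 0, 0, 1, 1, 1])
clustering.predict([[0, 0], [4, 4]])  # e.g. array([0, 1])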
scikit-learn
7
sklearn/ensemble/_bagging.py
def predict_log_proba(self, X): """Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the log of the mean predicted class probabilities of the base estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- p : ndarray of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """
/usr/src/app/target_test_cases/failed_tests_BaggingClassifier.predict_log_proba.txt
def predict_log_proba(self, X): """Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the log of the mean predicted class probabilities of the base estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- p : ndarray of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """ check_is_fitted(self) if hasattr(self.estimator_, "predict_log_proba"): # Check data X = validate_data( self, X, accept_sparse=["csr", "csc"], dtype=None, ensure_all_finite=False, reset=False, ) # Parallel loop n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs) all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)( delayed(_parallel_predict_log_proba)( self.estimators_[starts[i] : starts[i + 1]], self.estimators_features_[starts[i] : starts[i + 1]], X, self.n_classes_, ) for i in range(n_jobs) ) # Reduce log_proba = all_log_proba[0] for j in range(1, len(all_log_proba)): log_proba = np.logaddexp(log_proba, all_log_proba[j]) log_proba -= np.log(self.n_estimators) else: log_proba = np.log(self.predict_proba(X)) return log_proba
BaggingClassifier.predict_log_proba
scikit-learn
8
sklearn/ensemble/_bagging.py
def predict_proba(self, X): """Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the mean predicted class probabilities of the base estimators in the ensemble. If base estimators do not implement a ``predict_proba`` method, then it resorts to voting and the predicted class probabilities of an input sample represents the proportion of estimators predicting each class. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- p : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """
/usr/src/app/target_test_cases/failed_tests_BaggingClassifier.predict_proba.txt
def predict_proba(self, X): """Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the mean predicted class probabilities of the base estimators in the ensemble. If base estimators do not implement a ``predict_proba`` method, then it resorts to voting and the predicted class probabilities of an input sample represents the proportion of estimators predicting each class. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- p : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. """ check_is_fitted(self) # Check data X = validate_data( self, X, accept_sparse=["csr", "csc"], dtype=None, ensure_all_finite=False, reset=False, ) # Parallel loop n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs) all_proba = Parallel( n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args() )( delayed(_parallel_predict_proba)( self.estimators_[starts[i] : starts[i + 1]], self.estimators_features_[starts[i] : starts[i + 1]], X, self.n_classes_, ) for i in range(n_jobs) ) # Reduce proba = sum(all_proba) / self.n_estimators return proba
BaggingClassifier.predict_proba
scikit-learn
9
sklearn/linear_model/_bayes.py
def fit(self, X, y, sample_weight=None): """Fit the model. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. sample_weight : ndarray of shape (n_samples,), default=None Individual weights for each sample. .. versionadded:: 0.20 parameter *sample_weight* support to BayesianRidge. Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_BayesianRidge.fit.txt
def fit(self, X, y, sample_weight=None):
    """Fit the model.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Training data.

    y : ndarray of shape (n_samples,)
        Target values. Will be cast to X's dtype if necessary.

    sample_weight : ndarray of shape (n_samples,), default=None
        Individual weights for each sample.

        .. versionadded:: 0.20
           parameter *sample_weight* support to BayesianRidge.

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    X, y = validate_data(
        self,
        X,
        y,
        dtype=[np.float64, np.float32],
        force_writeable=True,
        y_numeric=True,
    )
    dtype = X.dtype

    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype)

    X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data(
        X,
        y,
        fit_intercept=self.fit_intercept,
        copy=self.copy_X,
        sample_weight=sample_weight,
    )

    if sample_weight is not None:
        # Sample weight can be implemented via a simple rescaling.
        X, y, _ = _rescale_data(X, y, sample_weight)

    self.X_offset_ = X_offset_
    self.X_scale_ = X_scale_
    n_samples, n_features = X.shape

    # Initialization of the values of the parameters
    eps = np.finfo(np.float64).eps
    # Add `eps` in the denominator to avoid division by zero if `np.var(y)`
    # is zero
    alpha_ = self.alpha_init
    lambda_ = self.lambda_init
    if alpha_ is None:
        alpha_ = 1.0 / (np.var(y) + eps)
    if lambda_ is None:
        lambda_ = 1.0

    # Avoid unintended type promotion to float64 with numpy 2
    alpha_ = np.asarray(alpha_, dtype=dtype)
    lambda_ = np.asarray(lambda_, dtype=dtype)

    verbose = self.verbose
    lambda_1 = self.lambda_1
    lambda_2 = self.lambda_2
    alpha_1 = self.alpha_1
    alpha_2 = self.alpha_2

    self.scores_ = list()
    coef_old_ = None

    XT_y = np.dot(X.T, y)
    U, S, Vh = linalg.svd(X, full_matrices=False)
    eigen_vals_ = S**2

    # Convergence loop of the bayesian ridge regression
    for iter_ in range(self.max_iter):
        # update posterior mean coef_ based on alpha_ and lambda_ and
        # compute corresponding rmse
        coef_, rmse_ = self._update_coef_(
            X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
        )
        if self.compute_score:
            # compute the log marginal likelihood
            s = self._log_marginal_likelihood(
                n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
            )
            self.scores_.append(s)

        # Update alpha and lambda according to (MacKay, 1992)
        gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_))
        lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2)
        alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2)

        # Check for convergence
        if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
            if verbose:
                print("Convergence after ", str(iter_), " iterations")
            break
        coef_old_ = np.copy(coef_)

    self.n_iter_ = iter_ + 1

    # return regularization parameters and corresponding posterior mean,
    # log marginal likelihood and posterior covariance
    self.alpha_ = alpha_
    self.lambda_ = lambda_
    self.coef_, rmse_ = self._update_coef_(
        X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_
    )
    if self.compute_score:
        # compute the log marginal likelihood
        s = self._log_marginal_likelihood(
            n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_
        )
        self.scores_.append(s)
        self.scores_ = np.array(self.scores_)

    # posterior covariance is given by 1/alpha_ * scaled_sigma_
    scaled_sigma_ = np.dot(
        Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis]
    )
    self.sigma_ = (1.0 / alpha_) * scaled_sigma_

    self._set_intercept(X_offset_, y_offset_, X_scale_)

    return self
BayesianRidge.fit
scikit-learn
10
sklearn/linear_model/_bayes.py
def predict(self, X, return_std=False):
    """Predict using the linear model.

    In addition to the mean of the predictive distribution, its
    standard deviation can also be returned.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Samples.

    return_std : bool, default=False
        Whether to return the standard deviation of posterior prediction.

    Returns
    -------
    y_mean : array-like of shape (n_samples,)
        Mean of predictive distribution of query points.

    y_std : array-like of shape (n_samples,)
        Standard deviation of predictive distribution of query points.
    """
/usr/src/app/target_test_cases/failed_tests_BayesianRidge.predict.txt
def predict(self, X, return_std=False):
    """Predict using the linear model.

    In addition to the mean of the predictive distribution, its
    standard deviation can also be returned.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Samples.

    return_std : bool, default=False
        Whether to return the standard deviation of posterior prediction.

    Returns
    -------
    y_mean : array-like of shape (n_samples,)
        Mean of predictive distribution of query points.

    y_std : array-like of shape (n_samples,)
        Standard deviation of predictive distribution of query points.
    """
    y_mean = self._decision_function(X)
    if not return_std:
        return y_mean
    else:
        sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
        y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
        return y_mean, y_std
BayesianRidge.predict
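A brief usage sketch covering the two BayesianRidge records above; the synthetic data and true coefficients are made up for illustration:

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X @ np.array([1.5, 0.0, -2.0]) + 0.1 * rng.randn(100)

reg = BayesianRidge(compute_score=True).fit(X, y)
print(reg.alpha_, reg.lambda_)              # learned noise and weight precisions
print(reg.n_iter_)                          # iterations until convergence

y_mean, y_std = reg.predict(X[:3], return_std=True)
print(y_mean, y_std)                        # posterior predictive mean and std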
scikit-learn
11
sklearn/neural_network/_rbm.py
def fit(self, X, y=None):
    """Fit the model to the data X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
        Target values (None for unsupervised transformations).

    Returns
    -------
    self : BernoulliRBM
        The fitted model.
    """
/usr/src/app/target_test_cases/failed_tests_BernoulliRBM.fit.txt
def fit(self, X, y=None):
    """Fit the model to the data X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
        Target values (None for unsupervised transformations).

    Returns
    -------
    self : BernoulliRBM
        The fitted model.
    """
    X = validate_data(self, X, accept_sparse="csr", dtype=(np.float64, np.float32))
    n_samples = X.shape[0]
    rng = check_random_state(self.random_state)

    self.components_ = np.asarray(
        rng.normal(0, 0.01, (self.n_components, X.shape[1])),
        order="F",
        dtype=X.dtype,
    )
    self._n_features_out = self.components_.shape[0]
    self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)
    self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)
    self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype)

    n_batches = int(np.ceil(float(n_samples) / self.batch_size))
    batch_slices = list(
        gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples)
    )
    verbose = self.verbose
    begin = time.time()
    for iteration in range(1, self.n_iter + 1):
        for batch_slice in batch_slices:
            self._fit(X[batch_slice], rng)

        if verbose:
            end = time.time()
            print(
                "[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs"
                % (
                    type(self).__name__,
                    iteration,
                    self.score_samples(X).mean(),
                    end - begin,
                )
            )
            begin = end

    return self
BernoulliRBM.fit
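A minimal, illustrative fit of the RBM record above on made-up binary data; the sizes and learning rate are arbitrary:

import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.rand(100, 16) > 0.5).astype(np.float64)  # binary visible units

rbm = BernoulliRBM(n_components=8, learning_rate=0.05, batch_size=10,
                   n_iter=5, verbose=1, random_state=0)
rbm.fit(X)  # verbose=1 prints the per-iteration pseudo-likelihood shown above
print(rbm.components_.shape)  # (8, 16): one weight row per hidden unit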
scikit-learn
12
sklearn/neural_network/_rbm.py
def partial_fit(self, X, y=None):
    """Fit the model to the partial segment of the data X.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
        Target values (None for unsupervised transformations).

    Returns
    -------
    self : BernoulliRBM
        The fitted model.
    """
/usr/src/app/target_test_cases/failed_tests_BernoulliRBM.partial_fit.txt
def partial_fit(self, X, y=None):
    """Fit the model to the partial segment of the data X.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
        Target values (None for unsupervised transformations).

    Returns
    -------
    self : BernoulliRBM
        The fitted model.
    """
    first_pass = not hasattr(self, "components_")
    X = validate_data(
        self, X, accept_sparse="csr", dtype=np.float64, reset=first_pass
    )
    if not hasattr(self, "random_state_"):
        self.random_state_ = check_random_state(self.random_state)
    if not hasattr(self, "components_"):
        self.components_ = np.asarray(
            self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])),
            order="F",
        )
        self._n_features_out = self.components_.shape[0]
    if not hasattr(self, "intercept_hidden_"):
        self.intercept_hidden_ = np.zeros(
            self.n_components,
        )
    if not hasattr(self, "intercept_visible_"):
        self.intercept_visible_ = np.zeros(
            X.shape[1],
        )
    if not hasattr(self, "h_samples_"):
        self.h_samples_ = np.zeros((self.batch_size, self.n_components))

    self._fit(X, self.random_state_)
BernoulliRBM.partial_fit
scikit-learn
13
sklearn/neural_network/_rbm.py
def score_samples(self, X):
    """Compute the pseudo-likelihood of X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Values of the visible layer. Must be all-boolean (not checked).

    Returns
    -------
    pseudo_likelihood : ndarray of shape (n_samples,)
        Value of the pseudo-likelihood (proxy for likelihood).

    Notes
    -----
    This method is not deterministic: it computes a quantity called the
    free energy on X, then on a randomly corrupted version of X, and
    returns the log of the logistic function of the difference.
    """
/usr/src/app/target_test_cases/failed_tests_BernoulliRBM.score_samples.txt
def score_samples(self, X):
    """Compute the pseudo-likelihood of X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Values of the visible layer. Must be all-boolean (not checked).

    Returns
    -------
    pseudo_likelihood : ndarray of shape (n_samples,)
        Value of the pseudo-likelihood (proxy for likelihood).

    Notes
    -----
    This method is not deterministic: it computes a quantity called the
    free energy on X, then on a randomly corrupted version of X, and
    returns the log of the logistic function of the difference.
    """
    check_is_fitted(self)

    v = validate_data(self, X, accept_sparse="csr", reset=False)
    rng = check_random_state(self.random_state)

    # Randomly corrupt one feature in each sample in v.
    ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
    if sp.issparse(v):
        data = -2 * v[ind] + 1
        if isinstance(data, np.matrix):  # v is a sparse matrix
            v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
        else:  # v is a sparse array
            v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape)
    else:
        v_ = v.copy()
        v_[ind] = 1 - v_[ind]

    fe = self._free_energy(v)
    fe_ = self._free_energy(v_)
    # log(expit(x)) = log(1 / (1 + exp(-x))) = -np.logaddexp(0, -x)
    return -v.shape[1] * np.logaddexp(0, -(fe_ - fe))
BernoulliRBM.score_samples
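A standalone sketch combining the partial_fit and score_samples records above; the batch split and sizes are arbitrary choices:

import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.rand(60, 16) > 0.5).astype(np.float64)  # binary visible units

rbm = BernoulliRBM(n_components=8, random_state=0)
for batch in np.array_split(X, 6):  # incremental training, one mini-batch at a time
    rbm.partial_fit(batch)

pl = rbm.score_samples(X)  # stochastic pseudo-likelihood proxy, one value per sample
print(pl.shape, pl.mean())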
scikit-learn
14
sklearn/cluster/_bisect_k_means.py
def fit(self, X, y=None, sample_weight=None):
    """Compute bisecting k-means clustering.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training instances to cluster.

        .. note:: The data will be converted to C ordering,
            which will cause a memory copy
            if the given data is not C-contiguous.

    y : Ignored
        Not used, present here for API consistency by convention.

    sample_weight : array-like of shape (n_samples,), default=None
        The weights for each observation in X. If None, all observations
        are assigned equal weight. `sample_weight` is not used during
        initialization if `init` is a callable.

    Returns
    -------
    self
        Fitted estimator.
    """
/usr/src/app/target_test_cases/failed_tests_BisectingKMeans.fit.txt
def fit(self, X, y=None, sample_weight=None):
    """Compute bisecting k-means clustering.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training instances to cluster.

        .. note:: The data will be converted to C ordering,
            which will cause a memory copy
            if the given data is not C-contiguous.

    y : Ignored
        Not used, present here for API consistency by convention.

    sample_weight : array-like of shape (n_samples,), default=None
        The weights for each observation in X. If None, all observations
        are assigned equal weight. `sample_weight` is not used during
        initialization if `init` is a callable.

    Returns
    -------
    self
        Fitted estimator.
    """
    X = validate_data(
        self,
        X,
        accept_sparse="csr",
        dtype=[np.float64, np.float32],
        order="C",
        copy=self.copy_x,
        accept_large_sparse=False,
    )

    self._check_params_vs_input(X)

    self._random_state = check_random_state(self.random_state)
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
    self._n_threads = _openmp_effective_n_threads()

    if self.algorithm == "lloyd" or self.n_clusters == 1:
        self._kmeans_single = _kmeans_single_lloyd
        self._check_mkl_vcomp(X, X.shape[0])
    else:
        self._kmeans_single = _kmeans_single_elkan

    # Subtract the mean of X for more accurate distance computations
    if not sp.issparse(X):
        self._X_mean = X.mean(axis=0)
        X -= self._X_mean

    # Initialize the hierarchical clusters tree
    self._bisecting_tree = _BisectingTree(
        indices=np.arange(X.shape[0]),
        center=X.mean(axis=0),
        score=0,
    )

    x_squared_norms = row_norms(X, squared=True)

    for _ in range(self.n_clusters - 1):
        # Choose the cluster to bisect
        cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()

        # Split this cluster into 2 subclusters
        self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect)

    # Aggregate final labels and centers from the bisecting tree
    self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
    self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)

    for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()):
        self.labels_[cluster_node.indices] = i
        self.cluster_centers_[i] = cluster_node.center
        cluster_node.label = i  # label final clusters for future prediction
        cluster_node.indices = None  # release memory

    # Restore original data
    if not sp.issparse(X):
        X += self._X_mean
        self.cluster_centers_ += self._X_mean

    _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
    self.inertia_ = _inertia(
        X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads
    )

    self._n_features_out = self.cluster_centers_.shape[0]

    return self
BisectingKMeans.fit
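An illustrative run of the bisecting k-means fit recorded above, on synthetic blobs (all parameters are arbitrary):

from sklearn.cluster import BisectingKMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=4, random_state=0)
bkm = BisectingKMeans(n_clusters=4, random_state=0).fit(X)
print(bkm.cluster_centers_.shape)  # (4, 2): one center per leaf of the tree
print(bkm.inertia_)                # within-cluster sum of squared distances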
scikit-learn
15
sklearn/calibration.py
def fit(self, X, y, sample_weight=None, **fit_params):
    """Fit the calibrated model.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,)
        Target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    **fit_params : dict
        Parameters to pass to the `fit` method of the underlying
        classifier.

    Returns
    -------
    self : object
        Returns an instance of self.
    """
/usr/src/app/target_test_cases/failed_tests_CalibratedClassifierCV.fit.txt
def fit(self, X, y, sample_weight=None, **fit_params):
    """Fit the calibrated model.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training data.

    y : array-like of shape (n_samples,)
        Target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    **fit_params : dict
        Parameters to pass to the `fit` method of the underlying
        classifier.

    Returns
    -------
    self : object
        Returns an instance of self.
    """
    check_classification_targets(y)
    X, y = indexable(X, y)
    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X)

    estimator = self._get_estimator()

    self.calibrated_classifiers_ = []
    if self.cv == "prefit":
        # `classes_` should be consistent with that of estimator
        check_is_fitted(self.estimator, attributes=["classes_"])
        self.classes_ = self.estimator.classes_

        predictions, _ = _get_response_values(
            estimator,
            X,
            response_method=["decision_function", "predict_proba"],
        )
        if predictions.ndim == 1:
            # Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
            predictions = predictions.reshape(-1, 1)

        calibrated_classifier = _fit_calibrator(
            estimator,
            predictions,
            y,
            self.classes_,
            self.method,
            sample_weight,
        )
        self.calibrated_classifiers_.append(calibrated_classifier)
    else:
        # Set `classes_` using all `y`
        label_encoder_ = LabelEncoder().fit(y)
        self.classes_ = label_encoder_.classes_

        if _routing_enabled():
            routed_params = process_routing(
                self,
                "fit",
                sample_weight=sample_weight,
                **fit_params,
            )
        else:
            # sample_weight checks
            fit_parameters = signature(estimator.fit).parameters
            supports_sw = "sample_weight" in fit_parameters
            if sample_weight is not None and not supports_sw:
                estimator_name = type(estimator).__name__
                warnings.warn(
                    f"Since {estimator_name} does not appear to accept"
                    " sample_weight, sample weights will only be used for the"
                    " calibration itself. This can be caused by a limitation of"
                    " the current scikit-learn API. See the following issue for"
                    " more details:"
                    " https://github.com/scikit-learn/scikit-learn/issues/21134."
                    " Be warned that the result of the calibration is likely to be"
                    " incorrect."
                )
            routed_params = Bunch()
            routed_params.splitter = Bunch(split={})  # no routing for splitter
            routed_params.estimator = Bunch(fit=fit_params)
            if sample_weight is not None and supports_sw:
                routed_params.estimator.fit["sample_weight"] = sample_weight

        # Check that each cross-validation fold can have at least one
        # example per class
        if isinstance(self.cv, int):
            n_folds = self.cv
        elif hasattr(self.cv, "n_splits"):
            n_folds = self.cv.n_splits
        else:
            n_folds = None
        if n_folds and np.any(np.unique(y, return_counts=True)[1] < n_folds):
            raise ValueError(
                f"Requesting {n_folds}-fold "
                "cross-validation but provided less than "
                f"{n_folds} examples for at least one class."
            )
        if isinstance(self.cv, LeaveOneOut):
            raise ValueError(
                "LeaveOneOut cross-validation does not allow "
                "all classes to be present in test splits. "
                "Please use a cross-validation generator that allows "
                "all classes to appear in every test and train split."
            )
        cv = check_cv(self.cv, y, classifier=True)

        if self.ensemble:
            parallel = Parallel(n_jobs=self.n_jobs)
            self.calibrated_classifiers_ = parallel(
                delayed(_fit_classifier_calibrator_pair)(
                    clone(estimator),
                    X,
                    y,
                    train=train,
                    test=test,
                    method=self.method,
                    classes=self.classes_,
                    sample_weight=sample_weight,
                    fit_params=routed_params.estimator.fit,
                )
                for train, test in cv.split(X, y, **routed_params.splitter.split)
            )
        else:
            this_estimator = clone(estimator)
            method_name = _check_response_method(
                this_estimator,
                ["decision_function", "predict_proba"],
            ).__name__
            predictions = cross_val_predict(
                estimator=this_estimator,
                X=X,
                y=y,
                cv=cv,
                method=method_name,
                n_jobs=self.n_jobs,
                params=routed_params.estimator.fit,
            )
            if len(self.classes_) == 2:
                # Ensure shape (n_samples, 1) in the binary case
                if method_name == "predict_proba":
                    # Select the probability column of the positive class
                    predictions = _process_predict_proba(
                        y_pred=predictions,
                        target_type="binary",
                        classes=self.classes_,
                        pos_label=self.classes_[1],
                    )
                predictions = predictions.reshape(-1, 1)

            this_estimator.fit(X, y, **routed_params.estimator.fit)
            # Note: Here we don't pass on fit_params because the supported
            # calibrators don't support fit_params anyway
            calibrated_classifier = _fit_calibrator(
                this_estimator,
                predictions,
                y,
                self.classes_,
                self.method,
                sample_weight,
            )
            self.calibrated_classifiers_.append(calibrated_classifier)

    first_clf = self.calibrated_classifiers_[0].estimator
    if hasattr(first_clf, "n_features_in_"):
        self.n_features_in_ = first_clf.n_features_in_
    if hasattr(first_clf, "feature_names_in_"):
        self.feature_names_in_ = first_clf.feature_names_in_
    return self
CalibratedClassifierCV.fit
scikit-learn
16
sklearn/calibration.py
def predict_proba(self, X):
    """Calibrated probabilities of classification.

    This function returns calibrated probabilities of classification
    according to each class on an array of test vectors X.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The samples, as accepted by `estimator.predict_proba`.

    Returns
    -------
    C : ndarray of shape (n_samples, n_classes)
        The predicted probas.
    """
/usr/src/app/target_test_cases/failed_tests_CalibratedClassifierCV.predict_proba.txt
def predict_proba(self, X):
    """Calibrated probabilities of classification.

    This function returns calibrated probabilities of classification
    according to each class on an array of test vectors X.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The samples, as accepted by `estimator.predict_proba`.

    Returns
    -------
    C : ndarray of shape (n_samples, n_classes)
        The predicted probas.
    """
    check_is_fitted(self)
    # Compute the arithmetic mean of the predictions of the calibrated
    # classifiers
    mean_proba = np.zeros((_num_samples(X), len(self.classes_)))
    for calibrated_classifier in self.calibrated_classifiers_:
        proba = calibrated_classifier.predict_proba(X)
        mean_proba += proba

    mean_proba /= len(self.calibrated_classifiers_)

    return mean_proba
CalibratedClassifierCV.predict_proba
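A hedged sketch exercising the two CalibratedClassifierCV records above in ensemble (cv) mode; the base classifier, calibration method, and fold count are arbitrary choices:

from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=400, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Ensemble mode: one (classifier, calibrator) pair is fit per CV fold.
cal = CalibratedClassifierCV(LinearSVC(), method="sigmoid", cv=3)
cal.fit(X_train, y_train)
print(len(cal.calibrated_classifiers_))  # 3: one calibrated pair per fold

proba = cal.predict_proba(X_test[:5])    # arithmetic mean over the 3 pairs
print(proba.sum(axis=1))                 # each row sums to 1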
scikit-learn
17
sklearn/calibration.py
def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
    """Plot visualization.

    Extra keyword arguments will be passed to
    :func:`matplotlib.pyplot.plot`.

    Parameters
    ----------
    ax : Matplotlib Axes, default=None
        Axes object to plot on. If `None`, a new figure and axes are
        created.

    name : str, default=None
        Name for labeling curve. If `None`, use `estimator_name` if
        not `None`, otherwise no labeling is shown.

    ref_line : bool, default=True
        If `True`, plots a reference line representing a perfectly
        calibrated classifier.

    **kwargs : dict
        Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

    Returns
    -------
    display : :class:`~sklearn.calibration.CalibrationDisplay`
        Object that stores computed values.
    """
/usr/src/app/target_test_cases/failed_tests_CalibrationDisplay.plot.txt
def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
    """Plot visualization.

    Extra keyword arguments will be passed to
    :func:`matplotlib.pyplot.plot`.

    Parameters
    ----------
    ax : Matplotlib Axes, default=None
        Axes object to plot on. If `None`, a new figure and axes are
        created.

    name : str, default=None
        Name for labeling curve. If `None`, use `estimator_name` if
        not `None`, otherwise no labeling is shown.

    ref_line : bool, default=True
        If `True`, plots a reference line representing a perfectly
        calibrated classifier.

    **kwargs : dict
        Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.

    Returns
    -------
    display : :class:`~sklearn.calibration.CalibrationDisplay`
        Object that stores computed values.
    """
    self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)

    info_pos_label = (
        f"(Positive class: {self.pos_label})" if self.pos_label is not None else ""
    )

    line_kwargs = {"marker": "s", "linestyle": "-"}
    if name is not None:
        line_kwargs["label"] = name
    line_kwargs.update(**kwargs)

    ref_line_label = "Perfectly calibrated"
    existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1]
    if ref_line and not existing_ref_line:
        self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label)
    self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0]

    # We always have to show the legend for at least the reference line
    self.ax_.legend(loc="lower right")

    xlabel = f"Mean predicted probability {info_pos_label}"
    ylabel = f"Fraction of positives {info_pos_label}"
    self.ax_.set(xlabel=xlabel, ylabel=ylabel)

    return self
CalibrationDisplay.plot
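A standalone sketch of the plot method recorded above, building the display directly from made-up probabilities via calibration_curve rather than from a fitted estimator:

import numpy as np
from sklearn.calibration import CalibrationDisplay, calibration_curve

rng = np.random.RandomState(0)
y_prob = rng.rand(200)                         # made-up predicted probabilities
y_true = (rng.rand(200) < y_prob).astype(int)  # labels consistent with them

prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=5)
disp = CalibrationDisplay(prob_true, prob_pred, y_prob, estimator_name="toy model")
disp.plot(ref_line=True)  # draws the curve plus the diagonal reference line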
scikit-learn
18
sklearn/utils/_mocking.py
def decision_function(self, X):
    """Confidence score.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data.

    Returns
    -------
    decision : ndarray of shape (n_samples,) if n_classes == 2\
            else (n_samples, n_classes)
        Confidence score.
    """
/usr/src/app/target_test_cases/failed_tests_CheckingClassifier.decision_function.txt
def decision_function(self, X):
    """Confidence score.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data.

    Returns
    -------
    decision : ndarray of shape (n_samples,) if n_classes == 2\
            else (n_samples, n_classes)
        Confidence score.
    """
    if (
        self.methods_to_check == "all"
        or "decision_function" in self.methods_to_check
    ):
        X, y = self._check_X_y(X)

    rng = check_random_state(self.random_state)
    if len(self.classes_) == 2:
        # for binary classifier, the confidence score is related to
        # classes_[1] and therefore should be null.
        return rng.randn(_num_samples(X))
    else:
        return rng.randn(_num_samples(X), len(self.classes_))
CheckingClassifier.decision_function
scikit-learn
19
sklearn/utils/_mocking.py
def fit(self, X, y, sample_weight=None, **fit_params):
    """Fit classifier.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples, n_outputs) or (n_samples,), \
            default=None
        Target relative to X for classification or regression;
        None for unsupervised learning.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    **fit_params : dict of string -> object
        Parameters passed to the ``fit`` method of the estimator.

    Returns
    -------
    self
    """
/usr/src/app/target_test_cases/failed_tests_CheckingClassifier.fit.txt
def fit(self, X, y, sample_weight=None, **fit_params):
    """Fit classifier.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples, n_outputs) or (n_samples,), \
            default=None
        Target relative to X for classification or regression;
        None for unsupervised learning.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights. If None, then samples are equally weighted.

    **fit_params : dict of string -> object
        Parameters passed to the ``fit`` method of the estimator.

    Returns
    -------
    self
    """
    assert _num_samples(X) == _num_samples(y)
    if self.methods_to_check == "all" or "fit" in self.methods_to_check:
        X, y = self._check_X_y(X, y, should_be_fitted=False)
    self.n_features_in_ = np.shape(X)[1]
    self.classes_ = np.unique(check_array(y, ensure_2d=False, allow_nd=True))
    if self.expected_fit_params:
        missing = set(self.expected_fit_params) - set(fit_params)
        if missing:
            raise AssertionError(
                f"Expected fit parameter(s) {list(missing)} not seen."
            )
        for key, value in fit_params.items():
            if _num_samples(value) != _num_samples(X):
                raise AssertionError(
                    f"Fit parameter {key} has length {_num_samples(value)}"
                    f"; expected {_num_samples(X)}."
                )
    if self.expected_sample_weight:
        if sample_weight is None:
            raise AssertionError("Expected sample_weight to be passed")
        _check_sample_weight(sample_weight, X)

    return self
CheckingClassifier.fit
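An illustrative use of the mock's fit contract above; note that `sklearn.utils._mocking` is a private test-helper module, so this sketch mirrors how scikit-learn's own tests use it:

import numpy as np
from sklearn.utils._mocking import CheckingClassifier

X = np.ones((10, 2))
y = np.arange(10) % 2

clf = CheckingClassifier(expected_fit_params=["sample_group"])
clf.fit(X, y, sample_group=np.zeros(10))  # OK: the declared fit param was passed

try:
    clf.fit(X, y)  # missing the declared fit param
except AssertionError as exc:
    print(exc)     # "Expected fit parameter(s) ['sample_group'] not seen."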
scikit-learn
20
sklearn/utils/_mocking.py
def predict_proba(self, X):
    """Predict probabilities for each class.

    Here, the mock classifier provides random, non-negative
    probabilities that are normalized to sum to one for each sample.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data.

    Returns
    -------
    proba : ndarray of shape (n_samples, n_classes)
        The probabilities for each sample and class.
    """
/usr/src/app/target_test_cases/failed_tests_CheckingClassifier.predict_proba.txt
def predict_proba(self, X):
    """Predict probabilities for each class.

    Here, the mock classifier provides random, non-negative
    probabilities that are normalized to sum to one for each sample.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The input data.

    Returns
    -------
    proba : ndarray of shape (n_samples, n_classes)
        The probabilities for each sample and class.
    """
    if self.methods_to_check == "all" or "predict_proba" in self.methods_to_check:
        X, y = self._check_X_y(X)
    rng = check_random_state(self.random_state)
    proba = rng.randn(_num_samples(X), len(self.classes_))
    proba = np.abs(proba, out=proba)
    proba /= np.sum(proba, axis=1)[:, np.newaxis]
    return proba
CheckingClassifier.predict_proba
scikit-learn
21
sklearn/utils/_mocking.py
def score(self, X=None, Y=None):
    """Fake score.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    Y : array-like of shape (n_samples, n_output) or (n_samples,)
        Target relative to X for classification or regression;
        None for unsupervised learning.

    Returns
    -------
    score : float
        Either 0 or 1 depending on `foo_param` (i.e. `foo_param > 1 =>
        score=1` otherwise `score=0`).
    """
/usr/src/app/target_test_cases/failed_tests_CheckingClassifier.score.txt
def score(self, X=None, Y=None):
    """Fake score.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    Y : array-like of shape (n_samples, n_output) or (n_samples,)
        Target relative to X for classification or regression;
        None for unsupervised learning.

    Returns
    -------
    score : float
        Either 0 or 1 depending on `foo_param` (i.e. `foo_param > 1 =>
        score=1` otherwise `score=0`).
    """
    if self.methods_to_check == "all" or "score" in self.methods_to_check:
        self._check_X_y(X, Y)
    if self.foo_param > 1:
        score = 1.0
    else:
        score = 0.0
    return score
CheckingClassifier.score
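A combined sketch of the mock's prediction-side records above (decision_function, predict_proba, score), again using the private test helper; the parameter values are arbitrary:

import numpy as np
from sklearn.utils._mocking import CheckingClassifier

X = np.ones((10, 2))
y = np.arange(10) % 2

clf = CheckingClassifier(foo_param=2).fit(X, y)
print(clf.score(X, y))                    # 1.0 because foo_param > 1
print(clf.predict_proba(X).sum(axis=1))   # random rows, each summing to 1
print(clf.decision_function(X).shape)     # (10,) for this binary problem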